2026-03-10T08:31:50.128 INFO:root:teuthology version: 1.2.4.dev6+g1c580df7a
2026-03-10T08:31:50.133 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-10T08:31:50.158 INFO:teuthology.run:Config:
archive_path: /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/966
branch: squid
description: orch/cephadm/smoke-roleless/{0-distro/ubuntu_22.04 1-start 2-services/rgw-ingress 3-final}
email: null
first_in_suite: false
flavor: default
job_id: '966'
ktype: distro
last_in_suite: false
machine_type: vps
name: kyr-2026-03-10_01:00:38-orch-squid-none-default-vps
no_nested_subset: false
openstack:
- volumes:
    count: 4
    size: 10
os_type: ubuntu
os_version: '22.04'
overrides:
  admin_socket:
    branch: squid
  ansible.cephlab:
    branch: main
    skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
    vars:
      timezone: UTC
  ceph:
    conf:
      mgr:
        debug mgr: 20
        debug ms: 1
      mon:
        debug mon: 20
        debug ms: 1
        debug paxos: 20
      osd:
        debug ms: 1
        debug osd: 20
        osd mclock iops capacity threshold hdd: 49000
        osd shutdown pgref assert: true
    flavor: default
    log-ignorelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - CEPHADM_DAEMON_PLACE_FAIL
    - CEPHADM_FAILED_DAEMON
    log-only-match:
    - CEPHADM_
    sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon: {}
  install:
    ceph:
      flavor: default
      sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
    extra_system_packages:
      deb:
      - python3-xmltodict
      - python3-jmespath
      rpm:
      - bzip2
      - perl-Test-Harness
      - python3-xmltodict
      - python3-jmespath
  workunit:
    branch: tt-squid
    sha1: 75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b
owner: kyr
priority: 1000
repo: https://github.com/ceph/ceph.git
roles:
- - host.a
  - client.0
- - host.b
  - client.1
seed: 8043
sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
sleep_before_teardown: 0
subset: 1/64
suite: orch
suite_branch: tt-squid
suite_path: /home/teuthos/src/github.com_kshtsk_ceph_75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b/qa
suite_relpath: qa
suite_repo: https://github.com/kshtsk/ceph.git
suite_sha1: 75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b
targets:
  vm02.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBLBucj8LSiaYjxAr5izAiZ+Uy2ZLf7L4H0IeCkoOlaD4kvBWbAYimcfLlkC98j5AGPfkfJG7Xnk7K0oDyBVY1Dg=
  vm07.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBA8R0oXYiprbU1heqOfNiR5suRdE/JEvy5GUuzwAmR0/T1pMw2S5cBTgeJM7nEpGUQv1ZoLrET7f0azYa3EIPF4=
tasks:
- cephadm:
    roleless: true
- cephadm.shell:
    host.a:
    - ceph orch status
    - ceph orch ps
    - ceph orch ls
    - ceph orch host ls
    - ceph orch device ls
- vip: null
- cephadm.shell:
    host.a:
    - ceph orch device ls --refresh
- cephadm.apply:
    specs:
    - placement:
        count: 4
        host_pattern: '*'
      service_id: foo
      service_type: rgw
      spec:
        rgw_frontend_port: 8000
    - placement:
        count: 2
      service_id: rgw.foo
      service_type: ingress
      spec:
        backend_service: rgw.foo
        frontend_port: 9000
        monitor_port: 9001
        virtual_ip: '{{VIP0}}/{{VIPPREFIXLEN}}'
- cephadm.wait_for_service:
    service: rgw.foo
- cephadm.wait_for_service:
    service: ingress.rgw.foo
- cephadm.shell:
    host.a:
    - |
      echo "Check while healthy..."
      curl http://{{VIP0}}:9000/

      # stop each rgw in turn
      echo "Check with each rgw stopped in turn..."
      for rgw in `ceph orch ps | grep ^rgw.foo. | awk '{print $1}'`; do
        ceph orch daemon stop $rgw
        timeout 300 bash -c "while ! ceph orch ps | grep $rgw | grep stopped; do echo 'Waiting for $rgw to stop'; ceph orch ps --daemon-type rgw; ceph health detail; sleep 5 ; done"
        timeout 300 bash -c "while ! curl http://{{VIP0}}:9000/ ; do echo 'Waiting for http://{{VIP0}}:9000/ to be available'; sleep 1 ; done"
        ceph orch daemon start $rgw
        timeout 300 bash -c "while ! ceph orch ps | grep $rgw | grep running; do echo 'Waiting for $rgw to start'; ceph orch ps --daemon-type rgw; ceph health detail; sleep 5 ; done"
      done

      # stop each haproxy in turn
      echo "Check with each haproxy down in turn..."
      for haproxy in `ceph orch ps | grep ^haproxy.rgw.foo. | awk '{print $1}'`; do
        ceph orch daemon stop $haproxy
        timeout 300 bash -c "while ! ceph orch ps | grep $haproxy | grep stopped; do echo 'Waiting for $haproxy to stop'; ceph orch ps --daemon-type haproxy; ceph health detail; sleep 5 ; done"
        timeout 300 bash -c "while ! curl http://{{VIP0}}:9000/ ; do echo 'Waiting for http://{{VIP0}}:9000/ to be available'; sleep 1 ; done"
        ceph orch daemon start $haproxy
        timeout 300 bash -c "while ! ceph orch ps | grep $haproxy | grep running; do echo 'Waiting for $haproxy to start'; ceph orch ps --daemon-type haproxy; ceph health detail; sleep 5 ; done"
      done

      timeout 300 bash -c "while ! curl http://{{VIP0}}:9000/ ; do echo 'Waiting for http://{{VIP0}}:9000/ to be available'; sleep 1 ; done"
- cephadm.shell:
    host.a:
    - stat -c '%u %g' /var/log/ceph | grep '167 167'
    - ceph orch status
    - ceph orch ps
    - ceph orch ls
    - ceph orch host ls
    - ceph orch device ls
    - ceph orch ls | grep '^osd.all-available-devices '
teuthology:
  fragments_dropped: []
  meta: {}
  postmerge: []
teuthology_branch: clyso-debian-13
teuthology_repo: https://github.com/clyso/teuthology
teuthology_sha1: 1c580df7a9c7c2aadc272da296344fd99f27c444
timestamp: 2026-03-10_01:00:38
tube: vps
user: kyr
verbose: false
worker_log: /home/teuthos/.teuthology/dispatcher/dispatcher.vps.611473

2026-03-10T08:31:50.158 INFO:teuthology.run:suite_path is set to /home/teuthos/src/github.com_kshtsk_ceph_75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b/qa; will attempt to use it
2026-03-10T08:31:50.158 INFO:teuthology.run:Found tasks at /home/teuthos/src/github.com_kshtsk_ceph_75a68fd8ca3f918fe9466b4c0bb385b7fc260a9b/qa/tasks
2026-03-10T08:31:50.158 INFO:teuthology.run_tasks:Running task internal.check_packages...
2026-03-10T08:31:50.159 INFO:teuthology.task.internal:Checking packages...
2026-03-10T08:31:50.159 INFO:teuthology.task.internal:Checking packages for os_type 'ubuntu', flavor 'default' and ceph hash 'e911bdebe5c8faa3800735d1568fcdca65db60df'
2026-03-10T08:31:50.159 WARNING:teuthology.packaging:More than one of ref, tag, branch, or sha1 supplied; using branch
2026-03-10T08:31:50.159 INFO:teuthology.packaging:ref: None
2026-03-10T08:31:50.159 INFO:teuthology.packaging:tag: None
2026-03-10T08:31:50.159 INFO:teuthology.packaging:branch: squid
2026-03-10T08:31:50.159 INFO:teuthology.packaging:sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T08:31:50.159 DEBUG:teuthology.packaging:Querying https://shaman.ceph.com/api/search?status=ready&project=ceph&flavor=default&distros=ubuntu%2F22.04%2Fx86_64&ref=squid
2026-03-10T08:31:50.886 INFO:teuthology.task.internal:Found packages for ceph version 19.2.3-678-ge911bdeb-1jammy
2026-03-10T08:31:50.887 INFO:teuthology.run_tasks:Running task internal.buildpackages_prep...
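The 2-services/rgw-ingress check script in the tasks above is built entirely from one retry idiom: probe in a loop under timeout(1), so a daemon that never reaches the expected state fails the job instead of hanging it. A minimal standalone sketch of that pattern (the daemon name in SVC is a hypothetical placeholder; the 300-second budget and 5-second poll mirror the script):

    # wait-until idiom from the rgw-ingress script (sketch)
    SVC=rgw.foo.vm02.xyzabc   # hypothetical daemon name; real names come from 'ceph orch ps'
    timeout 300 bash -c "
      while ! ceph orch ps | grep $SVC | grep running; do
        echo 'Waiting for $SVC to start'
        sleep 5
      done
    "
    # timeout(1) kills the inner shell after 300s and exits non-zero,
    # so the surrounding teuthology task fails fast instead of blocking the run.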
2026-03-10T08:31:50.888 INFO:teuthology.task.internal:no buildpackages task found
2026-03-10T08:31:50.888 INFO:teuthology.run_tasks:Running task internal.save_config...
2026-03-10T08:31:50.888 INFO:teuthology.task.internal:Saving configuration
2026-03-10T08:31:50.893 INFO:teuthology.run_tasks:Running task internal.check_lock...
2026-03-10T08:31:50.894 INFO:teuthology.task.internal.check_lock:Checking locks...
2026-03-10T08:31:50.901 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm02.local', 'description': '/archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/966', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'ubuntu', 'os_version': '22.04', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-10 08:30:35.181486', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:02', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBLBucj8LSiaYjxAr5izAiZ+Uy2ZLf7L4H0IeCkoOlaD4kvBWbAYimcfLlkC98j5AGPfkfJG7Xnk7K0oDyBVY1Dg='}
2026-03-10T08:31:50.907 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm07.local', 'description': '/archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/966', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'ubuntu', 'os_version': '22.04', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-10 08:30:35.180855', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:07', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBA8R0oXYiprbU1heqOfNiR5suRdE/JEvy5GUuzwAmR0/T1pMw2S5cBTgeJM7nEpGUQv1ZoLrET7f0azYa3EIPF4='}
2026-03-10T08:31:50.907 INFO:teuthology.run_tasks:Running task internal.add_remotes...
2026-03-10T08:31:50.907 INFO:teuthology.task.internal:roles: ubuntu@vm02.local - ['host.a', 'client.0']
2026-03-10T08:31:50.908 INFO:teuthology.task.internal:roles: ubuntu@vm07.local - ['host.b', 'client.1']
2026-03-10T08:31:50.908 INFO:teuthology.run_tasks:Running task console_log...
2026-03-10T08:31:50.915 DEBUG:teuthology.task.console_log:vm02 does not support IPMI; excluding
2026-03-10T08:31:50.921 DEBUG:teuthology.task.console_log:vm07 does not support IPMI; excluding
2026-03-10T08:31:50.922 DEBUG:teuthology.exit:Installing handler: Handler(exiter=<teuthology.exit.Exiter object at 0x...>, func=<function ...<locals>.kill_console_loggers at 0x7f0764e7e290>, signals=[15])
2026-03-10T08:31:50.922 INFO:teuthology.run_tasks:Running task internal.connect...
2026-03-10T08:31:50.923 INFO:teuthology.task.internal:Opening connections...
2026-03-10T08:31:50.923 DEBUG:teuthology.task.internal:connecting to ubuntu@vm02.local
2026-03-10T08:31:50.923 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm02.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-10T08:31:50.985 DEBUG:teuthology.task.internal:connecting to ubuntu@vm07.local
2026-03-10T08:31:50.986 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm07.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-10T08:31:51.045 INFO:teuthology.run_tasks:Running task internal.push_inventory...
2026-03-10T08:31:51.047 DEBUG:teuthology.orchestra.run.vm02:> uname -m
2026-03-10T08:31:51.054 INFO:teuthology.orchestra.run.vm02.stdout:x86_64
2026-03-10T08:31:51.054 DEBUG:teuthology.orchestra.run.vm02:> cat /etc/os-release
2026-03-10T08:31:51.098 INFO:teuthology.orchestra.run.vm02.stdout:PRETTY_NAME="Ubuntu 22.04.5 LTS"
2026-03-10T08:31:51.098 INFO:teuthology.orchestra.run.vm02.stdout:NAME="Ubuntu"
2026-03-10T08:31:51.098 INFO:teuthology.orchestra.run.vm02.stdout:VERSION_ID="22.04"
2026-03-10T08:31:51.098 INFO:teuthology.orchestra.run.vm02.stdout:VERSION="22.04.5 LTS (Jammy Jellyfish)"
2026-03-10T08:31:51.098 INFO:teuthology.orchestra.run.vm02.stdout:VERSION_CODENAME=jammy
2026-03-10T08:31:51.098 INFO:teuthology.orchestra.run.vm02.stdout:ID=ubuntu
2026-03-10T08:31:51.098 INFO:teuthology.orchestra.run.vm02.stdout:ID_LIKE=debian
2026-03-10T08:31:51.098 INFO:teuthology.orchestra.run.vm02.stdout:HOME_URL="https://www.ubuntu.com/"
2026-03-10T08:31:51.098 INFO:teuthology.orchestra.run.vm02.stdout:SUPPORT_URL="https://help.ubuntu.com/"
2026-03-10T08:31:51.098 INFO:teuthology.orchestra.run.vm02.stdout:BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
2026-03-10T08:31:51.098 INFO:teuthology.orchestra.run.vm02.stdout:PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
2026-03-10T08:31:51.098 INFO:teuthology.orchestra.run.vm02.stdout:UBUNTU_CODENAME=jammy
2026-03-10T08:31:51.099 INFO:teuthology.lock.ops:Updating vm02.local on lock server
2026-03-10T08:31:51.103 DEBUG:teuthology.orchestra.run.vm07:> uname -m
2026-03-10T08:31:51.122 INFO:teuthology.orchestra.run.vm07.stdout:x86_64
2026-03-10T08:31:51.122 DEBUG:teuthology.orchestra.run.vm07:> cat /etc/os-release
2026-03-10T08:31:51.168 INFO:teuthology.orchestra.run.vm07.stdout:PRETTY_NAME="Ubuntu 22.04.5 LTS"
2026-03-10T08:31:51.168 INFO:teuthology.orchestra.run.vm07.stdout:NAME="Ubuntu"
2026-03-10T08:31:51.168 INFO:teuthology.orchestra.run.vm07.stdout:VERSION_ID="22.04"
2026-03-10T08:31:51.168 INFO:teuthology.orchestra.run.vm07.stdout:VERSION="22.04.5 LTS (Jammy Jellyfish)"
2026-03-10T08:31:51.168 INFO:teuthology.orchestra.run.vm07.stdout:VERSION_CODENAME=jammy
2026-03-10T08:31:51.168 INFO:teuthology.orchestra.run.vm07.stdout:ID=ubuntu
2026-03-10T08:31:51.168 INFO:teuthology.orchestra.run.vm07.stdout:ID_LIKE=debian
2026-03-10T08:31:51.168 INFO:teuthology.orchestra.run.vm07.stdout:HOME_URL="https://www.ubuntu.com/"
2026-03-10T08:31:51.168 INFO:teuthology.orchestra.run.vm07.stdout:SUPPORT_URL="https://help.ubuntu.com/"
2026-03-10T08:31:51.168 INFO:teuthology.orchestra.run.vm07.stdout:BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
2026-03-10T08:31:51.168 INFO:teuthology.orchestra.run.vm07.stdout:PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
2026-03-10T08:31:51.168 INFO:teuthology.orchestra.run.vm07.stdout:UBUNTU_CODENAME=jammy
2026-03-10T08:31:51.168 INFO:teuthology.lock.ops:Updating vm07.local on lock server
2026-03-10T08:31:51.175 INFO:teuthology.run_tasks:Running task internal.serialize_remote_roles...
2026-03-10T08:31:51.176 INFO:teuthology.run_tasks:Running task internal.check_conflict...
2026-03-10T08:31:51.179 INFO:teuthology.task.internal:Checking for old test directory...
2026-03-10T08:31:51.179 DEBUG:teuthology.orchestra.run.vm02:> test '!' -e /home/ubuntu/cephtest
2026-03-10T08:31:51.180 DEBUG:teuthology.orchestra.run.vm07:> test '!' -e /home/ubuntu/cephtest
2026-03-10T08:31:51.211 INFO:teuthology.run_tasks:Running task internal.check_ceph_data...
2026-03-10T08:31:51.212 INFO:teuthology.task.internal:Checking for non-empty /var/lib/ceph...
2026-03-10T08:31:51.212 DEBUG:teuthology.orchestra.run.vm02:> test -z $(ls -A /var/lib/ceph)
2026-03-10T08:31:51.224 DEBUG:teuthology.orchestra.run.vm07:> test -z $(ls -A /var/lib/ceph)
2026-03-10T08:31:51.226 INFO:teuthology.orchestra.run.vm02.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-03-10T08:31:51.256 INFO:teuthology.orchestra.run.vm07.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-03-10T08:31:51.256 INFO:teuthology.run_tasks:Running task internal.vm_setup...
2026-03-10T08:31:51.263 DEBUG:teuthology.orchestra.run.vm02:> test -e /ceph-qa-ready
2026-03-10T08:31:51.270 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T08:31:51.570 DEBUG:teuthology.orchestra.run.vm07:> test -e /ceph-qa-ready
2026-03-10T08:31:51.573 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T08:31:51.797 INFO:teuthology.run_tasks:Running task internal.base...
2026-03-10T08:31:51.798 INFO:teuthology.task.internal:Creating test directory...
2026-03-10T08:31:51.799 DEBUG:teuthology.orchestra.run.vm02:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-03-10T08:31:51.800 DEBUG:teuthology.orchestra.run.vm07:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-03-10T08:31:51.802 INFO:teuthology.run_tasks:Running task internal.archive_upload...
2026-03-10T08:31:51.804 INFO:teuthology.run_tasks:Running task internal.archive...
2026-03-10T08:31:51.805 INFO:teuthology.task.internal:Creating archive directory...
2026-03-10T08:31:51.805 DEBUG:teuthology.orchestra.run.vm02:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-10T08:31:51.844 DEBUG:teuthology.orchestra.run.vm07:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-10T08:31:51.849 INFO:teuthology.run_tasks:Running task internal.coredump...
2026-03-10T08:31:51.850 INFO:teuthology.task.internal:Enabling coredump saving...
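The internal.check_ceph_data probe above, test -z $(ls -A /var/lib/ceph), passes here only because the directory does not exist yet: ls prints its error to stderr, the substitution expands to nothing, and test -z with no operand is true. A sketch of the full behavior of that check:

    # behavior of the emptiness probe used above (sketch, not teuthology code)
    test -z $(ls -A /var/lib/ceph)
    # directory missing or empty -> substitution is empty -> exit 0 (as seen above)
    # exactly one entry          -> 'test -z <name>' -> exit 1
    # several entries            -> unquoted expansion hands test extra operands,
    #                               a usage error, which is also non-zero
    # Quoting it, test -z "$(ls -A /var/lib/ceph)", would turn the multi-entry
    # case into a clean 'false' instead of a usage error.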
2026-03-10T08:31:51.850 DEBUG:teuthology.orchestra.run.vm02:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-10T08:31:51.889 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T08:31:51.889 DEBUG:teuthology.orchestra.run.vm07:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-10T08:31:51.891 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T08:31:51.891 DEBUG:teuthology.orchestra.run.vm02:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-10T08:31:51.932 DEBUG:teuthology.orchestra.run.vm07:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-10T08:31:51.938 INFO:teuthology.orchestra.run.vm02.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-10T08:31:51.940 INFO:teuthology.orchestra.run.vm07.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-10T08:31:51.942 INFO:teuthology.orchestra.run.vm02.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-10T08:31:51.944 INFO:teuthology.orchestra.run.vm07.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-10T08:31:51.945 INFO:teuthology.run_tasks:Running task internal.sudo...
2026-03-10T08:31:51.946 INFO:teuthology.task.internal:Configuring sudo...
2026-03-10T08:31:51.947 DEBUG:teuthology.orchestra.run.vm02:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-10T08:31:51.987 DEBUG:teuthology.orchestra.run.vm07:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-10T08:31:51.993 INFO:teuthology.run_tasks:Running task internal.syslog...
2026-03-10T08:31:51.996 INFO:teuthology.task.internal.syslog:Starting syslog monitoring...
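The coredump setup above boils down to three shell steps; %t and %p are standard kernel core_pattern specifiers (dump time in epoch seconds and PID of the dumping process, per core(5)). A reduced sketch:

    ARCHIVE=/home/ubuntu/cephtest/archive/coredump             # path taken from the log above
    install -d -m0755 -- "$ARCHIVE"                            # create the dump directory
    sudo sysctl -w kernel.core_pattern="$ARCHIVE/%t.%p.core"   # apply immediately
    echo "kernel.core_pattern=$ARCHIVE/%t.%p.core" | sudo tee -a /etc/sysctl.conf   # persist across reboots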
2026-03-10T08:31:51.996 DEBUG:teuthology.orchestra.run.vm02:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-10T08:31:52.036 DEBUG:teuthology.orchestra.run.vm07:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-10T08:31:52.038 DEBUG:teuthology.orchestra.run.vm02:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-10T08:31:52.082 DEBUG:teuthology.orchestra.run.vm02:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-10T08:31:52.126 DEBUG:teuthology.orchestra.run.vm02:> set -ex
2026-03-10T08:31:52.126 DEBUG:teuthology.orchestra.run.vm02:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-10T08:31:52.179 DEBUG:teuthology.orchestra.run.vm07:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-10T08:31:52.181 DEBUG:teuthology.orchestra.run.vm07:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-10T08:31:52.227 DEBUG:teuthology.orchestra.run.vm07:> set -ex
2026-03-10T08:31:52.227 DEBUG:teuthology.orchestra.run.vm07:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-10T08:31:52.275 DEBUG:teuthology.orchestra.run.vm02:> sudo service rsyslog restart
2026-03-10T08:31:52.276 DEBUG:teuthology.orchestra.run.vm07:> sudo service rsyslog restart
2026-03-10T08:31:52.332 INFO:teuthology.run_tasks:Running task internal.timer...
2026-03-10T08:31:52.333 INFO:teuthology.task.internal:Starting timer...
2026-03-10T08:31:52.333 INFO:teuthology.run_tasks:Running task pcp...
2026-03-10T08:31:52.336 INFO:teuthology.run_tasks:Running task selinux...
2026-03-10T08:31:52.338 INFO:teuthology.task.selinux:Excluding vm02: VMs are not yet supported
2026-03-10T08:31:52.338 INFO:teuthology.task.selinux:Excluding vm07: VMs are not yet supported
2026-03-10T08:31:52.338 DEBUG:teuthology.task.selinux:Getting current SELinux state
2026-03-10T08:31:52.338 DEBUG:teuthology.task.selinux:Existing SELinux modes: {}
2026-03-10T08:31:52.338 INFO:teuthology.task.selinux:Putting SELinux into permissive mode
2026-03-10T08:31:52.338 INFO:teuthology.run_tasks:Running task ansible.cephlab...
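The syslog task above writes its rsyslog drop-in with 'sudo dd of=...', which lets an unprivileged SSH session create a root-owned file from piped stdin (a plain '>' redirect would be performed by the non-root shell). A sketch with illustrative rule contents, since the actual body of 80-cephtest.conf is not shown in this log:

    # write a root-owned rsyslog rule file from stdin (rule bodies are assumptions)
    cat <<'EOF' | sudo dd of=/etc/rsyslog.d/80-cephtest.conf
    kern.*          /home/ubuntu/cephtest/archive/syslog/kern.log
    *.*;kern.none   /home/ubuntu/cephtest/archive/syslog/misc.log
    EOF
    sudo service rsyslog restart    # pick up the new rules, as done above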
2026-03-10T08:31:52.340 DEBUG:teuthology.task:Applying overrides for task ansible.cephlab: {'branch': 'main', 'skip_tags': 'nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs', 'vars': {'timezone': 'UTC'}}
2026-03-10T08:31:52.340 DEBUG:teuthology.repo_utils:Setting repo remote to https://github.com/ceph/ceph-cm-ansible.git
2026-03-10T08:31:52.341 INFO:teuthology.repo_utils:Fetching github.com_ceph_ceph-cm-ansible_main from origin
2026-03-10T08:31:52.934 DEBUG:teuthology.repo_utils:Resetting repo at /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main to origin/main
2026-03-10T08:31:52.939 INFO:teuthology.task.ansible:Playbook: [{'import_playbook': 'ansible_managed.yml'}, {'import_playbook': 'teuthology.yml'}, {'hosts': 'testnodes', 'tasks': [{'set_fact': {'ran_from_cephlab_playbook': True}}]}, {'import_playbook': 'testnodes.yml'}, {'import_playbook': 'container-host.yml'}, {'import_playbook': 'cobbler.yml'}, {'import_playbook': 'paddles.yml'}, {'import_playbook': 'pulpito.yml'}, {'hosts': 'testnodes', 'become': True, 'tasks': [{'name': 'Touch /ceph-qa-ready', 'file': {'path': '/ceph-qa-ready', 'state': 'touch'}, 'when': 'ran_from_cephlab_playbook|bool'}]}]
2026-03-10T08:31:52.939 DEBUG:teuthology.task.ansible:Running ansible-playbook -v --extra-vars '{"ansible_ssh_user": "ubuntu", "timezone": "UTC"}' -i /tmp/teuth_ansible_inventorysdrbxhlo --limit vm02.local,vm07.local /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main/cephlab.yml --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
2026-03-10T08:33:57.690 DEBUG:teuthology.task.ansible:Reconnecting to [Remote(name='ubuntu@vm02.local'), Remote(name='ubuntu@vm07.local')]
2026-03-10T08:33:57.691 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm02.local'
2026-03-10T08:33:57.691 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm02.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-10T08:33:57.753 DEBUG:teuthology.orchestra.run.vm02:> true
2026-03-10T08:33:57.952 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm02.local'
2026-03-10T08:33:57.952 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm07.local'
2026-03-10T08:33:57.953 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm07.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-10T08:33:58.014 DEBUG:teuthology.orchestra.run.vm07:> true
2026-03-10T08:33:58.216 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm07.local'
2026-03-10T08:33:58.216 INFO:teuthology.run_tasks:Running task clock...
2026-03-10T08:33:58.219 INFO:teuthology.task.clock:Syncing clocks and checking initial clock skew...
2026-03-10T08:33:58.219 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-03-10T08:33:58.219 DEBUG:teuthology.orchestra.run.vm02:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-10T08:33:58.220 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-03-10T08:33:58.220 DEBUG:teuthology.orchestra.run.vm07:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-10T08:33:58.236 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:33:58 ntpd[16081]: ntpd 4.2.8p15@1.3728-o Wed Feb 16 17:13:02 UTC 2022 (1): Starting
2026-03-10T08:33:58.236 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:33:58 ntpd[16081]: Command line: ntpd -gq
2026-03-10T08:33:58.236 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:33:58 ntpd[16081]: ----------------------------------------------------
2026-03-10T08:33:58.236 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:33:58 ntpd[16081]: ntp-4 is maintained by Network Time Foundation,
2026-03-10T08:33:58.236 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:33:58 ntpd[16081]: Inc. (NTF), a non-profit 501(c)(3) public-benefit
2026-03-10T08:33:58.236 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:33:58 ntpd[16081]: corporation. Support and training for ntp-4 are
2026-03-10T08:33:58.236 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:33:58 ntpd[16081]: available at https://www.nwtime.org/support
2026-03-10T08:33:58.236 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:33:58 ntpd[16081]: ----------------------------------------------------
2026-03-10T08:33:58.236 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:33:58 ntpd[16081]: proto: precision = 0.029 usec (-25)
2026-03-10T08:33:58.236 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:33:58 ntpd[16081]: basedate set to 2022-02-04
2026-03-10T08:33:58.236 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:33:58 ntpd[16081]: gps base set to 2022-02-06 (week 2196)
2026-03-10T08:33:58.237 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:33:58 ntpd[16081]: leapsecond file ('/usr/share/zoneinfo/leap-seconds.list'): good hash signature
2026-03-10T08:33:58.237 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:33:58 ntpd[16081]: leapsecond file ('/usr/share/zoneinfo/leap-seconds.list'): loaded, expire=2025-12-28T00:00:00Z last=2017-01-01T00:00:00Z ofs=37
2026-03-10T08:33:58.237 INFO:teuthology.orchestra.run.vm02.stderr:10 Mar 08:33:58 ntpd[16081]: leapsecond file ('/usr/share/zoneinfo/leap-seconds.list'): expired 73 days ago
2026-03-10T08:33:58.237 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:33:58 ntpd[16081]: Listen and drop on 0 v6wildcard [::]:123
2026-03-10T08:33:58.237 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:33:58 ntpd[16081]: Listen and drop on 1 v4wildcard 0.0.0.0:123
2026-03-10T08:33:58.237 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:33:58 ntpd[16081]: Listen normally on 2 lo 127.0.0.1:123
2026-03-10T08:33:58.238 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:33:58 ntpd[16081]: Listen normally on 3 ens3 192.168.123.102:123
2026-03-10T08:33:58.238 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:33:58 ntpd[16081]: Listen normally on 4 lo [::1]:123
2026-03-10T08:33:58.238 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:33:58 ntpd[16081]: Listen normally on 5 ens3 [fe80::5055:ff:fe00:2%2]:123
2026-03-10T08:33:58.238 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:33:58 ntpd[16081]: Listening on routing socket on fd #22 for interface updates
2026-03-10T08:33:58.275 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:33:58 ntpd[16080]: ntpd 4.2.8p15@1.3728-o Wed Feb 16 17:13:02 UTC 2022 (1): Starting
2026-03-10T08:33:58.275 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:33:58 ntpd[16080]: Command line: ntpd -gq
2026-03-10T08:33:58.275 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:33:58 ntpd[16080]: ----------------------------------------------------
2026-03-10T08:33:58.275 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:33:58 ntpd[16080]: ntp-4 is maintained by Network Time Foundation,
2026-03-10T08:33:58.275 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:33:58 ntpd[16080]: Inc. (NTF), a non-profit 501(c)(3) public-benefit
2026-03-10T08:33:58.275 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:33:58 ntpd[16080]: corporation. Support and training for ntp-4 are
2026-03-10T08:33:58.275 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:33:58 ntpd[16080]: available at https://www.nwtime.org/support
2026-03-10T08:33:58.275 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:33:58 ntpd[16080]: ----------------------------------------------------
2026-03-10T08:33:58.276 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:33:58 ntpd[16080]: proto: precision = 0.029 usec (-25)
2026-03-10T08:33:58.276 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:33:58 ntpd[16080]: basedate set to 2022-02-04
2026-03-10T08:33:58.276 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:33:58 ntpd[16080]: gps base set to 2022-02-06 (week 2196)
2026-03-10T08:33:58.276 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:33:58 ntpd[16080]: leapsecond file ('/usr/share/zoneinfo/leap-seconds.list'): good hash signature
2026-03-10T08:33:58.276 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:33:58 ntpd[16080]: leapsecond file ('/usr/share/zoneinfo/leap-seconds.list'): loaded, expire=2025-12-28T00:00:00Z last=2017-01-01T00:00:00Z ofs=37
2026-03-10T08:33:58.276 INFO:teuthology.orchestra.run.vm07.stderr:10 Mar 08:33:58 ntpd[16080]: leapsecond file ('/usr/share/zoneinfo/leap-seconds.list'): expired 73 days ago
2026-03-10T08:33:58.277 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:33:58 ntpd[16080]: Listen and drop on 0 v6wildcard [::]:123
2026-03-10T08:33:58.277 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:33:58 ntpd[16080]: Listen and drop on 1 v4wildcard 0.0.0.0:123
2026-03-10T08:33:58.277 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:33:58 ntpd[16080]: Listen normally on 2 lo 127.0.0.1:123
2026-03-10T08:33:58.278 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:33:58 ntpd[16080]: Listen normally on 3 ens3 192.168.123.107:123
2026-03-10T08:33:58.278 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:33:58 ntpd[16080]: Listen normally on 4 lo [::1]:123
2026-03-10T08:33:58.278 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:33:58 ntpd[16080]: Listen normally on 5 ens3 [fe80::5055:ff:fe00:7%2]:123
2026-03-10T08:33:58.278 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:33:58 ntpd[16080]: Listening on routing socket on fd #22 for interface updates
2026-03-10T08:33:59.237 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:33:59 ntpd[16081]: Soliciting pool server 46.224.156.215
2026-03-10T08:33:59.276 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:33:59 ntpd[16080]: Soliciting pool server 46.224.156.215
2026-03-10T08:34:00.235 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:34:00 ntpd[16081]: Soliciting pool server 85.214.83.151
2026-03-10T08:34:00.236 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:34:00 ntpd[16081]: Soliciting pool server 217.144.138.234
2026-03-10T08:34:00.275 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:34:00 ntpd[16080]: Soliciting pool server 85.214.83.151
2026-03-10T08:34:00.275 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:34:00 ntpd[16080]: Soliciting pool server 217.144.138.234
2026-03-10T08:34:01.235 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:34:01 ntpd[16081]: Soliciting pool server 78.46.238.113
2026-03-10T08:34:01.235 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:34:01 ntpd[16081]: Soliciting pool server 217.145.99.9
2026-03-10T08:34:01.235 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:34:01 ntpd[16081]: Soliciting pool server 128.127.67.142
2026-03-10T08:34:01.275 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:34:01 ntpd[16080]: Soliciting pool server 78.46.238.113
2026-03-10T08:34:01.275 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:34:01 ntpd[16080]: Soliciting pool server 217.145.99.9
2026-03-10T08:34:01.275 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:34:01 ntpd[16080]: Soliciting pool server 128.127.67.142
2026-03-10T08:34:02.235 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:34:02 ntpd[16081]: Soliciting pool server 141.144.246.224
2026-03-10T08:34:02.235 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:34:02 ntpd[16081]: Soliciting pool server 104.167.24.26
2026-03-10T08:34:02.235 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:34:02 ntpd[16081]: Soliciting pool server 195.201.20.16
2026-03-10T08:34:02.235 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:34:02 ntpd[16081]: Soliciting pool server 90.187.112.137
2026-03-10T08:34:02.274 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:34:02 ntpd[16080]: Soliciting pool server 141.144.246.224
2026-03-10T08:34:02.274 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:34:02 ntpd[16080]: Soliciting pool server 104.167.24.26
2026-03-10T08:34:02.275 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:34:02 ntpd[16080]: Soliciting pool server 195.201.20.16
2026-03-10T08:34:02.275 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:34:02 ntpd[16080]: Soliciting pool server 90.187.112.137
2026-03-10T08:34:03.234 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:34:03 ntpd[16081]: Soliciting pool server 139.162.156.95
2026-03-10T08:34:03.234 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:34:03 ntpd[16081]: Soliciting pool server 188.68.34.173
2026-03-10T08:34:03.234 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:34:03 ntpd[16081]: Soliciting pool server 18.192.244.117
2026-03-10T08:34:03.235 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:34:03 ntpd[16081]: Soliciting pool server 185.125.190.58
2026-03-10T08:34:03.274 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:34:03 ntpd[16080]: Soliciting pool server 139.162.156.95
2026-03-10T08:34:03.275 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:34:03 ntpd[16080]: Soliciting pool server 188.68.34.173
2026-03-10T08:34:03.275 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:34:03 ntpd[16080]: Soliciting pool server 18.192.244.117
2026-03-10T08:34:03.275 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:34:03 ntpd[16080]: Soliciting pool server 185.125.190.58
2026-03-10T08:34:04.234 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:34:04 ntpd[16081]: Soliciting pool server 185.125.190.57
2026-03-10T08:34:04.234 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:34:04 ntpd[16081]: Soliciting pool server 178.63.67.56
2026-03-10T08:34:04.234 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:34:04 ntpd[16081]: Soliciting pool server 212.132.108.186
2026-03-10T08:34:04.234 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:34:04 ntpd[16081]: Soliciting pool server 162.159.200.123
2026-03-10T08:34:04.274 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:34:04 ntpd[16080]: Soliciting pool server 185.125.190.57
2026-03-10T08:34:04.275 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:34:04 ntpd[16080]: Soliciting pool server 178.63.67.56
2026-03-10T08:34:04.275 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:34:04 ntpd[16080]: Soliciting pool server 212.132.108.186
2026-03-10T08:34:04.275 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:34:04 ntpd[16080]: Soliciting pool server 162.159.200.123
2026-03-10T08:34:06.256 INFO:teuthology.orchestra.run.vm02.stdout:10 Mar 08:34:06 ntpd[16081]: ntpd: time slew +0.013637 s
2026-03-10T08:34:06.256 INFO:teuthology.orchestra.run.vm02.stdout:ntpd: time slew +0.013637s
2026-03-10T08:34:06.275 INFO:teuthology.orchestra.run.vm02.stdout:     remote           refid      st t when poll reach   delay   offset  jitter
2026-03-10T08:34:06.275 INFO:teuthology.orchestra.run.vm02.stdout:==============================================================================
2026-03-10T08:34:06.275 INFO:teuthology.orchestra.run.vm02.stdout: 0.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-03-10T08:34:06.275 INFO:teuthology.orchestra.run.vm02.stdout: 1.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-03-10T08:34:06.275 INFO:teuthology.orchestra.run.vm02.stdout: 2.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-03-10T08:34:06.275 INFO:teuthology.orchestra.run.vm02.stdout: 3.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-03-10T08:34:06.275 INFO:teuthology.orchestra.run.vm02.stdout: ntp.ubuntu.com  .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-03-10T08:34:07.298 INFO:teuthology.orchestra.run.vm07.stdout:10 Mar 08:34:07 ntpd[16080]: ntpd: time slew +0.000098 s
2026-03-10T08:34:07.299 INFO:teuthology.orchestra.run.vm07.stdout:ntpd: time slew +0.000098s
2026-03-10T08:34:07.317 INFO:teuthology.orchestra.run.vm07.stdout:     remote           refid      st t when poll reach   delay   offset  jitter
2026-03-10T08:34:07.317 INFO:teuthology.orchestra.run.vm07.stdout:==============================================================================
2026-03-10T08:34:07.317 INFO:teuthology.orchestra.run.vm07.stdout: 0.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-03-10T08:34:07.317 INFO:teuthology.orchestra.run.vm07.stdout: 1.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-03-10T08:34:07.317 INFO:teuthology.orchestra.run.vm07.stdout: 2.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-03-10T08:34:07.317 INFO:teuthology.orchestra.run.vm07.stdout: 3.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-03-10T08:34:07.317 INFO:teuthology.orchestra.run.vm07.stdout: ntp.ubuntu.com  .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-03-10T08:34:07.317 INFO:teuthology.run_tasks:Running task cephadm...
2026-03-10T08:34:07.362 INFO:tasks.cephadm:Config: {'roleless': True, 'conf': {'mgr': {'debug mgr': 20, 'debug ms': 1}, 'mon': {'debug mon': 20, 'debug ms': 1, 'debug paxos': 20}, 'osd': {'debug ms': 1, 'debug osd': 20, 'osd mclock iops capacity threshold hdd': 49000, 'osd shutdown pgref assert': True}}, 'flavor': 'default', 'log-ignorelist': ['\\(MDS_ALL_DOWN\\)', '\\(MDS_UP_LESS_THAN_MAX\\)', 'CEPHADM_DAEMON_PLACE_FAIL', 'CEPHADM_FAILED_DAEMON'], 'log-only-match': ['CEPHADM_'], 'sha1': 'e911bdebe5c8faa3800735d1568fcdca65db60df'}
2026-03-10T08:34:07.363 INFO:tasks.cephadm:Cluster image is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T08:34:07.363 INFO:tasks.cephadm:Cluster fsid is e750d050-1c5b-11f1-9e63-531fde0192f6
2026-03-10T08:34:07.363 INFO:tasks.cephadm:Choosing monitor IPs and ports...
2026-03-10T08:34:07.363 INFO:tasks.cephadm:No mon roles; fabricating mons
2026-03-10T08:34:07.363 INFO:tasks.cephadm:Monitor IPs: {'mon.vm02': '192.168.123.102', 'mon.vm07': '192.168.123.107'}
2026-03-10T08:34:07.363 INFO:tasks.cephadm:Normalizing hostnames...
2026-03-10T08:34:07.363 DEBUG:teuthology.orchestra.run.vm02:> sudo hostname $(hostname -s)
2026-03-10T08:34:07.371 DEBUG:teuthology.orchestra.run.vm07:> sudo hostname $(hostname -s)
2026-03-10T08:34:07.377 INFO:tasks.cephadm:Downloading "compiled" cephadm from cachra
2026-03-10T08:34:07.377 DEBUG:teuthology.packaging:Querying https://shaman.ceph.com/api/search?status=ready&project=ceph&flavor=default&distros=ubuntu%2F22.04%2Fx86_64&sha1=e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T08:34:07.958 INFO:tasks.cephadm:builder_project result: [{'url': 'https://1.chacra.ceph.com/r/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/ubuntu/jammy/flavors/default/', 'chacra_url': 'https://1.chacra.ceph.com/repos/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/ubuntu/jammy/flavors/default/', 'ref': 'squid', 'sha1': 'e911bdebe5c8faa3800735d1568fcdca65db60df', 'distro': 'ubuntu', 'distro_version': '22.04', 'distro_codename': 'jammy', 'modified': '2026-02-25 19:37:07.680480', 'status': 'ready', 'flavor': 'default', 'project': 'ceph', 'archs': ['x86_64'], 'extra': {'version': '19.2.3-678-ge911bdeb', 'package_manager_version': '19.2.3-678-ge911bdeb-1jammy', 'build_url': 'https://jenkins.ceph.com/job/ceph-dev-pipeline/3275/', 'root_build_cause': '', 'node_name': '10.20.192.98+toko08', 'job_name': 'ceph-dev-pipeline'}}]
2026-03-10T08:34:08.615 INFO:tasks.util.chacra:got chacra host 1.chacra.ceph.com, ref squid, sha1 e911bdebe5c8faa3800735d1568fcdca65db60df from https://shaman.ceph.com/api/search/?project=ceph&distros=ubuntu%2F22.04%2Fx86_64&flavor=default&sha1=e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T08:34:08.616 INFO:tasks.cephadm:Discovered cachra url: https://1.chacra.ceph.com/binaries/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/ubuntu/jammy/x86_64/flavors/default/cephadm
2026-03-10T08:34:08.616 INFO:tasks.cephadm:Downloading cephadm from url: https://1.chacra.ceph.com/binaries/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/ubuntu/jammy/x86_64/flavors/default/cephadm
2026-03-10T08:34:08.616 DEBUG:teuthology.orchestra.run.vm02:> curl --silent -L https://1.chacra.ceph.com/binaries/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/ubuntu/jammy/x86_64/flavors/default/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm
2026-03-10T08:34:10.058 INFO:teuthology.orchestra.run.vm02.stdout:-rw-rw-r-- 1 ubuntu ubuntu 795696 Mar 10 08:34 /home/ubuntu/cephtest/cephadm
2026-03-10T08:34:10.058 DEBUG:teuthology.orchestra.run.vm07:> curl --silent -L https://1.chacra.ceph.com/binaries/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/ubuntu/jammy/x86_64/flavors/default/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm
2026-03-10T08:34:11.411 INFO:teuthology.orchestra.run.vm07.stdout:-rw-rw-r-- 1 ubuntu ubuntu 795696 Mar 10 08:34 /home/ubuntu/cephtest/cephadm
2026-03-10T08:34:11.411 DEBUG:teuthology.orchestra.run.vm02:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm
2026-03-10T08:34:11.415 DEBUG:teuthology.orchestra.run.vm07:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm
2026-03-10T08:34:11.422 INFO:tasks.cephadm:Pulling image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df on all hosts...
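The cephadm fetch above pairs the download with a size sanity check, so an HTML error page or a truncated transfer fails loudly before anything tries to execute the binary. The same steps as a standalone sketch (URL copied from the log; the 1000-byte floor mirrors the task's own test):

    URL=https://1.chacra.ceph.com/binaries/ceph/squid/e911bdebe5c8faa3800735d1568fcdca65db60df/ubuntu/jammy/x86_64/flavors/default/cephadm
    curl --silent -L "$URL" > /home/ubuntu/cephtest/cephadm
    test -s /home/ubuntu/cephtest/cephadm                         # must be non-empty
    test "$(stat -c%s /home/ubuntu/cephtest/cephadm)" -gt 1000    # reject implausibly small files
    chmod +x /home/ubuntu/cephtest/cephadm                        # only mark executable once it passes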
2026-03-10T08:34:11.422 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df pull
2026-03-10T08:34:11.458 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df pull
2026-03-10T08:34:11.547 INFO:teuthology.orchestra.run.vm02.stderr:Pulling container image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df...
2026-03-10T08:34:11.554 INFO:teuthology.orchestra.run.vm07.stderr:Pulling container image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df...
2026-03-10T08:34:59.365 INFO:teuthology.orchestra.run.vm07.stdout:{
2026-03-10T08:34:59.366 INFO:teuthology.orchestra.run.vm07.stdout:    "ceph_version": "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)",
2026-03-10T08:34:59.366 INFO:teuthology.orchestra.run.vm07.stdout:    "image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c",
2026-03-10T08:34:59.366 INFO:teuthology.orchestra.run.vm07.stdout:    "repo_digests": [
2026-03-10T08:34:59.366 INFO:teuthology.orchestra.run.vm07.stdout:        "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc"
2026-03-10T08:34:59.366 INFO:teuthology.orchestra.run.vm07.stdout:    ]
2026-03-10T08:34:59.366 INFO:teuthology.orchestra.run.vm07.stdout:}
2026-03-10T08:34:59.986 INFO:teuthology.orchestra.run.vm02.stdout:{
2026-03-10T08:34:59.986 INFO:teuthology.orchestra.run.vm02.stdout:    "ceph_version": "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)",
2026-03-10T08:34:59.986 INFO:teuthology.orchestra.run.vm02.stdout:    "image_id": "654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c",
2026-03-10T08:34:59.986 INFO:teuthology.orchestra.run.vm02.stdout:    "repo_digests": [
2026-03-10T08:34:59.987 INFO:teuthology.orchestra.run.vm02.stdout:        "quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc"
2026-03-10T08:34:59.987 INFO:teuthology.orchestra.run.vm02.stdout:    ]
2026-03-10T08:34:59.987 INFO:teuthology.orchestra.run.vm02.stdout:}
2026-03-10T08:35:00.000 DEBUG:teuthology.orchestra.run.vm02:> sudo mkdir -p /etc/ceph
2026-03-10T08:35:00.010 DEBUG:teuthology.orchestra.run.vm07:> sudo mkdir -p /etc/ceph
2026-03-10T08:35:00.022 DEBUG:teuthology.orchestra.run.vm02:> sudo chmod 777 /etc/ceph
2026-03-10T08:35:00.060 DEBUG:teuthology.orchestra.run.vm07:> sudo chmod 777 /etc/ceph
2026-03-10T08:35:00.076 INFO:tasks.cephadm:Writing seed config...
2026-03-10T08:35:00.076 INFO:tasks.cephadm: override: [mgr] debug mgr = 20
2026-03-10T08:35:00.076 INFO:tasks.cephadm: override: [mgr] debug ms = 1
2026-03-10T08:35:00.076 INFO:tasks.cephadm: override: [mon] debug mon = 20
2026-03-10T08:35:00.076 INFO:tasks.cephadm: override: [mon] debug ms = 1
2026-03-10T08:35:00.076 INFO:tasks.cephadm: override: [mon] debug paxos = 20
2026-03-10T08:35:00.076 INFO:tasks.cephadm: override: [osd] debug ms = 1
2026-03-10T08:35:00.076 INFO:tasks.cephadm: override: [osd] debug osd = 20
2026-03-10T08:35:00.076 INFO:tasks.cephadm: override: [osd] osd mclock iops capacity threshold hdd = 49000
2026-03-10T08:35:00.076 INFO:tasks.cephadm: override: [osd] osd shutdown pgref assert = True
2026-03-10T08:35:00.077 DEBUG:teuthology.orchestra.run.vm02:> set -ex
2026-03-10T08:35:00.077 DEBUG:teuthology.orchestra.run.vm02:> dd of=/home/ubuntu/cephtest/seed.ceph.conf
2026-03-10T08:35:00.105 DEBUG:tasks.cephadm:Final config:
[global]
# make logging friendly to teuthology
log_to_file = true
log_to_stderr = false
log to journald = false
mon cluster log to file = true
mon cluster log file level = debug
mon clock drift allowed = 1.000

# replicate across OSDs, not hosts
osd crush chooseleaf type = 0
#osd pool default size = 2
osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd

# enable some debugging
auth debug = true
ms die on old message = true
ms die on bug = true
debug asserts on shutdown = true

# adjust warnings
mon max pg per osd = 10000        # >= luminous
mon pg warn max object skew = 0
mon osd allow primary affinity = true
mon osd allow pg remap = true
mon warn on legacy crush tunables = false
mon warn on crush straw calc version zero = false
mon warn on no sortbitwise = false
mon warn on osd down out interval zero = false
mon warn on too few osds = false
mon_warn_on_pool_pg_num_not_power_of_two = false

# disable pg_autoscaler by default for new pools
osd_pool_default_pg_autoscale_mode = off

# tests delete pools
mon allow pool delete = true

fsid = e750d050-1c5b-11f1-9e63-531fde0192f6

[osd]
osd scrub load threshold = 5.0
osd scrub max interval = 600
osd mclock profile = high_recovery_ops
osd recover clone overlap = true
osd recovery max chunk = 1048576
osd deep scrub update digest min age = 30
osd map max advance = 10
osd memory target autotune = true

# debugging
osd debug shutdown = true
osd debug op order = true
osd debug verify stray on activate = true
osd debug pg log writeout = true
osd debug verify cached snaps = true
osd debug verify missing on start = true
osd debug misdirected ops = true
osd op queue = debug_random
osd op queue cut off = debug_random
osd shutdown pgref assert = True
bdev debug aio = true
osd sloppy crc = true
debug ms = 1
debug osd = 20
osd mclock iops capacity threshold hdd = 49000

[mgr]
mon reweight min pgs per osd = 4
mon reweight min bytes per osd = 10
mgr/telemetry/nag = false
debug mgr = 20
debug ms = 1

[mon]
mon data avail warn = 5
mon mgr mkfs grace = 240
mon reweight min pgs per osd = 4
mon osd reporter subtree level = osd
mon osd prime pg temp = true
mon reweight min bytes per osd = 10

# rotate auth tickets quickly to exercise renewal paths
auth mon ticket ttl = 660         # 11m
auth service ticket ttl = 240     # 4m

# don't complain about global id reclaim
mon_warn_on_insecure_global_id_reclaim = false
mon_warn_on_insecure_global_id_reclaim_allowed = false
debug mon = 20
debug ms = 1
debug paxos = 20

[client.rgw]
rgw cache enabled = true
rgw enable ops log = true
rgw enable usage log = true
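The seed config above is where the job's [mgr], [mon], and [osd] overrides land before bootstrap. One way to spot-check after bootstrap that an override actually reached a running daemon is 'ceph config show' inside a cephadm shell; a sketch, not taken from this log (option names come from the overrides above, daemon names must match the deployed cluster):

    sudo /home/ubuntu/cephtest/cephadm shell -- ceph config show mon.vm02 debug_mon   # expect 20/20
    sudo /home/ubuntu/cephtest/cephadm shell -- ceph config show osd.0 debug_osd      # expect 20/20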
2026-03-10T08:35:00.106 DEBUG:teuthology.orchestra.run.vm02:mon.vm02> sudo journalctl -f -n 0 -u ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@mon.vm02.service
2026-03-10T08:35:00.147 INFO:tasks.cephadm:Bootstrapping...
2026-03-10T08:35:00.147 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df -v bootstrap --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 --config /home/ubuntu/cephtest/seed.ceph.conf --output-config /etc/ceph/ceph.conf --output-keyring /etc/ceph/ceph.client.admin.keyring --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub --mon-ip 192.168.123.102 --skip-admin-label && sudo chmod +r /etc/ceph/ceph.client.admin.keyring
2026-03-10T08:35:00.284 INFO:teuthology.orchestra.run.vm02.stdout:--------------------------------------------------------------------------------
2026-03-10T08:35:00.284 INFO:teuthology.orchestra.run.vm02.stdout:cephadm ['--image', 'quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df', '-v', 'bootstrap', '--fsid', 'e750d050-1c5b-11f1-9e63-531fde0192f6', '--config', '/home/ubuntu/cephtest/seed.ceph.conf', '--output-config', '/etc/ceph/ceph.conf', '--output-keyring', '/etc/ceph/ceph.client.admin.keyring', '--output-pub-ssh-key', '/home/ubuntu/cephtest/ceph.pub', '--mon-ip', '192.168.123.102', '--skip-admin-label']
2026-03-10T08:35:00.284 INFO:teuthology.orchestra.run.vm02.stderr:Specifying an fsid for your cluster offers no advantages and may increase the likelihood of fsid conflicts.
2026-03-10T08:35:00.284 INFO:teuthology.orchestra.run.vm02.stdout:Verifying podman|docker is present...
2026-03-10T08:35:00.284 INFO:teuthology.orchestra.run.vm02.stdout:Verifying lvm2 is present...
2026-03-10T08:35:00.284 INFO:teuthology.orchestra.run.vm02.stdout:Verifying time synchronization is in place...
2026-03-10T08:35:00.288 INFO:teuthology.orchestra.run.vm02.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service
2026-03-10T08:35:00.288 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory
2026-03-10T08:35:00.291 INFO:teuthology.orchestra.run.vm02.stdout:Non-zero exit code 3 from systemctl is-active chrony.service
2026-03-10T08:35:00.291 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stdout inactive
2026-03-10T08:35:00.294 INFO:teuthology.orchestra.run.vm02.stdout:Non-zero exit code 1 from systemctl is-enabled chronyd.service
2026-03-10T08:35:00.294 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stderr Failed to get unit file state for chronyd.service: No such file or directory
2026-03-10T08:35:00.297 INFO:teuthology.orchestra.run.vm02.stdout:Non-zero exit code 3 from systemctl is-active chronyd.service
2026-03-10T08:35:00.297 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stdout inactive
2026-03-10T08:35:00.300 INFO:teuthology.orchestra.run.vm02.stdout:Non-zero exit code 1 from systemctl is-enabled systemd-timesyncd.service
2026-03-10T08:35:00.300 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stdout masked
2026-03-10T08:35:00.303 INFO:teuthology.orchestra.run.vm02.stdout:Non-zero exit code 3 from systemctl is-active systemd-timesyncd.service
2026-03-10T08:35:00.303 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stdout inactive
2026-03-10T08:35:00.306 INFO:teuthology.orchestra.run.vm02.stdout:Non-zero exit code 1 from systemctl is-enabled ntpd.service
2026-03-10T08:35:00.306 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stderr Failed to get unit file state for ntpd.service: No such file or directory
2026-03-10T08:35:00.309 INFO:teuthology.orchestra.run.vm02.stdout:Non-zero exit code 3 from systemctl is-active ntpd.service
2026-03-10T08:35:00.309 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stdout inactive
2026-03-10T08:35:00.312 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stdout enabled
2026-03-10T08:35:00.315 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stdout active
2026-03-10T08:35:00.315 INFO:teuthology.orchestra.run.vm02.stdout:Unit ntp.service is enabled and running
2026-03-10T08:35:00.315 INFO:teuthology.orchestra.run.vm02.stdout:Repeating the final host check...
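cephadm's time-sync verification above is a probe loop: it walks a fixed list of unit names and accepts the first one that is both enabled and active, which is why the misses on chrony, chronyd, systemd-timesyncd, and ntpd before the ntp.service hit are expected noise rather than errors. A sketch of the equivalent shell logic (an assumption about the order, inferred from the probes logged above):

    for unit in chrony.service chronyd.service systemd-timesyncd.service ntpd.service ntp.service; do
      if [ "$(systemctl is-enabled "$unit" 2>/dev/null)" = enabled ] && \
         [ "$(systemctl is-active "$unit")" = active ]; then
        echo "Unit $unit is enabled and running"
        break
      fi
    done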
2026-03-10T08:35:00.315 INFO:teuthology.orchestra.run.vm02.stdout:docker (/usr/bin/docker) is present
2026-03-10T08:35:00.315 INFO:teuthology.orchestra.run.vm02.stdout:systemctl is present
2026-03-10T08:35:00.315 INFO:teuthology.orchestra.run.vm02.stdout:lvcreate is present
2026-03-10T08:35:00.317 INFO:teuthology.orchestra.run.vm02.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service
2026-03-10T08:35:00.317 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory
2026-03-10T08:35:00.320 INFO:teuthology.orchestra.run.vm02.stdout:Non-zero exit code 3 from systemctl is-active chrony.service
2026-03-10T08:35:00.320 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stdout inactive
2026-03-10T08:35:00.322 INFO:teuthology.orchestra.run.vm02.stdout:Non-zero exit code 1 from systemctl is-enabled chronyd.service
2026-03-10T08:35:00.322 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stderr Failed to get unit file state for chronyd.service: No such file or directory
2026-03-10T08:35:00.324 INFO:teuthology.orchestra.run.vm02.stdout:Non-zero exit code 3 from systemctl is-active chronyd.service
2026-03-10T08:35:00.324 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stdout inactive
2026-03-10T08:35:00.326 INFO:teuthology.orchestra.run.vm02.stdout:Non-zero exit code 1 from systemctl is-enabled systemd-timesyncd.service
2026-03-10T08:35:00.326 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stdout masked
2026-03-10T08:35:00.328 INFO:teuthology.orchestra.run.vm02.stdout:Non-zero exit code 3 from systemctl is-active systemd-timesyncd.service
2026-03-10T08:35:00.328 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stdout inactive
2026-03-10T08:35:00.331 INFO:teuthology.orchestra.run.vm02.stdout:Non-zero exit code 1 from systemctl is-enabled ntpd.service
2026-03-10T08:35:00.331 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stderr Failed to get unit file state for ntpd.service: No such file or directory
2026-03-10T08:35:00.333 INFO:teuthology.orchestra.run.vm02.stdout:Non-zero exit code 3 from systemctl is-active ntpd.service
2026-03-10T08:35:00.333 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stdout inactive
2026-03-10T08:35:00.335 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stdout enabled
2026-03-10T08:35:00.338 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stdout active
2026-03-10T08:35:00.338 INFO:teuthology.orchestra.run.vm02.stdout:Unit ntp.service is enabled and running
2026-03-10T08:35:00.338 INFO:teuthology.orchestra.run.vm02.stdout:Host looks OK
2026-03-10T08:35:00.338 INFO:teuthology.orchestra.run.vm02.stdout:Cluster fsid: e750d050-1c5b-11f1-9e63-531fde0192f6
2026-03-10T08:35:00.338 INFO:teuthology.orchestra.run.vm02.stdout:Acquiring lock 140477592176528 on /run/cephadm/e750d050-1c5b-11f1-9e63-531fde0192f6.lock
2026-03-10T08:35:00.338 INFO:teuthology.orchestra.run.vm02.stdout:Lock 140477592176528 acquired on /run/cephadm/e750d050-1c5b-11f1-9e63-531fde0192f6.lock
2026-03-10T08:35:00.338 INFO:teuthology.orchestra.run.vm02.stdout:Verifying IP 192.168.123.102 port 3300 ...
2026-03-10T08:35:00.338 INFO:teuthology.orchestra.run.vm02.stdout:Verifying IP 192.168.123.102 port 6789 ...
2026-03-10T08:35:00.338 INFO:teuthology.orchestra.run.vm02.stdout:Base mon IP(s) is [192.168.123.102:3300, 192.168.123.102:6789], mon addrv is [v2:192.168.123.102:3300,v1:192.168.123.102:6789]
2026-03-10T08:35:00.340 INFO:teuthology.orchestra.run.vm02.stdout:/usr/sbin/ip: stdout default via 192.168.123.1 dev ens3 proto dhcp src 192.168.123.102 metric 100
2026-03-10T08:35:00.340 INFO:teuthology.orchestra.run.vm02.stdout:/usr/sbin/ip: stdout 172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 linkdown
2026-03-10T08:35:00.340 INFO:teuthology.orchestra.run.vm02.stdout:/usr/sbin/ip: stdout 192.168.123.0/24 dev ens3 proto kernel scope link src 192.168.123.102 metric 100
2026-03-10T08:35:00.340 INFO:teuthology.orchestra.run.vm02.stdout:/usr/sbin/ip: stdout 192.168.123.1 dev ens3 proto dhcp scope link src 192.168.123.102 metric 100
2026-03-10T08:35:00.341 INFO:teuthology.orchestra.run.vm02.stdout:/usr/sbin/ip: stdout ::1 dev lo proto kernel metric 256 pref medium
2026-03-10T08:35:00.341 INFO:teuthology.orchestra.run.vm02.stdout:/usr/sbin/ip: stdout fe80::/64 dev ens3 proto kernel metric 256 pref medium
2026-03-10T08:35:00.342 INFO:teuthology.orchestra.run.vm02.stdout:/usr/sbin/ip: stdout 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 state UNKNOWN qlen 1000
2026-03-10T08:35:00.342 INFO:teuthology.orchestra.run.vm02.stdout:/usr/sbin/ip: stdout inet6 ::1/128 scope host
2026-03-10T08:35:00.342 INFO:teuthology.orchestra.run.vm02.stdout:/usr/sbin/ip: stdout valid_lft forever preferred_lft forever
2026-03-10T08:35:00.342 INFO:teuthology.orchestra.run.vm02.stdout:/usr/sbin/ip: stdout 2: ens3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP qlen 1000
2026-03-10T08:35:00.342 INFO:teuthology.orchestra.run.vm02.stdout:/usr/sbin/ip: stdout inet6 fe80::5055:ff:fe00:2/64 scope link
2026-03-10T08:35:00.342 INFO:teuthology.orchestra.run.vm02.stdout:/usr/sbin/ip: stdout valid_lft forever preferred_lft forever
2026-03-10T08:35:00.343 INFO:teuthology.orchestra.run.vm02.stdout:Mon IP `192.168.123.102` is in CIDR network `192.168.123.0/24`
2026-03-10T08:35:00.343 INFO:teuthology.orchestra.run.vm02.stdout:Mon IP `192.168.123.102` is in CIDR network `192.168.123.0/24`
2026-03-10T08:35:00.343 INFO:teuthology.orchestra.run.vm02.stdout:Mon IP `192.168.123.102` is in CIDR network `192.168.123.1/32`
2026-03-10T08:35:00.343 INFO:teuthology.orchestra.run.vm02.stdout:Mon IP `192.168.123.102` is in CIDR network `192.168.123.1/32`
2026-03-10T08:35:00.343 INFO:teuthology.orchestra.run.vm02.stdout:Inferred mon public CIDR from local network configuration ['192.168.123.0/24', '192.168.123.0/24', '192.168.123.1/32', '192.168.123.1/32']
2026-03-10T08:35:00.343 INFO:teuthology.orchestra.run.vm02.stdout:Internal network (--cluster-network) has not been provided, OSD replication will default to the public_network
2026-03-10T08:35:00.344 INFO:teuthology.orchestra.run.vm02.stdout:Pulling container image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df...
2026-03-10T08:35:01.418 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/docker: stdout e911bdebe5c8faa3800735d1568fcdca65db60df: Pulling from ceph-ci/ceph
2026-03-10T08:35:01.418 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/docker: stdout Digest: sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc
2026-03-10T08:35:01.418 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/docker: stdout Status: Image is up to date for quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T08:35:01.418 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/docker: stdout quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-10T08:35:01.619 INFO:teuthology.orchestra.run.vm02.stdout:ceph: stdout ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)
2026-03-10T08:35:01.619 INFO:teuthology.orchestra.run.vm02.stdout:Ceph version: ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)
2026-03-10T08:35:01.619 INFO:teuthology.orchestra.run.vm02.stdout:Extracting ceph user uid/gid from container image...
2026-03-10T08:35:01.761 INFO:teuthology.orchestra.run.vm02.stdout:stat: stdout 167 167
2026-03-10T08:35:01.761 INFO:teuthology.orchestra.run.vm02.stdout:Creating initial keys...
2026-03-10T08:35:01.906 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-authtool: stdout AQC1169pjqcFNBAAUPKxXwyJXmCGzOfzDQGgaQ==
2026-03-10T08:35:02.020 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-authtool: stdout AQC1169p+kwROxAAxWYg6dQUeBimUxvP10A3rg==
2026-03-10T08:35:02.116 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-authtool: stdout AQC2169pM5AXBRAAjG+C/LGJi/L0jCI/I5v+VA==
2026-03-10T08:35:02.116 INFO:teuthology.orchestra.run.vm02.stdout:Creating initial monmap...
2026-03-10T08:35:02.244 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: monmap file /tmp/monmap
2026-03-10T08:35:02.245 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/monmaptool: stdout setting min_mon_release = quincy
2026-03-10T08:35:02.245 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: set fsid to e750d050-1c5b-11f1-9e63-531fde0192f6
2026-03-10T08:35:02.245 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors)
2026-03-10T08:35:02.245 INFO:teuthology.orchestra.run.vm02.stdout:monmaptool for vm02 [v2:192.168.123.102:3300,v1:192.168.123.102:6789] on /usr/bin/monmaptool: monmap file /tmp/monmap
2026-03-10T08:35:02.245 INFO:teuthology.orchestra.run.vm02.stdout:setting min_mon_release = quincy
2026-03-10T08:35:02.245 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/monmaptool: set fsid to e750d050-1c5b-11f1-9e63-531fde0192f6
2026-03-10T08:35:02.245 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors)
2026-03-10T08:35:02.245 INFO:teuthology.orchestra.run.vm02.stdout:
2026-03-10T08:35:02.245 INFO:teuthology.orchestra.run.vm02.stdout:Creating mon...
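The "Creating initial keys..." and "Creating initial monmap..." steps map onto stock ceph-authtool and monmaptool invocations; a minimal sketch using the fsid and address vector from this run (the keyring path and caps are illustrative, not cephadm's exact arguments):

    fsid=e750d050-1c5b-11f1-9e63-531fde0192f6
    # One --gen-key call per secret printed above (mon., client.admin, bootstrap key).
    ceph-authtool --create-keyring /tmp/mon.keyring --gen-key -n mon. --cap mon 'allow *'
    # Epoch-0 monmap carrying the single mon and its v2/v1 addresses.
    monmaptool --create --clobber --fsid "$fsid" \
        --addv vm02 '[v2:192.168.123.102:3300,v1:192.168.123.102:6789]' /tmp/monmap
    monmaptool --print /tmp/monmap

The monmaptool output that follows ("setting min_mon_release = quincy", "writing epoch 0") is exactly what such a --create run prints.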
2026-03-10T08:35:02.397 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 0 set uid:gid to 167:167 (ceph:ceph)
2026-03-10T08:35:02.397 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 1 imported monmap:
2026-03-10T08:35:02.397 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr epoch 0
2026-03-10T08:35:02.397 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr fsid e750d050-1c5b-11f1-9e63-531fde0192f6
2026-03-10T08:35:02.397 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr last_changed 2026-03-10T08:35:02.208022+0000
2026-03-10T08:35:02.397 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr created 2026-03-10T08:35:02.208022+0000
2026-03-10T08:35:02.397 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr min_mon_release 17 (quincy)
2026-03-10T08:35:02.397 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr election_strategy: 1
2026-03-10T08:35:02.397 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr 0: [v2:192.168.123.102:3300/0,v1:192.168.123.102:6789/0] mon.vm02
2026-03-10T08:35:02.397 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr
2026-03-10T08:35:02.397 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 0 /usr/bin/ceph-mon: set fsid to e750d050-1c5b-11f1-9e63-531fde0192f6
2026-03-10T08:35:02.397 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: RocksDB version: 7.9.2
2026-03-10T08:35:02.397 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr
2026-03-10T08:35:02.397 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Git sha 0
2026-03-10T08:35:02.397 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Compile date 2026-02-25 18:11:04
2026-03-10T08:35:02.397 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: DB SUMMARY
2026-03-10T08:35:02.397 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr
2026-03-10T08:35:02.397 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: DB Session ID: RN8ZFUTHV9NFDDP4L43K
2026-03-10T08:35:02.397 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr
2026-03-10T08:35:02.397 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: SST files in /var/lib/ceph/mon/ceph-vm02/store.db dir, Total Num: 0, files:
2026-03-10T08:35:02.397 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr
2026-03-10T08:35:02.397 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-vm02/store.db:
2026-03-10T08:35:02.397 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr
2026-03-10T08:35:02.397 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.error_if_exists: 0
2026-03-10T08:35:02.397 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.create_if_missing: 1
2026-03-10T08:35:02.397 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.paranoid_checks: 1
2026-03-10T08:35:02.397 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.flush_verify_memtable_count: 1
2026-03-10T08:35:02.397 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.track_and_verify_wals_in_manifest: 0
2026-03-10T08:35:02.397 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.verify_sst_unique_id_in_manifest: 1
2026-03-10T08:35:02.397 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.env: 0x55d11cde3dc0
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.fs: PosixFileSystem
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.info_log: 0x55d12776ce60
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.max_file_opening_threads: 16
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.statistics: (nil)
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.use_fsync: 0
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.max_log_file_size: 0
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.max_manifest_file_size: 1073741824
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.log_file_time_to_roll: 0
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.keep_log_file_num: 1000
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.recycle_log_file_num: 0
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.allow_fallocate: 1
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.allow_mmap_reads: 0
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.allow_mmap_writes: 0
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.use_direct_reads: 0
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.use_direct_io_for_flush_and_compaction: 0
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.create_missing_column_families: 0
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.db_log_dir:
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.wal_dir:
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.table_cache_numshardbits: 6
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.WAL_ttl_seconds: 0
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.WAL_size_limit_MB: 0
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.max_write_batch_group_size_bytes: 1048576
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.manifest_preallocation_size: 4194304
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.is_fd_close_on_exec: 1
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.advise_random_on_open: 1
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.db_write_buffer_size: 0
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.write_buffer_manager: 0x55d1277635e0
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.access_hint_on_compaction_start: 1
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.random_access_max_buffer_size: 1048576
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.use_adaptive_mutex: 0
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.rate_limiter: (nil)
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.wal_recovery_mode: 2
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.enable_thread_tracking: 0
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.enable_pipelined_write: 0
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.unordered_write: 0
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.allow_concurrent_memtable_write: 1
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.enable_write_thread_adaptive_yield: 1
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.write_thread_max_yield_usec: 100
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.write_thread_slow_yield_usec: 3
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.row_cache: None
2026-03-10T08:35:02.398 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.wal_filter: None
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.avoid_flush_during_recovery: 0
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.allow_ingest_behind: 0
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.two_write_queues: 0
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.manual_wal_flush: 0
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.wal_compression: 0
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.atomic_flush: 0
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.avoid_unnecessary_blocking_io: 0
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.persist_stats_to_disk: 0
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.write_dbid_to_manifest: 0
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.log_readahead_size: 0
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.file_checksum_gen_factory: Unknown
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.best_efforts_recovery: 0
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.max_bgerror_resume_count: 2147483647
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.bgerror_resume_retry_interval: 1000000
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.allow_data_in_errors: 0
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.db_host_id: __hostname__
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.enforce_single_del_contracts: true
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.max_background_jobs: 2
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.max_background_compactions: -1
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.max_subcompactions: 1
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.avoid_flush_during_shutdown: 0
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.writable_file_max_buffer_size: 1048576
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.delayed_write_rate : 16777216
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.max_total_wal_size: 0
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.delete_obsolete_files_period_micros: 21600000000
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.stats_dump_period_sec: 600
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.stats_persist_period_sec: 600
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.stats_history_buffer_size: 1048576
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.max_open_files: -1
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.bytes_per_sync: 0
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.wal_bytes_per_sync: 0
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.strict_bytes_per_sync: 0
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.compaction_readahead_size: 0
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Options.max_background_flushes: -1
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Compression algorithms supported:
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: kZSTD supported: 0
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: kXpressCompression supported: 0
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: kBZip2Compression supported: 0
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: kZSTDNotFinalCompression supported: 0
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: kLZ4Compression supported: 1
2026-03-10T08:35:02.399 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: kZlibCompression supported: 1
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: kLZ4HCCompression supported: 1
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: kSnappyCompression supported: 1
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: Fast CRC32 supported: Supported on x86
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.337+0000 7f4246631d80 4 rocksdb: DMutex implementation: pthread_mutex_t
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: [db/db_impl/db_impl_open.cc:317] Creating manifest 1
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-vm02/store.db/MANIFEST-000001
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]:
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.comparator: leveldb.BytewiseComparator
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.merge_operator:
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.compaction_filter: None
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.compaction_filter_factory: None
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.sst_partitioner_factory: None
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.memtable_factory: SkipListFactory
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.table_factory: BlockBasedTable
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x55d12775f580)
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr cache_index_and_filter_blocks: 1
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr cache_index_and_filter_blocks_with_high_priority: 0
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr pin_l0_filter_and_index_blocks_in_cache: 0
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr pin_top_level_index_and_filter: 1
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr index_type: 0
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr data_block_index_type: 0
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr index_shortening: 1
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr data_block_hash_table_util_ratio: 0.750000
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr checksum: 4
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr no_block_cache: 0
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr block_cache: 0x55d127785350
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr block_cache_name: BinnedLRUCache
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr block_cache_options:
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr capacity : 536870912
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr num_shard_bits : 4
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr strict_capacity_limit : 0
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr high_pri_pool_ratio: 0.000
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr block_cache_compressed: (nil)
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr persistent_cache: (nil)
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr block_size: 4096
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr block_size_deviation: 10
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr block_restart_interval: 16
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr index_block_restart_interval: 1
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr metadata_block_size: 4096
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr partition_filters: 0
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr use_delta_encoding: 1
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr filter_policy: bloomfilter
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr whole_key_filtering: 1
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr verify_compression: 0
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr read_amp_bytes_per_bit: 0
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr format_version: 5
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr enable_index_compression: 1
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr block_align: 0
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr max_auto_readahead_size: 262144
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr prepopulate_block_cache: 0
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr initial_auto_readahead_size: 8192
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr num_file_reads_for_auto_readahead: 2
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.write_buffer_size: 33554432
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.max_write_buffer_number: 2
2026-03-10T08:35:02.400 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.compression: NoCompression
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.bottommost_compression: Disabled
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.prefix_extractor: nullptr
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.num_levels: 7
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.min_write_buffer_number_to_merge: 1
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.max_write_buffer_number_to_maintain: 0
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.max_write_buffer_size_to_maintain: 0
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.bottommost_compression_opts.window_bits: -14
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.bottommost_compression_opts.level: 32767
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.bottommost_compression_opts.strategy: 0
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.bottommost_compression_opts.parallel_threads: 1
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.bottommost_compression_opts.enabled: false
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.compression_opts.window_bits: -14
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.compression_opts.level: 32767
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.compression_opts.strategy: 0
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.compression_opts.max_dict_bytes: 0
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.compression_opts.zstd_max_train_bytes: 0
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.compression_opts.use_zstd_dict_trainer: true
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.compression_opts.parallel_threads: 1
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.compression_opts.enabled: false
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.level0_file_num_compaction_trigger: 4
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.level0_slowdown_writes_trigger: 20
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.level0_stop_writes_trigger: 36
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.target_file_size_base: 67108864
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.target_file_size_multiplier: 1
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.max_bytes_for_level_base: 268435456
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.level_compaction_dynamic_level_bytes: 1
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.max_bytes_for_level_multiplier: 10.000000
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.max_sequential_skip_in_iterations: 8
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.max_compaction_bytes: 1677721600
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.ignore_max_compaction_bytes_for_input: true
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.arena_block_size: 1048576
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.disable_auto_compactions: 0
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.compaction_style: kCompactionStyleLevel
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.compaction_pri: kMinOverlappingRatio
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.compaction_options_universal.size_ratio: 1
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.compaction_options_universal.min_merge_width: 2
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.compaction_options_universal.compression_size_percent: -1
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.compaction_options_fifo.allow_compaction: 0
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.inplace_update_support: 0
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.inplace_update_num_locks: 10000
2026-03-10T08:35:02.401 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.memtable_whole_key_filtering: 0
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.memtable_huge_page_size: 0
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.bloom_locality: 0
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.max_successive_merges: 0
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.optimize_filters_for_hits: 0
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.paranoid_file_checks: 0
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.force_consistency_checks: 1
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.report_bg_io_stats: 0
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.ttl: 2592000
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.periodic_compaction_seconds: 0
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.preclude_last_level_data_seconds: 0
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.preserve_internal_time_seconds: 0
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.enable_blob_files: false
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.min_blob_size: 0
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.blob_file_size: 268435456
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.blob_compression_type: NoCompression
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.enable_blob_garbage_collection: false
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.blob_compaction_readahead_size: 0
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.blob_file_starting_level: 0
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: Options.experimental_mempurge_threshold: 0.000000
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-vm02/store.db/MANIFEST-000001 succeeded,manifest_file_number is 1, next_file_number is 3, last_sequence is 0, log_number is 0,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 0
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: fcd20f39-e156-4bc1-8ab2-15ef2b2742fa
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.341+0000 7f4246631d80 4 rocksdb: [db/version_set.cc:5047] Creating manifest 5
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.345+0000 7f4246631d80 4 rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x55d127786e00
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.345+0000 7f4246631d80 4 rocksdb: DB pointer 0x55d12786a000
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.345+0000 7f423ddbb640 4 rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.345+0000 7f423ddbb640 4 rocksdb: [db/db_impl/db_impl.cc:1111]
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr ** DB Stats **
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr Uptime(secs): 0.0 total, 0.0 interval
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr Interval stall: 00:00:0.000 H:M:S, 0.0 percent
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr ** Compaction Stats [default] **
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr Sum 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 0.0 0.0
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 0.0 0.0
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr ** Compaction Stats [default] **
2026-03-10T08:35:02.402 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-03-10T08:35:02.403 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-03-10T08:35:02.403 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr
2026-03-10T08:35:02.403 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
2026-03-10T08:35:02.403 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr
2026-03-10T08:35:02.403 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr Uptime(secs): 0.0 total, 0.0 interval
2026-03-10T08:35:02.403 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr Flush(GB): cumulative 0.000, interval 0.000
2026-03-10T08:35:02.403 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr AddFile(GB): cumulative 0.000, interval 0.000
2026-03-10T08:35:02.403 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr AddFile(Total Files): cumulative 0, interval 0
2026-03-10T08:35:02.403 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr AddFile(L0 Files): cumulative 0, interval 0
2026-03-10T08:35:02.403 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr AddFile(Keys): cumulative 0, interval 0
2026-03-10T08:35:02.403 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-03-10T08:35:02.403 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-03-10T08:35:02.403 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
2026-03-10T08:35:02.403 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr Block cache BinnedLRUCache@0x55d127785350#7 capacity: 512.00 MB usage: 0.00 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 9e-06 secs_since: 0
2026-03-10T08:35:02.403 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr Block cache entry stats(count,size,portion): Misc(1,0.00 KB,0%)
2026-03-10T08:35:02.403 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr
2026-03-10T08:35:02.403 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr ** File Read Latency Histogram By Level [default] **
2026-03-10T08:35:02.403 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr
2026-03-10T08:35:02.403 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.349+0000 7f4246631d80 4 rocksdb: [db/db_impl/db_impl.cc:496] Shutdown: canceling all background work
2026-03-10T08:35:02.403 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.349+0000 7f4246631d80 4 rocksdb: [db/db_impl/db_impl.cc:704] Shutdown complete
2026-03-10T08:35:02.403 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph-mon: stderr debug 2026-03-10T08:35:02.349+0000 7f4246631d80 0 /usr/bin/ceph-mon: created monfs at /var/lib/ceph/mon/ceph-vm02 for mon.vm02
2026-03-10T08:35:02.403 INFO:teuthology.orchestra.run.vm02.stdout:create mon.vm02 on
2026-03-10T08:35:02.741 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph.target → /etc/systemd/system/ceph.target.
2026-03-10T08:35:02.924 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6.target → /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6.target.
2026-03-10T08:35:02.924 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph.target.wants/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6.target → /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6.target.
2026-03-10T08:35:03.110 INFO:teuthology.orchestra.run.vm02.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@mon.vm02
2026-03-10T08:35:03.110 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stderr Failed to reset failed state of unit ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@mon.vm02.service: Unit ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@mon.vm02.service not loaded.
2026-03-10T08:35:03.307 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6.target.wants/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@mon.vm02.service → /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service.
2026-03-10T08:35:03.320 INFO:teuthology.orchestra.run.vm02.stdout:firewalld does not appear to be present
2026-03-10T08:35:03.320 INFO:teuthology.orchestra.run.vm02.stdout:Not possible to enable service . firewalld.service is not available
2026-03-10T08:35:03.320 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for mon to start...
2026-03-10T08:35:03.320 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for mon...
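The unit names in the symlink messages follow the ceph-&lt;fsid&gt;@&lt;daemon&gt;.service template cephadm installs; a minimal sketch of querying the mon unit the way this "Waiting for mon..." phase does (fsid from this run). The "reset-failed ... not loaded" complaint above is harmless on a fresh bootstrap: the unit had never been loaded, so there was no failed state to clear.

    fsid=e750d050-1c5b-11f1-9e63-531fde0192f6
    unit="ceph-${fsid}@mon.vm02.service"
    systemctl is-enabled "$unit"   # reports enabled once the symlinks above exist
    systemctl is-active "$unit"    # flips to active when the mon container is up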
2026-03-10T08:35:03.538 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:03 vm02 bash[16998]: cluster 2026-03-10T08:35:03.445440+0000 mon.vm02 (mon.0) 1 : cluster [INF] mon.vm02 is new leader, mons vm02 in quorum (ranks 0)
2026-03-10T08:35:03.568 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout cluster:
2026-03-10T08:35:03.568 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout id: e750d050-1c5b-11f1-9e63-531fde0192f6
2026-03-10T08:35:03.568 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout health: HEALTH_OK
2026-03-10T08:35:03.568 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout
2026-03-10T08:35:03.568 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout services:
2026-03-10T08:35:03.568 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout mon: 1 daemons, quorum vm02 (age 0.074147s)
2026-03-10T08:35:03.568 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout mgr: no daemons active
2026-03-10T08:35:03.568 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout osd: 0 osds: 0 up, 0 in
2026-03-10T08:35:03.568 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout
2026-03-10T08:35:03.568 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout data:
2026-03-10T08:35:03.568 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout pools: 0 pools, 0 pgs
2026-03-10T08:35:03.568 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout objects: 0 objects, 0 B
2026-03-10T08:35:03.568 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout usage: 0 B used, 0 B / 0 B avail
2026-03-10T08:35:03.568 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout pgs:
2026-03-10T08:35:03.568 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout
2026-03-10T08:35:03.568 INFO:teuthology.orchestra.run.vm02.stdout:mon is available
2026-03-10T08:35:03.568 INFO:teuthology.orchestra.run.vm02.stdout:Assimilating anything we can from ceph.conf...
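The "Waiting for mon..." / "mon is available" exchange above is a readiness poll against the new monitor. A rough equivalent, sketched in the same timeout/retry-loop style the suite's own task scripts use (the 300-second budget is an assumption, not taken from cephadm):

    timeout 300 bash -c 'until ceph -s >/dev/null 2>&1; do echo "Waiting for mon..."; sleep 5; done'
    ceph -s   # prints the cluster/services/data summary seen above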
2026-03-10T08:35:03.817 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout
2026-03-10T08:35:03.818 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout [global]
2026-03-10T08:35:03.818 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout fsid = e750d050-1c5b-11f1-9e63-531fde0192f6
2026-03-10T08:35:03.818 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug
2026-03-10T08:35:03.818 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.102:3300,v1:192.168.123.102:6789]
2026-03-10T08:35:03.818 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true
2026-03-10T08:35:03.818 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true
2026-03-10T08:35:03.818 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false
2026-03-10T08:35:03.818 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0
2026-03-10T08:35:03.818 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout
2026-03-10T08:35:03.818 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout [mgr]
2026-03-10T08:35:03.818 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false
2026-03-10T08:35:03.818 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout
2026-03-10T08:35:03.818 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout [osd]
2026-03-10T08:35:03.818 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10
2026-03-10T08:35:03.818 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true
2026-03-10T08:35:03.818 INFO:teuthology.orchestra.run.vm02.stdout:Generating new minimal ceph.conf...
2026-03-10T08:35:04.028 INFO:teuthology.orchestra.run.vm02.stdout:Restarting the monitor...
2026-03-10T08:35:04.076 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 systemd[1]: Stopping Ceph mon.vm02 for e750d050-1c5b-11f1-9e63-531fde0192f6...
2026-03-10T08:35:04.263 INFO:teuthology.orchestra.run.vm02.stdout:Setting public_network to 192.168.123.0/24,192.168.123.1/32 in mon config section
2026-03-10T08:35:04.333 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[16998]: debug 2026-03-10T08:35:04.069+0000 7f653d6ea640 -1 received signal: Terminated from /sbin/docker-init -- /usr/bin/ceph-mon -n mon.vm02 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-stderr=true --default-log-stderr-prefix=debug --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-stderr=true (PID: 1) UID: 0
2026-03-10T08:35:04.334 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[16998]: debug 2026-03-10T08:35:04.069+0000 7f653d6ea640 -1 mon.vm02@0(leader) e1 *** Got Signal Terminated ***
2026-03-10T08:35:04.334 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17385]: ceph-e750d050-1c5b-11f1-9e63-531fde0192f6-mon-vm02
2026-03-10T08:35:04.334 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 systemd[1]: ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@mon.vm02.service: Deactivated successfully.
2026-03-10T08:35:04.334 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 systemd[1]: Stopped Ceph mon.vm02 for e750d050-1c5b-11f1-9e63-531fde0192f6.
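"Assimilating" and "Generating new minimal ceph.conf" above correspond to two real ceph subcommands; a sketch of the same two steps run by hand (file paths assumed, and whether cephadm invokes exactly these flags is not confirmed by the log):

    ceph config assimilate-conf -i /etc/ceph/ceph.conf   # fold loose ceph.conf options into the mon config database
    ceph config generate-minimal-conf                    # emit a minimal conf (fsid + mon_host) to reinstall as /etc/ceph/ceph.conf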
2026-03-10T08:35:04.334 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 systemd[1]: Started Ceph mon.vm02 for e750d050-1c5b-11f1-9e63-531fde0192f6.
2026-03-10T08:35:04.531 INFO:teuthology.orchestra.run.vm02.stdout:Wrote config to /etc/ceph/ceph.conf
2026-03-10T08:35:04.532 INFO:teuthology.orchestra.run.vm02.stdout:Wrote keyring to /etc/ceph/ceph.client.admin.keyring
2026-03-10T08:35:04.532 INFO:teuthology.orchestra.run.vm02.stdout:Creating mgr...
2026-03-10T08:35:04.532 INFO:teuthology.orchestra.run.vm02.stdout:Verifying port 0.0.0.0:9283 ...
2026-03-10T08:35:04.532 INFO:teuthology.orchestra.run.vm02.stdout:Verifying port 0.0.0.0:8765 ...
2026-03-10T08:35:04.532 INFO:teuthology.orchestra.run.vm02.stdout:Verifying port 0.0.0.0:8443 ...
2026-03-10T08:35:04.679 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.397+0000 7fdb045ddd80 0 set uid:gid to 167:167 (ceph:ceph)
2026-03-10T08:35:04.679 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.397+0000 7fdb045ddd80 0 ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable), process ceph-mon, pid 7
2026-03-10T08:35:04.679 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.397+0000 7fdb045ddd80 0 pidfile_write: ignore empty --pid-file
2026-03-10T08:35:04.679 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.397+0000 7fdb045ddd80 0 load: jerasure load: lrc
2026-03-10T08:35:04.679 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: RocksDB version: 7.9.2
2026-03-10T08:35:04.679 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Git sha 0
2026-03-10T08:35:04.679 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Compile date 2026-02-25 18:11:04
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: DB SUMMARY
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: DB Session ID: WQZM7LMYA71UI0Y7UFCN
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: CURRENT file: CURRENT
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: IDENTITY file: IDENTITY
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: MANIFEST file: MANIFEST-000010 size: 179 Bytes
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: SST files in /var/lib/ceph/mon/ceph-vm02/store.db dir, Total Num: 1, files: 000008.sst
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-vm02/store.db: 000009.log size: 75071 ;
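The three "Verifying port" lines check that the mgr's default endpoints are free before the mgr daemon is created (9283, 8765, and 8443 plausibly map to the prometheus module, cephadm's http endpoint, and the dashboard, though that assignment is an assumption, not stated in the log). A rough, purely illustrative equivalent of the probe (ss is from iproute2):

    for p in 9283 8765 8443; do
        ss -tln | grep -q ":$p " && echo "port $p already in use"
    done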
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.error_if_exists: 0
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.create_if_missing: 0
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.paranoid_checks: 1
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.flush_verify_memtable_count: 1
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.track_and_verify_wals_in_manifest: 0
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.verify_sst_unique_id_in_manifest: 1
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.env: 0x558e52f3bdc0
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.fs: PosixFileSystem
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.info_log: 0x558e7cf659a0
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.max_file_opening_threads: 16
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.statistics: (nil)
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.use_fsync: 0
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.max_log_file_size: 0
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.max_manifest_file_size: 1073741824
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.log_file_time_to_roll: 0
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.keep_log_file_num: 1000
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.recycle_log_file_num: 0
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.allow_mmap_reads: 0
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.allow_fallocate: 1
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.allow_mmap_writes: 0
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.use_direct_reads: 0
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.use_direct_io_for_flush_and_compaction: 0
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.create_missing_column_families: 0
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.db_log_dir:
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.wal_dir:
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.table_cache_numshardbits: 6
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.WAL_ttl_seconds: 0
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.WAL_size_limit_MB: 0
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.max_write_batch_group_size_bytes: 1048576
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.manifest_preallocation_size: 4194304
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.is_fd_close_on_exec: 1
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.advise_random_on_open: 1
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.db_write_buffer_size: 0
2026-03-10T08:35:04.680 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.write_buffer_manager: 0x558e7cf69900
2026-03-10T08:35:04.681 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.access_hint_on_compaction_start: 1
2026-03-10T08:35:04.681 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.random_access_max_buffer_size: 1048576
2026-03-10T08:35:04.681 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.use_adaptive_mutex: 0
2026-03-10T08:35:04.681 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.rate_limiter: (nil)
2026-03-10T08:35:04.681 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0
2026-03-10T08:35:04.681 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.wal_recovery_mode: 2
2026-03-10T08:35:04.681 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.enable_thread_tracking: 0
2026-03-10T08:35:04.681 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.enable_pipelined_write: 0
2026-03-10T08:35:04.681 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.unordered_write: 0
2026-03-10T08:35:04.681 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.allow_concurrent_memtable_write: 1
2026-03-10T08:35:04.681 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.enable_write_thread_adaptive_yield: 1
2026-03-10T08:35:04.681 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.write_thread_max_yield_usec: 100
2026-03-10T08:35:04.681 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.write_thread_slow_yield_usec: 3
2026-03-10T08:35:04.681 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.row_cache: None
2026-03-10T08:35:04.681 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.wal_filter: None
2026-03-10T08:35:04.681 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.avoid_flush_during_recovery: 0
2026-03-10T08:35:04.681 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.allow_ingest_behind: 0
2026-03-10T08:35:04.681 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.two_write_queues: 0
2026-03-10T08:35:04.681 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.manual_wal_flush: 0
2026-03-10T08:35:04.681 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.wal_compression: 0
2026-03-10T08:35:04.681 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.atomic_flush: 0
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.avoid_unnecessary_blocking_io: 0
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.persist_stats_to_disk: 0
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.write_dbid_to_manifest: 0
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.log_readahead_size: 0
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.file_checksum_gen_factory: Unknown
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.best_efforts_recovery: 0
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.max_bgerror_resume_count: 2147483647
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.bgerror_resume_retry_interval: 1000000
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.allow_data_in_errors: 0
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.db_host_id: __hostname__
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.enforce_single_del_contracts: true
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.max_background_jobs: 2
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.max_background_compactions: -1
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.max_subcompactions: 1
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.avoid_flush_during_shutdown: 0
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.writable_file_max_buffer_size: 1048576
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.delayed_write_rate : 16777216
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.max_total_wal_size: 0
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.delete_obsolete_files_period_micros: 21600000000
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.stats_dump_period_sec: 600
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.stats_persist_period_sec: 600
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.stats_history_buffer_size: 1048576
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.max_open_files: -1
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.bytes_per_sync: 0
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.wal_bytes_per_sync: 0
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.strict_bytes_per_sync: 0
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.compaction_readahead_size: 0
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Options.max_background_flushes: -1
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: Compression algorithms supported:
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: kZSTD supported: 0
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: kXpressCompression supported: 0
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: kBZip2Compression supported: 0
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: kZSTDNotFinalCompression supported: 0
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: kLZ4Compression supported: 1
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: kZlibCompression supported: 1
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: kLZ4HCCompression supported: 1
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.401+0000 7fdb045ddd80 4 rocksdb: kSnappyCompression supported: 1
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Fast CRC32 supported: Supported on x86
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: DMutex implementation: pthread_mutex_t
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-vm02/store.db/MANIFEST-000010
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]:
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.comparator: leveldb.BytewiseComparator
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.merge_operator:
2026-03-10T08:35:04.682 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.compaction_filter: None
2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.compaction_filter_factory: None
2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.sst_partitioner_factory: None
2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.memtable_factory: SkipListFactory
2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.table_factory: BlockBasedTable
2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x558e7cf645c0)
2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: cache_index_and_filter_blocks: 1
2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: cache_index_and_filter_blocks_with_high_priority: 0
2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: pin_l0_filter_and_index_blocks_in_cache: 0
2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: pin_top_level_index_and_filter: 1
2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: index_type: 0
2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: data_block_index_type: 0
2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: index_shortening: 1
2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: data_block_hash_table_util_ratio: 0.750000 2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: checksum: 4 2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: no_block_cache: 0 2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: block_cache: 0x558e7cf8b350 2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: block_cache_name: BinnedLRUCache 2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: block_cache_options: 2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: capacity : 536870912 2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: num_shard_bits : 4 2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: strict_capacity_limit : 0 2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: high_pri_pool_ratio: 0.000 2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: block_cache_compressed: (nil) 2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: persistent_cache: (nil) 2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: block_size: 4096 2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: block_size_deviation: 10 2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: block_restart_interval: 16 2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: index_block_restart_interval: 1 2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: metadata_block_size: 4096 2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: partition_filters: 0 2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: use_delta_encoding: 1 2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: filter_policy: bloomfilter 2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: whole_key_filtering: 1 2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: verify_compression: 0 2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: read_amp_bytes_per_bit: 0 2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: format_version: 5 2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: enable_index_compression: 1 2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: block_align: 0 2026-03-10T08:35:04.683 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: max_auto_readahead_size: 262144 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: prepopulate_block_cache: 0 2026-03-10T08:35:04.684 
INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: initial_auto_readahead_size: 8192 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: num_file_reads_for_auto_readahead: 2 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.write_buffer_size: 33554432 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.max_write_buffer_number: 2 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.compression: NoCompression 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.bottommost_compression: Disabled 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.prefix_extractor: nullptr 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.num_levels: 7 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-10T08:35:04.684 
INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.compression_opts.window_bits: -14 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.compression_opts.level: 32767 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.compression_opts.strategy: 0 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.compression_opts.enabled: false 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.target_file_size_base: 67108864 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.target_file_size_multiplier: 1 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 
bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.ignore_max_compaction_bytes_for_input: true 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.arena_block_size: 1048576 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.disable_auto_compactions: 0 2026-03-10T08:35:04.684 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 
2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.405+0000 7fdb045ddd80 4 rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: Options.inplace_update_support: 0 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: Options.inplace_update_num_locks: 10000 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: Options.memtable_huge_page_size: 0 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: 
Options.bloom_locality: 0 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: Options.max_successive_merges: 0 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: Options.optimize_filters_for_hits: 0 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: Options.paranoid_file_checks: 0 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: Options.force_consistency_checks: 1 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: Options.report_bg_io_stats: 0 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: Options.ttl: 2592000 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: Options.periodic_compaction_seconds: 0 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: Options.preclude_last_level_data_seconds: 0 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: Options.preserve_internal_time_seconds: 0 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: Options.enable_blob_files: false 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: Options.min_blob_size: 0 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: Options.blob_file_size: 268435456 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: Options.blob_compression_type: NoCompression 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: Options.enable_blob_garbage_collection: false 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: Options.blob_compaction_readahead_size: 0 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: 
Options.blob_file_starting_level: 0 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-vm02/store.db/MANIFEST-000010 succeeded,manifest_file_number is 10, next_file_number is 12, last_sequence is 5, log_number is 5,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 5 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 5 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: fcd20f39-e156-4bc1-8ab2-15ef2b2742fa 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1773131704415176, "job": 1, "event": "recovery_started", "wal_files": [9]} 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.409+0000 7fdb045ddd80 4 rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #9 mode 2 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.413+0000 7fdb045ddd80 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1773131704417226, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 13, "file_size": 72139, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 8, "largest_seqno": 223, "table_properties": {"data_size": 70418, "index_size": 174, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 517, "raw_key_size": 9562, "raw_average_key_size": 49, "raw_value_size": 65043, "raw_average_value_size": 335, "num_data_blocks": 8, "num_entries": 194, "num_filter_entries": 194, "num_deletions": 3, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1773131704, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "fcd20f39-e156-4bc1-8ab2-15ef2b2742fa", "db_session_id": "WQZM7LMYA71UI0Y7UFCN", "orig_file_number": 13, "seqno_to_time_mapping": "N/A"}} 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.413+0000 7fdb045ddd80 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1773131704417544, "job": 1, "event": "recovery_finished"} 2026-03-10T08:35:04.685 
INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.413+0000 7fdb045ddd80 4 rocksdb: [db/version_set.cc:5047] Creating manifest 15 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.417+0000 7fdb045ddd80 4 rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-vm02/store.db/000009.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.417+0000 7fdb045ddd80 4 rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x558e7cf8ce00 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.417+0000 7fdb045ddd80 4 rocksdb: DB pointer 0x558e7d09a000 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.417+0000 7fdb045ddd80 0 starting mon.vm02 rank 0 at public addrs [v2:192.168.123.102:3300/0,v1:192.168.123.102:6789/0] at bind addrs [v2:192.168.123.102:3300/0,v1:192.168.123.102:6789/0] mon_data /var/lib/ceph/mon/ceph-vm02 fsid e750d050-1c5b-11f1-9e63-531fde0192f6 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.417+0000 7fdafa3a7640 4 rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS ------- 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.417+0000 7fdafa3a7640 4 rocksdb: [db/db_impl/db_impl.cc:1111] 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: ** DB Stats ** 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: Uptime(secs): 0.0 total, 0.0 interval 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: ** Compaction Stats [default] ** 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 
08:35:04 vm02 bash[17473]: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 2026-03-10T08:35:04.685 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: L0 2/0 72.32 KB 0.5 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 43.7 0.00 0.00 1 0.002 0 0 0.0 0.0 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: Sum 2/0 72.32 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 43.7 0.00 0.00 1 0.002 0 0 0.0 0.0 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 43.7 0.00 0.00 1 0.002 0 0 0.0 0.0 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: ** Compaction Stats [default] ** 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 43.7 0.00 0.00 1 0.002 0 0 0.0 0.0 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: Uptime(secs): 0.0 total, 0.0 interval 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: Flush(GB): cumulative 0.000, interval 0.000 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: AddFile(Total Files): cumulative 0, interval 0 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: AddFile(L0 Files): cumulative 0, interval 0 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: AddFile(Keys): cumulative 0, interval 0 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: Cumulative compaction: 0.00 GB write, 5.01 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: Interval compaction: 0.00 GB write, 5.01 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 
08:35:04 vm02 bash[17473]: Block cache BinnedLRUCache@0x558e7cf8b350#7 capacity: 512.00 MB usage: 6.09 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 7e-06 secs_since: 0 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: Block cache entry stats(count,size,portion): DataBlock(2,5.03 KB,0.000959635%) FilterBlock(2,0.70 KB,0.00013411%) IndexBlock(2,0.36 KB,6.85453e-05%) Misc(1,0.00 KB,0%) 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: ** File Read Latency Histogram By Level [default] ** 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.417+0000 7fdb045ddd80 1 mon.vm02@-1(???) e1 preinit fsid e750d050-1c5b-11f1-9e63-531fde0192f6 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.417+0000 7fdb045ddd80 0 mon.vm02@-1(???).mds e1 new map 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.417+0000 7fdb045ddd80 0 mon.vm02@-1(???).mds e1 print_map 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: e1 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: btime 2026-03-10T08:35:03:450016+0000 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: enable_multiple, ever_enabled_multiple: 1,1 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2,11=minor log segments,12=quiesce subvolumes} 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: legacy client fscid: -1 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: No filesystems configured 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.421+0000 7fdb045ddd80 0 mon.vm02@-1(???).osd e1 crush map has features 3314932999778484224, adjusting msgr requires 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.421+0000 7fdb045ddd80 0 mon.vm02@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.421+0000 7fdb045ddd80 0 mon.vm02@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.421+0000 7fdb045ddd80 0 mon.vm02@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires 2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: debug 2026-03-10T08:35:04.421+0000 7fdb045ddd80 1 mon.vm02@-1(???).paxosservice(auth 1..2) refresh upgraded, format 0 -> 3 
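The records above show mon.vm02 replaying its RocksDB store (manifest recovery, WAL replay, an L0 flush) and then entering preinit with an empty fsmap and osdmap. If a mon store ever needs offline inspection, ceph-kvstore-tool can open it directly; a minimal sketch, assuming the daemon is stopped first and using the container-internal store path from the log (on the cephadm host itself the store normally sits under /var/lib/ceph/<fsid>/mon.vm02/store.db):

    # stop the mon so RocksDB is not locked (unit name taken from this run)
    systemctl stop ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@mon.vm02.service
    # list keys to confirm the store opens cleanly
    ceph-kvstore-tool rocksdb /var/lib/ceph/mon/ceph-vm02/store.db list
    # optionally compact before restarting the daemon
    ceph-kvstore-tool rocksdb /var/lib/ceph/mon/ceph-vm02/store.db compact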
2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: cluster 2026-03-10T08:35:04.427873+0000 mon.vm02 (mon.0) 1 : cluster [INF] mon.vm02 is new leader, mons vm02 in quorum (ranks 0)
2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: cluster 2026-03-10T08:35:04.427912+0000 mon.vm02 (mon.0) 2 : cluster [DBG] monmap epoch 1
2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: cluster 2026-03-10T08:35:04.427925+0000 mon.vm02 (mon.0) 3 : cluster [DBG] fsid e750d050-1c5b-11f1-9e63-531fde0192f6
2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: cluster 2026-03-10T08:35:04.427928+0000 mon.vm02 (mon.0) 4 : cluster [DBG] last_changed 2026-03-10T08:35:02.208022+0000
2026-03-10T08:35:04.686 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: cluster 2026-03-10T08:35:04.427936+0000 mon.vm02 (mon.0) 5 : cluster [DBG] created 2026-03-10T08:35:02.208022+0000
2026-03-10T08:35:04.687 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: cluster 2026-03-10T08:35:04.427940+0000 mon.vm02 (mon.0) 6 : cluster [DBG] min_mon_release 19 (squid)
2026-03-10T08:35:04.687 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: cluster 2026-03-10T08:35:04.427944+0000 mon.vm02 (mon.0) 7 : cluster [DBG] election_strategy: 1
2026-03-10T08:35:04.687 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: cluster 2026-03-10T08:35:04.427949+0000 mon.vm02 (mon.0) 8 : cluster [DBG] 0: [v2:192.168.123.102:3300/0,v1:192.168.123.102:6789/0] mon.vm02
2026-03-10T08:35:04.687 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: cluster 2026-03-10T08:35:04.428183+0000 mon.vm02 (mon.0) 9 : cluster [DBG] fsmap
2026-03-10T08:35:04.687 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: cluster 2026-03-10T08:35:04.428195+0000 mon.vm02 (mon.0) 10 : cluster [DBG] osdmap e1: 0 total, 0 up, 0 in
2026-03-10T08:35:04.687 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: cluster 2026-03-10T08:35:04.428855+0000 mon.vm02 (mon.0) 11 : cluster [DBG] mgrmap e1: no daemons active
2026-03-10T08:35:04.687 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 bash[17473]: audit 2026-03-10T08:35:04.476248+0000 mon.vm02 (mon.0) 12 : audit [INF] from='client.? 192.168.123.102:0/1429830139' entity='client.admin'
2026-03-10T08:35:04.744 INFO:teuthology.orchestra.run.vm02.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@mgr.vm02.ttibzz
2026-03-10T08:35:04.744 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stderr Failed to reset failed state of unit ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@mgr.vm02.ttibzz.service: Unit ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@mgr.vm02.ttibzz.service not loaded.
2026-03-10T08:35:04.906 INFO:teuthology.orchestra.run.vm02.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6.target.wants/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@mgr.vm02.ttibzz.service → /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service.
2026-03-10T08:35:04.914 INFO:teuthology.orchestra.run.vm02.stdout:firewalld does not appear to be present
2026-03-10T08:35:04.914 INFO:teuthology.orchestra.run.vm02.stdout:Not possible to enable service . firewalld.service is not available
2026-03-10T08:35:04.914 INFO:teuthology.orchestra.run.vm02.stdout:firewalld does not appear to be present
2026-03-10T08:35:04.914 INFO:teuthology.orchestra.run.vm02.stdout:Not possible to open ports <[9283, 8765, 8443]>. firewalld.service is not available
2026-03-10T08:35:04.914 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for mgr to start...
2026-03-10T08:35:04.915 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for mgr...
2026-03-10T08:35:04.931 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:04 vm02 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-03-10T08:35:05.168 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout
2026-03-10T08:35:05.168 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout {
2026-03-10T08:35:05.168 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "fsid": "e750d050-1c5b-11f1-9e63-531fde0192f6",
2026-03-10T08:35:05.168 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "health": {
2026-03-10T08:35:05.168 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK",
2026-03-10T08:35:05.168 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "checks": {},
2026-03-10T08:35:05.168 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "mutes": []
2026-03-10T08:35:05.168 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout },
2026-03-10T08:35:05.168 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "election_epoch": 5,
2026-03-10T08:35:05.168 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "quorum": [
2026-03-10T08:35:05.168 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 0
2026-03-10T08:35:05.168 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ],
2026-03-10T08:35:05.169 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "quorum_names": [
2026-03-10T08:35:05.169 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "vm02"
2026-03-10T08:35:05.169 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ],
2026-03-10T08:35:05.169 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "quorum_age": 0,
2026-03-10T08:35:05.169 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "monmap": {
2026-03-10T08:35:05.169 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-10T08:35:05.169 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid",
2026-03-10T08:35:05.169 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_mons": 1
2026-03-10T08:35:05.169 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout },
2026-03-10T08:35:05.169 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "osdmap": {
2026-03-10T08:35:05.169 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-10T08:35:05.169 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_osds": 0,
2026-03-10T08:35:05.169 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_up_osds": 0,
2026-03-10T08:35:05.169 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "osd_up_since": 0,
2026-03-10T08:35:05.169 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_in_osds": 0,
2026-03-10T08:35:05.169 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "osd_in_since": 0,
2026-03-10T08:35:05.169 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0
2026-03-10T08:35:05.169 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout },
2026-03-10T08:35:05.169 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "pgmap": {
2026-03-10T08:35:05.170 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "pgs_by_state": [],
2026-03-10T08:35:05.170 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_pgs": 0,
2026-03-10T08:35:05.170 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_pools": 0,
2026-03-10T08:35:05.170 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_objects": 0,
2026-03-10T08:35:05.171 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "data_bytes": 0,
2026-03-10T08:35:05.171 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "bytes_used": 0,
2026-03-10T08:35:05.171 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "bytes_avail": 0,
2026-03-10T08:35:05.171 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "bytes_total": 0
2026-03-10T08:35:05.171 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout },
2026-03-10T08:35:05.171 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "fsmap": {
2026-03-10T08:35:05.171 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-10T08:35:05.171 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "btime": "2026-03-10T08:35:03:450016+0000",
2026-03-10T08:35:05.171 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "by_rank": [],
2026-03-10T08:35:05.171 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "up:standby": 0
2026-03-10T08:35:05.171 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout },
2026-03-10T08:35:05.171 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "mgrmap": {
2026-03-10T08:35:05.171 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "available": false,
2026-03-10T08:35:05.171 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_standbys": 0,
2026-03-10T08:35:05.171 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "modules": [
2026-03-10T08:35:05.171 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "iostat",
2026-03-10T08:35:05.171 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "nfs",
2026-03-10T08:35:05.171 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "restful"
2026-03-10T08:35:05.171 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ],
2026-03-10T08:35:05.171 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "services": {}
2026-03-10T08:35:05.171 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout },
2026-03-10T08:35:05.171 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "servicemap": {
2026-03-10T08:35:05.171 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-10T08:35:05.171 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "modified": "2026-03-10T08:35:03.450673+0000",
2026-03-10T08:35:05.171 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "services": {}
2026-03-10T08:35:05.171 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout },
2026-03-10T08:35:05.171 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "progress_events": {}
2026-03-10T08:35:05.171 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }
2026-03-10T08:35:05.171 INFO:teuthology.orchestra.run.vm02.stdout:mgr not available, waiting (1/15)...
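The bootstrap helper polls ceph status (attempt 1 of 15 above) until the mgrmap reports an available active mgr. The same wait can be scripted directly; a sketch, assuming jq is installed on the host:

    # jq -e exits non-zero while .mgrmap.available is false, so this spins until the mgr is up
    until ceph status --format json | jq -e '.mgrmap.available' >/dev/null; do
        echo 'mgr not available, waiting...'
        sleep 2
    done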
2026-03-10T08:35:05.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:05 vm02 bash[17473]: audit 2026-03-10T08:35:05.120808+0000 mon.vm02 (mon.0) 13 : audit [DBG] from='client.? 192.168.123.102:0/2467793842' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout
2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout {
2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "fsid": "e750d050-1c5b-11f1-9e63-531fde0192f6",
2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "health": {
2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK",
2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "checks": {},
2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "mutes": []
2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout },
2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "election_epoch": 5,
2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "quorum": [
2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 0
2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ],
2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "quorum_names": [
2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "vm02"
2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ],
2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "quorum_age": 2,
2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "monmap": {
2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid",
2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_mons": 1
2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout },
2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "osdmap": {
2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_osds": 0,
2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_up_osds": 0,
2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "osd_up_since": 0,
2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_in_osds": 0,
2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "osd_in_since": 0,
2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout
"num_remapped_pgs": 0 2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-10T08:35:07.454 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-10T08:35:07.455 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "btime": "2026-03-10T08:35:03:450016+0000", 2026-03-10T08:35:07.455 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-10T08:35:07.455 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-10T08:35:07.455 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-10T08:35:07.455 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-10T08:35:07.455 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "available": false, 2026-03-10T08:35:07.455 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-10T08:35:07.455 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-10T08:35:07.455 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-10T08:35:07.455 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-10T08:35:07.455 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "restful" 2026-03-10T08:35:07.455 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ], 2026-03-10T08:35:07.455 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-10T08:35:07.455 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-10T08:35:07.455 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-10T08:35:07.455 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-10T08:35:07.455 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "modified": "2026-03-10T08:35:03.450673+0000", 2026-03-10T08:35:07.455 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-10T08:35:07.455 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-10T08:35:07.455 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-10T08:35:07.455 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout } 2026-03-10T08:35:07.455 
INFO:teuthology.orchestra.run.vm02.stdout:mgr not available, waiting (2/15)...
2026-03-10T08:35:07.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:07 vm02 bash[17473]: audit 2026-03-10T08:35:07.397830+0000 mon.vm02 (mon.0) 14 : audit [DBG] from='client.? 192.168.123.102:0/511057920' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
2026-03-10T08:35:08.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:08 vm02 bash[17473]: cluster 2026-03-10T08:35:08.510712+0000 mon.vm02 (mon.0) 15 : cluster [INF] Activating manager daemon vm02.ttibzz
2026-03-10T08:35:08.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:08 vm02 bash[17473]: cluster 2026-03-10T08:35:08.515550+0000 mon.vm02 (mon.0) 16 : cluster [DBG] mgrmap e2: vm02.ttibzz(active, starting, since 0.00497153s)
2026-03-10T08:35:08.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:08 vm02 bash[17473]: audit 2026-03-10T08:35:08.519229+0000 mon.vm02 (mon.0) 17 : audit [DBG] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-10T08:35:08.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:08 vm02 bash[17473]: audit 2026-03-10T08:35:08.519733+0000 mon.vm02 (mon.0) 18 : audit [DBG] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-10T08:35:08.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:08 vm02 bash[17473]: audit 2026-03-10T08:35:08.520120+0000 mon.vm02 (mon.0) 19 : audit [DBG] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-10T08:35:08.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:08 vm02 bash[17473]: audit 2026-03-10T08:35:08.520478+0000 mon.vm02 (mon.0) 20 : audit [DBG] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-10T08:35:08.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:08 vm02 bash[17473]: audit 2026-03-10T08:35:08.520858+0000 mon.vm02 (mon.0) 21 : audit [DBG] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-10T08:35:08.791 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:08 vm02 bash[17473]: audit 2026-03-10T08:35:08.521214+0000 mon.vm02 (mon.0) 22 : audit [DBG] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-10T08:35:08.791 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:08 vm02 bash[17473]: audit 2026-03-10T08:35:08.522434+0000 mon.vm02 (mon.0) 23 : audit [DBG] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata", "id": "vm02"}]: dispatch
2026-03-10T08:35:08.791 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:08 vm02 bash[17473]: audit 2026-03-10T08:35:08.523295+0000 mon.vm02 (mon.0) 24 : audit [DBG] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mgr metadata", "who": "vm02.ttibzz", "id": "vm02.ttibzz"}]: dispatch
2026-03-10T08:35:08.791 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:08 vm02 bash[17473]: cluster 2026-03-10T08:35:08.530931+0000 mon.vm02 (mon.0) 25 : cluster [INF] Manager daemon vm02.ttibzz is now available
2026-03-10T08:35:08.791 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:08 vm02 bash[17473]: audit 2026-03-10T08:35:08.541005+0000 mon.vm02 (mon.0) 26 : audit [INF] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:08.791 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:08 vm02 bash[17473]: audit 2026-03-10T08:35:08.541862+0000 mon.vm02 (mon.0) 27 : audit [INF] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm02.ttibzz/mirror_snapshot_schedule"}]: dispatch
2026-03-10T08:35:08.791 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:08 vm02 bash[17473]: audit 2026-03-10T08:35:08.544761+0000 mon.vm02 (mon.0) 28 : audit [INF] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:08.791 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:08 vm02 bash[17473]: audit 2026-03-10T08:35:08.546694+0000 mon.vm02 (mon.0) 29 : audit [INF] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm02.ttibzz/trash_purge_schedule"}]: dispatch
2026-03-10T08:35:08.791 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:08 vm02 bash[17473]: audit 2026-03-10T08:35:08.549490+0000 mon.vm02 (mon.0) 30 : audit [INF] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:09.788 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout
2026-03-10T08:35:09.788 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout {
2026-03-10T08:35:09.788 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "fsid": "e750d050-1c5b-11f1-9e63-531fde0192f6",
2026-03-10T08:35:09.788 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "health": {
2026-03-10T08:35:09.788 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK",
2026-03-10T08:35:09.788 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "checks": {},
2026-03-10T08:35:09.788 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "mutes": []
2026-03-10T08:35:09.788 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout },
2026-03-10T08:35:09.788 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "election_epoch": 5,
2026-03-10T08:35:09.788 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "quorum": [
2026-03-10T08:35:09.788 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph:
stdout 0 2026-03-10T08:35:09.788 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ], 2026-03-10T08:35:09.788 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-03-10T08:35:09.788 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "vm02" 2026-03-10T08:35:09.788 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ], 2026-03-10T08:35:09.788 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "quorum_age": 5, 2026-03-10T08:35:09.788 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "monmap": { 2026-03-10T08:35:09.789 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-10T08:35:09.789 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid", 2026-03-10T08:35:09.789 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-03-10T08:35:09.789 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-10T08:35:09.789 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-03-10T08:35:09.789 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-10T08:35:09.789 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-03-10T08:35:09.789 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-03-10T08:35:09.789 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-03-10T08:35:09.789 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-03-10T08:35:09.789 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-03-10T08:35:09.789 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-03-10T08:35:09.789 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-10T08:35:09.789 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-03-10T08:35:09.789 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "btime": "2026-03-10T08:35:03:450016+0000", 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-10T08:35:09.790 
INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "available": true, 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "restful" 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ], 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "modified": "2026-03-10T08:35:03.450673+0000", 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }, 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout } 2026-03-10T08:35:09.790 INFO:teuthology.orchestra.run.vm02.stdout:mgr is available 2026-03-10T08:35:10.084 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 2026-03-10T08:35:10.085 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout [global] 2026-03-10T08:35:10.085 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout fsid = e750d050-1c5b-11f1-9e63-531fde0192f6 2026-03-10T08:35:10.085 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug 2026-03-10T08:35:10.085 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.102:3300,v1:192.168.123.102:6789] 2026-03-10T08:35:10.085 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true 2026-03-10T08:35:10.085 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true 2026-03-10T08:35:10.085 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false 2026-03-10T08:35:10.085 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0 2026-03-10T08:35:10.085 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 2026-03-10T08:35:10.085 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout [mgr] 2026-03-10T08:35:10.085 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false 2026-03-10T08:35:10.085 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 2026-03-10T08:35:10.085 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout [osd] 2026-03-10T08:35:10.085 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10 
2026-03-10T08:35:10.085 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true
2026-03-10T08:35:10.085 INFO:teuthology.orchestra.run.vm02.stdout:Enabling cephadm module...
2026-03-10T08:35:10.789 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:10 vm02 bash[17473]: cluster 2026-03-10T08:35:09.519323+0000 mon.vm02 (mon.0) 31 : cluster [DBG] mgrmap e3: vm02.ttibzz(active, since 1.00875s)
2026-03-10T08:35:10.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:10 vm02 bash[17473]: audit 2026-03-10T08:35:09.744889+0000 mon.vm02 (mon.0) 32 : audit [DBG] from='client.? 192.168.123.102:0/4227904123' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
2026-03-10T08:35:10.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:10 vm02 bash[17473]: audit 2026-03-10T08:35:10.040006+0000 mon.vm02 (mon.0) 33 : audit [INF] from='client.? 192.168.123.102:0/56060668' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch
2026-03-10T08:35:10.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:10 vm02 bash[17473]: audit 2026-03-10T08:35:10.328476+0000 mon.vm02 (mon.0) 34 : audit [INF] from='client.? 192.168.123.102:0/866742214' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "cephadm"}]: dispatch
2026-03-10T08:35:10.945 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout {
2026-03-10T08:35:10.945 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 4,
2026-03-10T08:35:10.945 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "available": true,
2026-03-10T08:35:10.945 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "active_name": "vm02.ttibzz",
2026-03-10T08:35:10.945 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_standby": 0
2026-03-10T08:35:10.945 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }
2026-03-10T08:35:10.945 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for the mgr to restart...
2026-03-10T08:35:10.946 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for mgr epoch 4...
2026-03-10T08:35:11.789 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:11 vm02 bash[17473]: audit 2026-03-10T08:35:10.524545+0000 mon.vm02 (mon.0) 35 : audit [INF] from='client.? 192.168.123.102:0/866742214' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished
2026-03-10T08:35:11.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:11 vm02 bash[17473]: cluster 2026-03-10T08:35:10.528320+0000 mon.vm02 (mon.0) 36 : cluster [DBG] mgrmap e4: vm02.ttibzz(active, since 2s)
2026-03-10T08:35:11.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:11 vm02 bash[17473]: audit 2026-03-10T08:35:10.899248+0000 mon.vm02 (mon.0) 37 : audit [DBG] from='client.? 192.168.123.102:0/115395052' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch
2026-03-10T08:35:14.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:14 vm02 bash[17473]: cluster 2026-03-10T08:35:13.970758+0000 mon.vm02 (mon.0) 38 : cluster [INF] Active manager daemon vm02.ttibzz restarted
2026-03-10T08:35:14.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:14 vm02 bash[17473]: cluster 2026-03-10T08:35:13.970985+0000 mon.vm02 (mon.0) 39 : cluster [INF] Activating manager daemon vm02.ttibzz
2026-03-10T08:35:14.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:14 vm02 bash[17473]: cluster 2026-03-10T08:35:13.975582+0000 mon.vm02 (mon.0) 40 : cluster [DBG] osdmap e2: 0 total, 0 up, 0 in
2026-03-10T08:35:14.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:14 vm02 bash[17473]: cluster 2026-03-10T08:35:13.975711+0000 mon.vm02 (mon.0) 41 : cluster [DBG] mgrmap e5: vm02.ttibzz(active, starting, since 0.00482893s)
2026-03-10T08:35:14.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:14 vm02 bash[17473]: audit 2026-03-10T08:35:13.978897+0000 mon.vm02 (mon.0) 42 : audit [DBG] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata", "id": "vm02"}]: dispatch
2026-03-10T08:35:14.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:14 vm02 bash[17473]: audit 2026-03-10T08:35:13.979183+0000 mon.vm02 (mon.0) 43 : audit [DBG] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mgr metadata", "who": "vm02.ttibzz", "id": "vm02.ttibzz"}]: dispatch
2026-03-10T08:35:14.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:14 vm02 bash[17473]: audit 2026-03-10T08:35:13.980115+0000 mon.vm02 (mon.0) 44 : audit [DBG] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-10T08:35:14.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:14 vm02 bash[17473]: audit 2026-03-10T08:35:13.980322+0000 mon.vm02 (mon.0) 45 : audit [DBG] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-10T08:35:14.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:14 vm02 bash[17473]: audit 2026-03-10T08:35:13.980524+0000 mon.vm02 (mon.0) 46 : audit [DBG] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-10T08:35:14.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:14 vm02 bash[17473]: cluster 2026-03-10T08:35:13.987234+0000 mon.vm02 (mon.0) 47 : cluster [INF] Manager daemon vm02.ttibzz is now available
2026-03-10T08:35:14.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:14 vm02 bash[17473]: audit 2026-03-10T08:35:13.996714+0000 mon.vm02 (mon.0) 48 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:14.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:14 vm02 bash[17473]: audit 2026-03-10T08:35:14.000504+0000 mon.vm02 (mon.0) 49 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:14.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:14 vm02 bash[17473]: audit 2026-03-10T08:35:14.013453+0000 mon.vm02 (mon.0) 50 : audit [DBG] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:35:14.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:14 vm02 bash[17473]: audit 2026-03-10T08:35:14.013633+0000 mon.vm02 (mon.0) 51 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm02.ttibzz/mirror_snapshot_schedule"}]: dispatch
2026-03-10T08:35:14.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:14 vm02 bash[17473]: audit 2026-03-10T08:35:14.016094+0000 mon.vm02 (mon.0) 52 : audit [DBG] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:35:15.051 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout {
2026-03-10T08:35:15.051 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "mgrmap_epoch": 6,
2026-03-10T08:35:15.051 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "initialized": true
2026-03-10T08:35:15.051 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }
2026-03-10T08:35:15.051 INFO:teuthology.orchestra.run.vm02.stdout:mgr epoch 4 is available
2026-03-10T08:35:15.051 INFO:teuthology.orchestra.run.vm02.stdout:Setting orchestrator backend to cephadm...
2026-03-10T08:35:15.289 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:15 vm02 bash[17473]: cephadm 2026-03-10T08:35:13.994191+0000 mgr.vm02.ttibzz (mgr.14118) 1 : cephadm [INF] Found migration_current of "None". Setting to last migration.
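The audit trail above is the tail of bootstrap: the minimal ceph.conf was folded into the mon's config store (config assimilate-conf), the cephadm mgr module was enabled, which respawns the active mgr (hence the restarted/activating records through mgrmap e6), and the orchestrator backend is being pointed at the module. Rerunning the sequence by hand, a sketch built from the same commands the audit records show:

    ceph config assimilate-conf -i /etc/ceph/ceph.conf   # merge file options into the mon config db
    ceph mgr module enable cephadm                       # active mgr restarts to load the module
    ceph orch set backend cephadm
    ceph orch status                                     # should now report the cephadm backend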
2026-03-10T08:35:15.289 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:15 vm02 bash[17473]: audit 2026-03-10T08:35:14.027953+0000 mon.vm02 (mon.0) 53 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm02.ttibzz/trash_purge_schedule"}]: dispatch
2026-03-10T08:35:15.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:15 vm02 bash[17473]: audit 2026-03-10T08:35:14.642072+0000 mon.vm02 (mon.0) 54 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:15.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:15 vm02 bash[17473]: audit 2026-03-10T08:35:14.645075+0000 mon.vm02 (mon.0) 55 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:15.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:15 vm02 bash[17473]: cluster 2026-03-10T08:35:14.979402+0000 mon.vm02 (mon.0) 56 : cluster [DBG] mgrmap e6: vm02.ttibzz(active, since 1.00852s)
2026-03-10T08:35:15.645 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout value unchanged
2026-03-10T08:35:15.645 INFO:teuthology.orchestra.run.vm02.stdout:Generating ssh key...
2026-03-10T08:35:16.186 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDl+cP3FXr390sZ1jbKNYHnaztkAaBSx9N987HQDheGk ceph-e750d050-1c5b-11f1-9e63-531fde0192f6
2026-03-10T08:35:16.186 INFO:teuthology.orchestra.run.vm02.stdout:Wrote public SSH key to /home/ubuntu/cephtest/ceph.pub
2026-03-10T08:35:16.186 INFO:teuthology.orchestra.run.vm02.stdout:Adding key to root@localhost authorized_keys...
2026-03-10T08:35:16.186 INFO:teuthology.orchestra.run.vm02.stdout:Adding host vm02...
2026-03-10T08:35:16.436 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:16 vm02 bash[17473]: audit 2026-03-10T08:35:14.979954+0000 mgr.vm02.ttibzz (mgr.14118) 2 : audit [DBG] from='client.14122 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch
2026-03-10T08:35:16.437 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:16 vm02 bash[17473]: audit 2026-03-10T08:35:14.983865+0000 mgr.vm02.ttibzz (mgr.14118) 3 : audit [DBG] from='client.14122 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch
2026-03-10T08:35:16.437 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:16 vm02 bash[17473]: cephadm 2026-03-10T08:35:15.223374+0000 mgr.vm02.ttibzz (mgr.14118) 4 : cephadm [INF] [10/Mar/2026:08:35:15] ENGINE Bus STARTING
2026-03-10T08:35:16.437 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:16 vm02 bash[17473]: audit 2026-03-10T08:35:15.329311+0000 mgr.vm02.ttibzz (mgr.14118) 5 : audit [DBG] from='client.14130 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:35:16.437 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:16 vm02 bash[17473]: audit 2026-03-10T08:35:15.332622+0000 mon.vm02 (mon.0) 57 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:16.437 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:16 vm02 bash[17473]: audit 2026-03-10T08:35:15.337143+0000 mon.vm02 (mon.0) 58 : audit [DBG] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:35:16.437 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:16 vm02 bash[17473]: cephadm 2026-03-10T08:35:15.341161+0000 mgr.vm02.ttibzz (mgr.14118) 6 : cephadm [INF] [10/Mar/2026:08:35:15] ENGINE Serving on https://192.168.123.102:7150
2026-03-10T08:35:16.437 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:16 vm02 bash[17473]: cephadm 2026-03-10T08:35:15.341606+0000 mgr.vm02.ttibzz (mgr.14118) 7 : cephadm [INF] [10/Mar/2026:08:35:15] ENGINE Client ('192.168.123.102', 42084) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-03-10T08:35:16.437 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:16 vm02 bash[17473]: cephadm 2026-03-10T08:35:15.442210+0000 mgr.vm02.ttibzz (mgr.14118) 8 : cephadm [INF] [10/Mar/2026:08:35:15] ENGINE Serving on http://192.168.123.102:8765
2026-03-10T08:35:16.437 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:16 vm02 bash[17473]: cephadm 2026-03-10T08:35:15.442248+0000 mgr.vm02.ttibzz (mgr.14118) 9 : cephadm [INF] [10/Mar/2026:08:35:15] ENGINE Bus STARTED
2026-03-10T08:35:16.437 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:16 vm02 bash[17473]: audit 2026-03-10T08:35:15.442961+0000 mon.vm02 (mon.0) 59 : audit [DBG] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:35:16.437 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:16 vm02 bash[17473]: audit 2026-03-10T08:35:15.885748+0000 mon.vm02 (mon.0) 60 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:16.437 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:16 vm02 bash[17473]: audit 2026-03-10T08:35:15.888187+0000 mon.vm02 (mon.0) 61 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:17.789 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:17 vm02 bash[17473]: audit 2026-03-10T08:35:15.606084+0000 mgr.vm02.ttibzz (mgr.14118) 10 : audit [DBG] from='client.14132 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "root", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:35:17.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:17 vm02 bash[17473]: audit 2026-03-10T08:35:15.867576+0000 mgr.vm02.ttibzz (mgr.14118) 11 : audit [DBG] from='client.14134 -' entity='client.admin' cmd=[{"prefix": "cephadm generate-key", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:35:17.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:17 vm02 bash[17473]: cephadm 2026-03-10T08:35:15.867793+0000 mgr.vm02.ttibzz (mgr.14118) 12 : cephadm [INF] Generating ssh key...
2026-03-10T08:35:17.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:17 vm02 bash[17473]: audit 2026-03-10T08:35:16.145455+0000 mgr.vm02.ttibzz (mgr.14118) 13 : audit [DBG] from='client.14136 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:35:17.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:17 vm02 bash[17473]: audit 2026-03-10T08:35:16.426254+0000 mgr.vm02.ttibzz (mgr.14118) 14 : audit [DBG] from='client.14138 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm02", "addr": "192.168.123.102", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:35:17.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:17 vm02 bash[17473]: cluster 2026-03-10T08:35:16.892113+0000 mon.vm02 (mon.0) 62 : cluster [DBG] mgrmap e7: vm02.ttibzz(active, since 2s)
2026-03-10T08:35:18.358 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout Added host 'vm02' with addr '192.168.123.102'
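The ssh-key and host-add audit entries above correspond to a handful of cephadm mgr commands. A rough equivalent by hand, with hostname and address taken from this run (the ssh-copy-id step is an assumption about how the key reaches the target; teuthology installs it directly):

    # have cephadm connect to remote hosts as root, then mint and fetch the cluster ssh key
    ceph cephadm set-user root
    ceph cephadm generate-key
    ceph cephadm get-pub-key > ceph.pub
    # install the key on the target, then register the host with the orchestrator
    ssh-copy-id -f -i ceph.pub root@vm02
    ceph orch host add vm02 192.168.123.102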
2026-03-10T08:35:18.358 INFO:teuthology.orchestra.run.vm02.stdout:Deploying mon service with default placement...
2026-03-10T08:35:18.634 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:18 vm02 bash[17473]: cephadm 2026-03-10T08:35:16.989872+0000 mgr.vm02.ttibzz (mgr.14118) 15 : cephadm [INF] Deploying cephadm binary to vm02
2026-03-10T08:35:18.634 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:18 vm02 bash[17473]: audit 2026-03-10T08:35:18.287797+0000 mon.vm02 (mon.0) 63 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:18.634 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:18 vm02 bash[17473]: audit 2026-03-10T08:35:18.288518+0000 mon.vm02 (mon.0) 64 : audit [DBG] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:35:18.662 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout Scheduled mon update...
2026-03-10T08:35:18.662 INFO:teuthology.orchestra.run.vm02.stdout:Deploying mgr service with default placement...
2026-03-10T08:35:18.930 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout Scheduled mgr update...
2026-03-10T08:35:18.931 INFO:teuthology.orchestra.run.vm02.stdout:Deploying crash service with default placement...
2026-03-10T08:35:19.218 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout Scheduled crash update...
2026-03-10T08:35:19.218 INFO:teuthology.orchestra.run.vm02.stdout:Deploying ceph-exporter service with default placement...
2026-03-10T08:35:19.485 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:19 vm02 bash[17473]: cephadm 2026-03-10T08:35:18.288219+0000 mgr.vm02.ttibzz (mgr.14118) 16 : cephadm [INF] Added host vm02
2026-03-10T08:35:19.485 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:19 vm02 bash[17473]: audit 2026-03-10T08:35:18.619056+0000 mon.vm02 (mon.0) 65 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:19.485 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:19 vm02 bash[17473]: audit 2026-03-10T08:35:18.885765+0000 mon.vm02 (mon.0) 66 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:19.485 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:19 vm02 bash[17473]: audit 2026-03-10T08:35:19.173440+0000 mon.vm02 (mon.0) 67 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:19.513 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout Scheduled ceph-exporter update...
2026-03-10T08:35:19.513 INFO:teuthology.orchestra.run.vm02.stdout:Deploying prometheus service with default placement...
2026-03-10T08:35:19.863 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout Scheduled prometheus update...
2026-03-10T08:35:19.864 INFO:teuthology.orchestra.run.vm02.stdout:Deploying grafana service with default placement...
2026-03-10T08:35:20.260 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout Scheduled grafana update...
2026-03-10T08:35:20.260 INFO:teuthology.orchestra.run.vm02.stdout:Deploying node-exporter service with default placement...
2026-03-10T08:35:20.478 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:20 vm02 bash[17473]: audit 2026-03-10T08:35:18.615530+0000 mgr.vm02.ttibzz (mgr.14118) 17 : audit [DBG] from='client.14140 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:35:20.478 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:20 vm02 bash[17473]: cephadm 2026-03-10T08:35:18.616338+0000 mgr.vm02.ttibzz (mgr.14118) 18 : cephadm [INF] Saving service mon spec with placement count:5
2026-03-10T08:35:20.478 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:20 vm02 bash[17473]: audit 2026-03-10T08:35:18.882176+0000 mgr.vm02.ttibzz (mgr.14118) 19 : audit [DBG] from='client.14142 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:35:20.479 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:20 vm02 bash[17473]: cephadm 2026-03-10T08:35:18.882982+0000 mgr.vm02.ttibzz (mgr.14118) 20 : cephadm [INF] Saving service mgr spec with placement count:2
2026-03-10T08:35:20.479 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:20 vm02 bash[17473]: audit 2026-03-10T08:35:19.168374+0000 mgr.vm02.ttibzz (mgr.14118) 21 : audit [DBG] from='client.14144 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "crash", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:35:20.479 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:20 vm02 bash[17473]: cephadm 2026-03-10T08:35:19.169057+0000 mgr.vm02.ttibzz (mgr.14118) 22 : cephadm [INF] Saving service crash spec with placement *
2026-03-10T08:35:20.479 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:20 vm02 bash[17473]: audit 2026-03-10T08:35:19.467985+0000 mgr.vm02.ttibzz (mgr.14118) 23 : audit [DBG] from='client.14146 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "ceph-exporter", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:35:20.479 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:20 vm02 bash[17473]: cephadm 2026-03-10T08:35:19.468861+0000 mgr.vm02.ttibzz (mgr.14118) 24 : cephadm [INF] Saving service ceph-exporter spec with placement *
2026-03-10T08:35:20.479 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:20 vm02 bash[17473]: audit 2026-03-10T08:35:19.471309+0000 mon.vm02 (mon.0) 68 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:20.479 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:20 vm02 bash[17473]: audit 2026-03-10T08:35:19.799713+0000 mon.vm02 (mon.0) 69 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:20.479 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:20 vm02 bash[17473]: audit 2026-03-10T08:35:19.836177+0000 mon.vm02 (mon.0) 70 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:20.479 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:20 vm02 bash[17473]: audit 2026-03-10T08:35:20.146031+0000 mon.vm02 (mon.0) 71 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:20.479 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:20 vm02 bash[17473]: audit 2026-03-10T08:35:20.247325+0000 mon.vm02 (mon.0) 72 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:20.611 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout Scheduled node-exporter update...
2026-03-10T08:35:20.611 INFO:teuthology.orchestra.run.vm02.stdout:Deploying alertmanager service with default placement...
2026-03-10T08:35:20.893 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout Scheduled alertmanager update...
2026-03-10T08:35:21.447 INFO:teuthology.orchestra.run.vm02.stdout:Enabling the dashboard module...
2026-03-10T08:35:21.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:21 vm02 bash[17473]: audit 2026-03-10T08:35:19.795835+0000 mgr.vm02.ttibzz (mgr.14118) 25 : audit [DBG] from='client.14148 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:35:21.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:21 vm02 bash[17473]: cephadm 2026-03-10T08:35:19.796742+0000 mgr.vm02.ttibzz (mgr.14118) 26 : cephadm [INF] Saving service prometheus spec with placement count:1
2026-03-10T08:35:21.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:21 vm02 bash[17473]: audit 2026-03-10T08:35:20.140352+0000 mgr.vm02.ttibzz (mgr.14118) 27 : audit [DBG] from='client.14150 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:35:21.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:21 vm02 bash[17473]: cephadm 2026-03-10T08:35:20.141244+0000 mgr.vm02.ttibzz (mgr.14118) 28 : cephadm [INF] Saving service grafana spec with placement count:1
2026-03-10T08:35:21.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:21 vm02 bash[17473]: audit 2026-03-10T08:35:20.545345+0000 mgr.vm02.ttibzz (mgr.14118) 29 : audit [DBG] from='client.14152 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "node-exporter", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:35:21.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:21 vm02 bash[17473]: cephadm 2026-03-10T08:35:20.546153+0000 mgr.vm02.ttibzz (mgr.14118) 30 : cephadm [INF] Saving service node-exporter spec with placement *
2026-03-10T08:35:21.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:21 vm02 bash[17473]: audit 2026-03-10T08:35:20.549702+0000 mon.vm02 (mon.0) 73 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:21.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:21 vm02 bash[17473]: audit 2026-03-10T08:35:20.846271+0000 mon.vm02 (mon.0) 74 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:21.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:21 vm02 bash[17473]: audit 2026-03-10T08:35:21.126813+0000 mon.vm02 (mon.0) 75 : audit [INF] from='client.? 192.168.123.102:0/1890823856' entity='client.admin'
2026-03-10T08:35:21.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:21 vm02 bash[17473]: audit 2026-03-10T08:35:21.394331+0000 mon.vm02 (mon.0) 76 : audit [INF] from='client.? 192.168.123.102:0/2600197362' entity='client.admin'
2026-03-10T08:35:22.894 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:22 vm02 bash[17473]: audit 2026-03-10T08:35:20.841796+0000 mgr.vm02.ttibzz (mgr.14118) 31 : audit [DBG] from='client.14154 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:35:22.895 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:22 vm02 bash[17473]: cephadm 2026-03-10T08:35:20.842662+0000 mgr.vm02.ttibzz (mgr.14118) 32 : cephadm [INF] Saving service alertmanager spec with placement count:1
2026-03-10T08:35:22.895 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:22 vm02 bash[17473]: audit 2026-03-10T08:35:21.702464+0000 mon.vm02 (mon.0) 77 : audit [INF] from='client.? 192.168.123.102:0/3190377713' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "dashboard"}]: dispatch
2026-03-10T08:35:23.029 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout {
2026-03-10T08:35:23.029 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "epoch": 8,
2026-03-10T08:35:23.030 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "available": true,
2026-03-10T08:35:23.030 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "active_name": "vm02.ttibzz",
2026-03-10T08:35:23.030 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "num_standby": 0
2026-03-10T08:35:23.030 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }
2026-03-10T08:35:23.030 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for the mgr to restart...
2026-03-10T08:35:23.030 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for mgr epoch 8...
2026-03-10T08:35:23.890 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:23 vm02 bash[17473]: audit 2026-03-10T08:35:22.552193+0000 mon.vm02 (mon.0) 78 : audit [INF] from='client.? 192.168.123.102:0/3190377713' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "dashboard"}]': finished
2026-03-10T08:35:23.891 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:23 vm02 bash[17473]: cluster 2026-03-10T08:35:22.557494+0000 mon.vm02 (mon.0) 79 : cluster [DBG] mgrmap e8: vm02.ttibzz(active, since 8s)
2026-03-10T08:35:23.891 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:23 vm02 bash[17473]: audit 2026-03-10T08:35:22.972433+0000 mon.vm02 (mon.0) 80 : audit [DBG] from='client.? 192.168.123.102:0/3930567975' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch
2026-03-10T08:35:26.289 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:25 vm02 bash[17473]: cluster 2026-03-10T08:35:25.890469+0000 mon.vm02 (mon.0) 81 : cluster [INF] Active manager daemon vm02.ttibzz restarted
2026-03-10T08:35:26.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:25 vm02 bash[17473]: cluster 2026-03-10T08:35:25.890709+0000 mon.vm02 (mon.0) 82 : cluster [INF] Activating manager daemon vm02.ttibzz
2026-03-10T08:35:26.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:25 vm02 bash[17473]: cluster 2026-03-10T08:35:25.896594+0000 mon.vm02 (mon.0) 83 : cluster [DBG] osdmap e3: 0 total, 0 up, 0 in
2026-03-10T08:35:26.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:25 vm02 bash[17473]: cluster 2026-03-10T08:35:25.896735+0000 mon.vm02 (mon.0) 84 : cluster [DBG] mgrmap e9: vm02.ttibzz(active, starting, since 0.00613587s)
2026-03-10T08:35:26.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:25 vm02 bash[17473]: audit 2026-03-10T08:35:25.899065+0000 mon.vm02 (mon.0) 85 : audit [DBG] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata", "id": "vm02"}]: dispatch
2026-03-10T08:35:26.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:25 vm02 bash[17473]: audit 2026-03-10T08:35:25.899141+0000 mon.vm02 (mon.0) 86 : audit [DBG] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mgr metadata", "who": "vm02.ttibzz", "id": "vm02.ttibzz"}]: dispatch
2026-03-10T08:35:26.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:25 vm02 bash[17473]: audit 2026-03-10T08:35:25.899500+0000 mon.vm02 (mon.0) 87 : audit [DBG] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-10T08:35:26.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:25 vm02 bash[17473]: audit 2026-03-10T08:35:25.899558+0000 mon.vm02 (mon.0) 88 : audit [DBG] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-10T08:35:26.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:25 vm02 bash[17473]: audit 2026-03-10T08:35:25.899604+0000 mon.vm02 (mon.0) 89 : audit [DBG] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-10T08:35:26.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:25 vm02 bash[17473]: cluster 2026-03-10T08:35:25.905263+0000 mon.vm02 (mon.0) 90 : cluster [INF] Manager daemon vm02.ttibzz is now available
2026-03-10T08:35:26.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:25 vm02 bash[17473]: audit 2026-03-10T08:35:25.921047+0000 mon.vm02 (mon.0) 91 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm02.ttibzz/mirror_snapshot_schedule"}]: dispatch
2026-03-10T08:35:26.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:25 vm02 bash[17473]: audit 2026-03-10T08:35:25.921779+0000 mon.vm02 (mon.0) 92 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm02.ttibzz/trash_purge_schedule"}]: dispatch
2026-03-10T08:35:26.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:25 vm02 bash[17473]: audit 2026-03-10T08:35:25.928585+0000 mon.vm02 (mon.0) 93 : audit [DBG] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:35:26.946 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout {
2026-03-10T08:35:26.947 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "mgrmap_epoch": 10,
2026-03-10T08:35:26.947 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout "initialized": true
2026-03-10T08:35:26.947 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout }
2026-03-10T08:35:26.947 INFO:teuthology.orchestra.run.vm02.stdout:mgr epoch 8 is available
2026-03-10T08:35:26.947 INFO:teuthology.orchestra.run.vm02.stdout:Generating a dashboard self-signed certificate...
2026-03-10T08:35:27.277 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout Self-signed certificate created
2026-03-10T08:35:27.277 INFO:teuthology.orchestra.run.vm02.stdout:Creating initial admin user...
2026-03-10T08:35:27.700 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout {"username": "admin", "password": "$2b$12$UWXUWicCMKQexFGaNv24c.EgRh/4zIH.ymFjSp63acJr5KINclfcW", "roles": ["administrator"], "name": null, "email": null, "lastUpdate": 1773131727, "enabled": true, "pwdExpirationDate": null, "pwdUpdateRequired": true}
2026-03-10T08:35:27.700 INFO:teuthology.orchestra.run.vm02.stdout:Fetching dashboard port number...
2026-03-10T08:35:27.969 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:27 vm02 bash[17473]: cephadm 2026-03-10T08:35:26.840125+0000 mgr.vm02.ttibzz (mgr.14162) 1 : cephadm [INF] [10/Mar/2026:08:35:26] ENGINE Bus STARTING
2026-03-10T08:35:27.969 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:27 vm02 bash[17473]: cluster 2026-03-10T08:35:26.900274+0000 mon.vm02 (mon.0) 94 : cluster [DBG] mgrmap e10: vm02.ttibzz(active, since 1.00967s)
2026-03-10T08:35:27.969 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:27 vm02 bash[17473]: audit 2026-03-10T08:35:26.901345+0000 mgr.vm02.ttibzz (mgr.14162) 2 : audit [DBG] from='client.14166 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch
2026-03-10T08:35:27.969 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:27 vm02 bash[17473]: audit 2026-03-10T08:35:26.905161+0000 mgr.vm02.ttibzz (mgr.14162) 3 : audit [DBG] from='client.14166 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch
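The dashboard bring-up in this stretch of the log (module enable, mgr restart, self-signed certificate, initial admin user) follows the standard mgr/dashboard flow. A hedged sketch of the equivalent manual steps; the password file name is illustrative, and the exact ac-user-create flags in the audit entry below are set by the bootstrap code:

    ceph mgr module enable dashboard
    ceph dashboard create-self-signed-cert
    # ac-user-create reads the password from a file; dashboard_password.txt is a placeholder
    echo -n 'changeme' > dashboard_password.txt
    ceph dashboard ac-user-create admin -i dashboard_password.txt administrator
    # the mgr restarts when the module loads; poll until the new epoch is active
    ceph mgr stat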
2026-03-10T08:35:27.969 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:27 vm02 bash[17473]: cephadm 2026-03-10T08:35:26.942110+0000 mgr.vm02.ttibzz (mgr.14162) 4 : cephadm [INF] [10/Mar/2026:08:35:26] ENGINE Serving on http://192.168.123.102:8765
2026-03-10T08:35:27.970 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:27 vm02 bash[17473]: cephadm 2026-03-10T08:35:27.053675+0000 mgr.vm02.ttibzz (mgr.14162) 5 : cephadm [INF] [10/Mar/2026:08:35:27] ENGINE Serving on https://192.168.123.102:7150
2026-03-10T08:35:27.970 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:27 vm02 bash[17473]: cephadm 2026-03-10T08:35:27.053713+0000 mgr.vm02.ttibzz (mgr.14162) 6 : cephadm [INF] [10/Mar/2026:08:35:27] ENGINE Bus STARTED
2026-03-10T08:35:27.970 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:27 vm02 bash[17473]: cephadm 2026-03-10T08:35:27.054109+0000 mgr.vm02.ttibzz (mgr.14162) 7 : cephadm [INF] [10/Mar/2026:08:35:27] ENGINE Client ('192.168.123.102', 40748) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-03-10T08:35:27.970 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:27 vm02 bash[17473]: audit 2026-03-10T08:35:27.160790+0000 mgr.vm02.ttibzz (mgr.14162) 8 : audit [DBG] from='client.14174 -' entity='client.admin' cmd=[{"prefix": "dashboard create-self-signed-cert", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:35:27.970 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:27 vm02 bash[17473]: audit 2026-03-10T08:35:27.228502+0000 mon.vm02 (mon.0) 95 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:27.970 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:27 vm02 bash[17473]: audit 2026-03-10T08:35:27.231354+0000 mon.vm02 (mon.0) 96 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:27.970 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:27 vm02 bash[17473]: audit 2026-03-10T08:35:27.500881+0000 mgr.vm02.ttibzz (mgr.14162) 9 : audit [DBG] from='client.14176 -' entity='client.admin' cmd=[{"prefix": "dashboard ac-user-create", "username": "admin", "rolename": "administrator", "force_password": true, "pwd_update_required": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:35:27.970 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:27 vm02 bash[17473]: audit 2026-03-10T08:35:27.657200+0000 mon.vm02 (mon.0) 97 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:28.004 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stdout 8443
2026-03-10T08:35:28.004 INFO:teuthology.orchestra.run.vm02.stdout:firewalld does not appear to be present
2026-03-10T08:35:28.004 INFO:teuthology.orchestra.run.vm02.stdout:Not possible to open ports <[8443]>. firewalld.service is not available
2026-03-10T08:35:28.005 INFO:teuthology.orchestra.run.vm02.stdout:Ceph Dashboard is now available at:
2026-03-10T08:35:28.005 INFO:teuthology.orchestra.run.vm02.stdout:
2026-03-10T08:35:28.005 INFO:teuthology.orchestra.run.vm02.stdout: URL: https://vm02.local:8443/
2026-03-10T08:35:28.005 INFO:teuthology.orchestra.run.vm02.stdout: User: admin
2026-03-10T08:35:28.005 INFO:teuthology.orchestra.run.vm02.stdout: Password: ox4u14naj2
2026-03-10T08:35:28.005 INFO:teuthology.orchestra.run.vm02.stdout:
2026-03-10T08:35:28.005 INFO:teuthology.orchestra.run.vm02.stdout:Saving cluster configuration to /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/config directory
2026-03-10T08:35:28.330 INFO:teuthology.orchestra.run.vm02.stdout:/usr/bin/ceph: stderr set mgr/dashboard/cluster/status
2026-03-10T08:35:28.330 INFO:teuthology.orchestra.run.vm02.stdout:You can access the Ceph CLI as following in case of multi-cluster or non-default config:
2026-03-10T08:35:28.330 INFO:teuthology.orchestra.run.vm02.stdout:
2026-03-10T08:35:28.330 INFO:teuthology.orchestra.run.vm02.stdout: sudo /home/ubuntu/cephtest/cephadm shell --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring
2026-03-10T08:35:28.330 INFO:teuthology.orchestra.run.vm02.stdout:
2026-03-10T08:35:28.330 INFO:teuthology.orchestra.run.vm02.stdout:Or, if you are only running a single cluster on this host:
2026-03-10T08:35:28.330 INFO:teuthology.orchestra.run.vm02.stdout:
2026-03-10T08:35:28.330 INFO:teuthology.orchestra.run.vm02.stdout: sudo /home/ubuntu/cephtest/cephadm shell
2026-03-10T08:35:28.330 INFO:teuthology.orchestra.run.vm02.stdout:
2026-03-10T08:35:28.330 INFO:teuthology.orchestra.run.vm02.stdout:Please consider enabling telemetry to help improve Ceph:
2026-03-10T08:35:28.330 INFO:teuthology.orchestra.run.vm02.stdout:
2026-03-10T08:35:28.330 INFO:teuthology.orchestra.run.vm02.stdout: ceph telemetry on
2026-03-10T08:35:28.330 INFO:teuthology.orchestra.run.vm02.stdout:
2026-03-10T08:35:28.330 INFO:teuthology.orchestra.run.vm02.stdout:For more information see:
2026-03-10T08:35:28.330 INFO:teuthology.orchestra.run.vm02.stdout:
2026-03-10T08:35:28.330 INFO:teuthology.orchestra.run.vm02.stdout: https://docs.ceph.com/en/latest/mgr/telemetry/
2026-03-10T08:35:28.330 INFO:teuthology.orchestra.run.vm02.stdout:
2026-03-10T08:35:28.331 INFO:teuthology.orchestra.run.vm02.stdout:Bootstrap complete.
2026-03-10T08:35:28.351 INFO:tasks.cephadm:Fetching config...
2026-03-10T08:35:28.351 DEBUG:teuthology.orchestra.run.vm02:> set -ex
2026-03-10T08:35:28.351 DEBUG:teuthology.orchestra.run.vm02:> dd if=/etc/ceph/ceph.conf of=/dev/stdout
2026-03-10T08:35:28.354 INFO:tasks.cephadm:Fetching client.admin keyring...
2026-03-10T08:35:28.354 DEBUG:teuthology.orchestra.run.vm02:> set -ex
2026-03-10T08:35:28.354 DEBUG:teuthology.orchestra.run.vm02:> dd if=/etc/ceph/ceph.client.admin.keyring of=/dev/stdout
2026-03-10T08:35:28.397 INFO:tasks.cephadm:Fetching mon keyring...
2026-03-10T08:35:28.397 DEBUG:teuthology.orchestra.run.vm02:> set -ex
2026-03-10T08:35:28.397 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/keyring of=/dev/stdout
2026-03-10T08:35:28.444 INFO:tasks.cephadm:Fetching pub ssh key...
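With bootstrap complete, the teuthology task drives all subsequent ceph commands through one-shot cephadm shell invocations, as the DEBUG lines below show. The general pattern, with fsid and paths taken from this run:

    # run a single ceph command inside a temporary container with the cluster's config and keyring
    sudo cephadm shell \
        --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 \
        -c /etc/ceph/ceph.conf \
        -k /etc/ceph/ceph.client.admin.keyring \
        -- ceph orch status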
2026-03-10T08:35:28.444 DEBUG:teuthology.orchestra.run.vm02:> set -ex
2026-03-10T08:35:28.444 DEBUG:teuthology.orchestra.run.vm02:> dd if=/home/ubuntu/cephtest/ceph.pub of=/dev/stdout
2026-03-10T08:35:28.489 INFO:tasks.cephadm:Installing pub ssh key for root users...
2026-03-10T08:35:28.489 DEBUG:teuthology.orchestra.run.vm02:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDl+cP3FXr390sZ1jbKNYHnaztkAaBSx9N987HQDheGk ceph-e750d050-1c5b-11f1-9e63-531fde0192f6' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys
2026-03-10T08:35:28.541 INFO:teuthology.orchestra.run.vm02.stdout:ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDl+cP3FXr390sZ1jbKNYHnaztkAaBSx9N987HQDheGk ceph-e750d050-1c5b-11f1-9e63-531fde0192f6
2026-03-10T08:35:28.545 DEBUG:teuthology.orchestra.run.vm07:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDl+cP3FXr390sZ1jbKNYHnaztkAaBSx9N987HQDheGk ceph-e750d050-1c5b-11f1-9e63-531fde0192f6' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys
2026-03-10T08:35:28.557 INFO:teuthology.orchestra.run.vm07.stdout:ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDl+cP3FXr390sZ1jbKNYHnaztkAaBSx9N987HQDheGk ceph-e750d050-1c5b-11f1-9e63-531fde0192f6
2026-03-10T08:35:28.564 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph config set mgr mgr/cephadm/allow_ptrace true
2026-03-10T08:35:29.289 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:28 vm02 bash[17473]: audit 2026-03-10T08:35:27.956148+0000 mon.vm02 (mon.0) 98 : audit [DBG] from='client.? 192.168.123.102:0/2217454896' entity='client.admin' cmd=[{"prefix": "config get", "who": "mgr", "key": "mgr/dashboard/ssl_server_port"}]: dispatch
2026-03-10T08:35:29.289 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:28 vm02 bash[17473]: audit 2026-03-10T08:35:28.284369+0000 mon.vm02 (mon.0) 99 : audit [INF] from='client.? 192.168.123.102:0/196270727' entity='client.admin'
192.168.123.102:0/196270727' entity='client.admin' 2026-03-10T08:35:29.289 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:28 vm02 bash[17473]: cluster 2026-03-10T08:35:28.660467+0000 mon.vm02 (mon.0) 100 : cluster [DBG] mgrmap e11: vm02.ttibzz(active, since 2s) 2026-03-10T08:35:29.289 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:28 vm02 bash[17473]: cluster 2026-03-10T08:35:28.660467+0000 mon.vm02 (mon.0) 100 : cluster [DBG] mgrmap e11: vm02.ttibzz(active, since 2s) 2026-03-10T08:35:32.289 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:31 vm02 bash[17473]: audit 2026-03-10T08:35:30.976401+0000 mon.vm02 (mon.0) 101 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:32.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:31 vm02 bash[17473]: audit 2026-03-10T08:35:30.976401+0000 mon.vm02 (mon.0) 101 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:32.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:31 vm02 bash[17473]: audit 2026-03-10T08:35:31.586671+0000 mon.vm02 (mon.0) 102 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:32.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:31 vm02 bash[17473]: audit 2026-03-10T08:35:31.586671+0000 mon.vm02 (mon.0) 102 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:32.845 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:35:33.171 INFO:tasks.cephadm:Distributing conf and client.admin keyring to all hosts + 0755 2026-03-10T08:35:33.171 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph orch client-keyring set client.admin '*' --mode 0755 2026-03-10T08:35:33.860 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:33 vm02 bash[17473]: cluster 2026-03-10T08:35:32.592481+0000 mon.vm02 (mon.0) 103 : cluster [DBG] mgrmap e12: vm02.ttibzz(active, since 6s) 2026-03-10T08:35:33.860 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:33 vm02 bash[17473]: cluster 2026-03-10T08:35:32.592481+0000 mon.vm02 (mon.0) 103 : cluster [DBG] mgrmap e12: vm02.ttibzz(active, since 6s) 2026-03-10T08:35:33.860 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:33 vm02 bash[17473]: audit 2026-03-10T08:35:33.103116+0000 mon.vm02 (mon.0) 104 : audit [INF] from='client.? 192.168.123.102:0/796802570' entity='client.admin' 2026-03-10T08:35:33.860 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:33 vm02 bash[17473]: audit 2026-03-10T08:35:33.103116+0000 mon.vm02 (mon.0) 104 : audit [INF] from='client.? 192.168.123.102:0/796802570' entity='client.admin' 2026-03-10T08:35:37.850 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:35:38.248 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:37 vm02 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. 
Support for KillMode=none is deprecated and will eventually be removed. 2026-03-10T08:35:38.248 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-03-10T08:35:38.511 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 bash[17473]: audit 2026-03-10T08:35:37.375103+0000 mon.vm02 (mon.0) 105 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:38.511 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 bash[17473]: audit 2026-03-10T08:35:37.375103+0000 mon.vm02 (mon.0) 105 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:38.511 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 bash[17473]: audit 2026-03-10T08:35:37.378012+0000 mon.vm02 (mon.0) 106 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:38.511 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 bash[17473]: audit 2026-03-10T08:35:37.378012+0000 mon.vm02 (mon.0) 106 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:38.511 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 bash[17473]: audit 2026-03-10T08:35:37.378750+0000 mon.vm02 (mon.0) 107 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T08:35:38.511 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 bash[17473]: audit 2026-03-10T08:35:37.378750+0000 mon.vm02 (mon.0) 107 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T08:35:38.511 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 bash[17473]: audit 2026-03-10T08:35:37.381787+0000 mon.vm02 (mon.0) 108 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:38.511 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 bash[17473]: audit 2026-03-10T08:35:37.381787+0000 mon.vm02 (mon.0) 108 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:38.511 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 bash[17473]: audit 2026-03-10T08:35:37.382670+0000 mon.vm02 (mon.0) 109 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm02", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch 2026-03-10T08:35:38.511 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 bash[17473]: audit 2026-03-10T08:35:37.382670+0000 mon.vm02 (mon.0) 109 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm02", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch 2026-03-10T08:35:38.511 
INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 bash[17473]: audit 2026-03-10T08:35:37.383589+0000 mon.vm02 (mon.0) 110 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm02", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished 2026-03-10T08:35:38.511 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 bash[17473]: audit 2026-03-10T08:35:37.383589+0000 mon.vm02 (mon.0) 110 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm02", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished 2026-03-10T08:35:38.511 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 bash[17473]: audit 2026-03-10T08:35:37.384950+0000 mon.vm02 (mon.0) 111 : audit [DBG] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:35:38.511 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 bash[17473]: audit 2026-03-10T08:35:37.384950+0000 mon.vm02 (mon.0) 111 : audit [DBG] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:35:38.511 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 bash[17473]: cephadm 2026-03-10T08:35:37.385498+0000 mgr.vm02.ttibzz (mgr.14162) 10 : cephadm [INF] Deploying daemon ceph-exporter.vm02 on vm02 2026-03-10T08:35:38.511 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 bash[17473]: cephadm 2026-03-10T08:35:37.385498+0000 mgr.vm02.ttibzz (mgr.14162) 10 : cephadm [INF] Deploying daemon ceph-exporter.vm02 on vm02 2026-03-10T08:35:38.511 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 bash[17473]: audit 2026-03-10T08:35:38.204582+0000 mon.vm02 (mon.0) 112 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:38.511 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 bash[17473]: audit 2026-03-10T08:35:38.204582+0000 mon.vm02 (mon.0) 112 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:38.511 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 bash[17473]: audit 2026-03-10T08:35:38.208477+0000 mon.vm02 (mon.0) 113 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:38.511 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 bash[17473]: audit 2026-03-10T08:35:38.208477+0000 mon.vm02 (mon.0) 113 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:38.511 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 bash[17473]: audit 2026-03-10T08:35:38.214161+0000 mon.vm02 (mon.0) 114 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:38.511 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 bash[17473]: audit 2026-03-10T08:35:38.214161+0000 mon.vm02 (mon.0) 114 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:38.511 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 bash[17473]: audit 2026-03-10T08:35:38.219335+0000 mon.vm02 (mon.0) 115 : audit [INF] 
from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:38.511 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 bash[17473]: audit 2026-03-10T08:35:38.219335+0000 mon.vm02 (mon.0) 115 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:38.511 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 bash[17473]: audit 2026-03-10T08:35:38.221962+0000 mon.vm02 (mon.0) 116 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm02", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-10T08:35:38.511 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 bash[17473]: audit 2026-03-10T08:35:38.221962+0000 mon.vm02 (mon.0) 116 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm02", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-10T08:35:38.511 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 bash[17473]: audit 2026-03-10T08:35:38.223192+0000 mon.vm02 (mon.0) 117 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm02", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished 2026-03-10T08:35:38.511 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 bash[17473]: audit 2026-03-10T08:35:38.223192+0000 mon.vm02 (mon.0) 117 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm02", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished 2026-03-10T08:35:38.511 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 bash[17473]: audit 2026-03-10T08:35:38.225551+0000 mon.vm02 (mon.0) 118 : audit [DBG] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:35:38.511 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 bash[17473]: audit 2026-03-10T08:35:38.225551+0000 mon.vm02 (mon.0) 118 : audit [DBG] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:35:38.561 INFO:tasks.cephadm:Writing (initial) conf and keyring to vm07 2026-03-10T08:35:38.562 DEBUG:teuthology.orchestra.run.vm07:> set -ex 2026-03-10T08:35:38.562 DEBUG:teuthology.orchestra.run.vm07:> dd of=/etc/ceph/ceph.conf 2026-03-10T08:35:38.565 DEBUG:teuthology.orchestra.run.vm07:> set -ex 2026-03-10T08:35:38.565 DEBUG:teuthology.orchestra.run.vm07:> dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-10T08:35:38.612 INFO:tasks.cephadm:Adding host vm07 to orchestrator... 2026-03-10T08:35:38.612 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph orch host add vm07 2026-03-10T08:35:39.174 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:38 vm02 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. 
This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-03-10T08:35:39.174 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:39 vm02 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-03-10T08:35:39.483 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:39 vm02 bash[17473]: cephadm 2026-03-10T08:35:38.226227+0000 mgr.vm02.ttibzz (mgr.14162) 11 : cephadm [INF] Deploying daemon crash.vm02 on vm02 2026-03-10T08:35:39.483 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:39 vm02 bash[17473]: cephadm 2026-03-10T08:35:38.226227+0000 mgr.vm02.ttibzz (mgr.14162) 11 : cephadm [INF] Deploying daemon crash.vm02 on vm02 2026-03-10T08:35:39.483 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:39 vm02 bash[17473]: audit 2026-03-10T08:35:38.413006+0000 mgr.vm02.ttibzz (mgr.14162) 12 : audit [DBG] from='client.14184 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:35:39.483 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:39 vm02 bash[17473]: audit 2026-03-10T08:35:38.413006+0000 mgr.vm02.ttibzz (mgr.14162) 12 : audit [DBG] from='client.14184 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:35:39.483 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:39 vm02 bash[17473]: audit 2026-03-10T08:35:38.416184+0000 mon.vm02 (mon.0) 119 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:39.483 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:39 vm02 bash[17473]: audit 2026-03-10T08:35:38.416184+0000 mon.vm02 (mon.0) 119 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:39.483 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:39 vm02 bash[17473]: audit 2026-03-10T08:35:39.180251+0000 mon.vm02 (mon.0) 120 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:39.483 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:39 vm02 bash[17473]: audit 2026-03-10T08:35:39.180251+0000 mon.vm02 (mon.0) 120 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:39.483 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:39 vm02 bash[17473]: audit 2026-03-10T08:35:39.189138+0000 mon.vm02 (mon.0) 121 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:39.483 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:39 vm02 bash[17473]: audit 2026-03-10T08:35:39.189138+0000 mon.vm02 (mon.0) 121 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:39.483 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:39 vm02 bash[17473]: audit 2026-03-10T08:35:39.192926+0000 mon.vm02 (mon.0) 122 : audit [INF] from='mgr.14162 
192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:39.483 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:39 vm02 bash[17473]: audit 2026-03-10T08:35:39.192926+0000 mon.vm02 (mon.0) 122 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:39.483 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:39 vm02 bash[17473]: audit 2026-03-10T08:35:39.200628+0000 mon.vm02 (mon.0) 123 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:39.483 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:39 vm02 bash[17473]: audit 2026-03-10T08:35:39.200628+0000 mon.vm02 (mon.0) 123 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:39.789 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:39 vm02 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-03-10T08:35:40.211 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:39 vm02 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-03-10T08:35:40.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:40 vm02 bash[17473]: cephadm 2026-03-10T08:35:39.201969+0000 mgr.vm02.ttibzz (mgr.14162) 13 : cephadm [INF] Deploying daemon node-exporter.vm02 on vm02 2026-03-10T08:35:40.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:40 vm02 bash[17473]: cephadm 2026-03-10T08:35:39.201969+0000 mgr.vm02.ttibzz (mgr.14162) 13 : cephadm [INF] Deploying daemon node-exporter.vm02 on vm02 2026-03-10T08:35:40.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:40 vm02 bash[17473]: audit 2026-03-10T08:35:39.936531+0000 mon.vm02 (mon.0) 124 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:40.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:40 vm02 bash[17473]: audit 2026-03-10T08:35:39.936531+0000 mon.vm02 (mon.0) 124 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:40.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:40 vm02 bash[17473]: audit 2026-03-10T08:35:39.940762+0000 mon.vm02 (mon.0) 125 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:40.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:40 vm02 bash[17473]: audit 2026-03-10T08:35:39.940762+0000 mon.vm02 (mon.0) 125 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:40.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:40 vm02 bash[17473]: audit 2026-03-10T08:35:39.944041+0000 mon.vm02 (mon.0) 126 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:40.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:40 vm02 bash[17473]: audit 2026-03-10T08:35:39.944041+0000 mon.vm02 (mon.0) 126 : 
audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:40.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:40 vm02 bash[17473]: audit 2026-03-10T08:35:39.946953+0000 mon.vm02 (mon.0) 127 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:40.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:40 vm02 bash[17473]: audit 2026-03-10T08:35:39.946953+0000 mon.vm02 (mon.0) 127 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:41.716 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:41 vm02 bash[17473]: cephadm 2026-03-10T08:35:39.952093+0000 mgr.vm02.ttibzz (mgr.14162) 14 : cephadm [INF] Deploying daemon alertmanager.vm02 on vm02 2026-03-10T08:35:41.717 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:41 vm02 bash[17473]: cephadm 2026-03-10T08:35:39.952093+0000 mgr.vm02.ttibzz (mgr.14162) 14 : cephadm [INF] Deploying daemon alertmanager.vm02 on vm02 2026-03-10T08:35:41.717 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:41 vm02 bash[17473]: audit 2026-03-10T08:35:40.954989+0000 mon.vm02 (mon.0) 128 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:41.717 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:41 vm02 bash[17473]: audit 2026-03-10T08:35:40.954989+0000 mon.vm02 (mon.0) 128 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:35:44.270 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:35:44.379 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:44 vm02 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-03-10T08:35:44.379 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:44 vm02 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 
2026-03-10T08:35:45.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:45 vm02 bash[17473]: audit 2026-03-10T08:35:44.424011+0000 mon.vm02 (mon.0) 129 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:45.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:45 vm02 bash[17473]: audit 2026-03-10T08:35:44.428108+0000 mon.vm02 (mon.0) 130 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:45.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:45 vm02 bash[17473]: audit 2026-03-10T08:35:44.431104+0000 mon.vm02 (mon.0) 131 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:45.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:45 vm02 bash[17473]: audit 2026-03-10T08:35:44.434568+0000 mon.vm02 (mon.0) 132 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:45.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:45 vm02 bash[17473]: audit 2026-03-10T08:35:44.438436+0000 mon.vm02 (mon.0) 133 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:45.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:45 vm02 bash[17473]: audit 2026-03-10T08:35:44.449174+0000 mon.vm02 (mon.0) 134 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:45.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:45 vm02 bash[17473]: cephadm 2026-03-10T08:35:44.453671+0000 mgr.vm02.ttibzz (mgr.14162) 15 : cephadm [INF] Regenerating cephadm self-signed grafana TLS certificates
2026-03-10T08:35:45.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:45 vm02 bash[17473]: audit 2026-03-10T08:35:44.520528+0000 mon.vm02 (mon.0) 135 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:45.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:45 vm02 bash[17473]: audit 2026-03-10T08:35:44.524271+0000 mon.vm02 (mon.0) 136 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:45.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:45 vm02 bash[17473]: audit 2026-03-10T08:35:44.526514+0000 mon.vm02 (mon.0) 137 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-03-10T08:35:45.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:45 vm02 bash[17473]: audit 2026-03-10T08:35:44.526942+0000 mgr.vm02.ttibzz (mgr.14162) 16 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-03-10T08:35:45.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:45 vm02 bash[17473]: audit 2026-03-10T08:35:44.533416+0000 mon.vm02 (mon.0) 138 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:45.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:45 vm02 bash[17473]: cephadm 2026-03-10T08:35:44.541013+0000 mgr.vm02.ttibzz (mgr.14162) 17 : cephadm [INF] Deploying daemon grafana.vm02 on vm02
2026-03-10T08:35:46.586 INFO:teuthology.orchestra.run.vm02.stdout:Added host 'vm07' with addr '192.168.123.107'
2026-03-10T08:35:46.647 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph orch host ls --format=json
2026-03-10T08:35:46.789 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:46 vm02 bash[17473]: audit 2026-03-10T08:35:44.811844+0000 mgr.vm02.ttibzz (mgr.14162) 18 : audit [DBG] from='client.14187 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm07", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:35:46.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:46 vm02 bash[17473]: cephadm 2026-03-10T08:35:45.354122+0000 mgr.vm02.ttibzz (mgr.14162) 19 : cephadm [INF] Deploying cephadm binary to vm07
2026-03-10T08:35:46.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:46 vm02 bash[17473]: audit 2026-03-10T08:35:45.961626+0000 mon.vm02 (mon.0) 139 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:48.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:47 vm02 bash[17473]: cluster 2026-03-10T08:35:45.900981+0000 mgr.vm02.ttibzz (mgr.14162) 20 : cluster [DBG] pgmap v3: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T08:35:48.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:47 vm02 bash[17473]: audit 2026-03-10T08:35:46.580951+0000 mon.vm02 (mon.0) 140 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:48.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:47 vm02 bash[17473]: cephadm 2026-03-10T08:35:46.581350+0000 mgr.vm02.ttibzz (mgr.14162) 21 : cephadm [INF] Added host vm07
2026-03-10T08:35:48.789 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:48 vm02 bash[17473]: cluster 2026-03-10T08:35:47.901199+0000 mgr.vm02.ttibzz (mgr.14162) 22 : cluster [DBG] pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T08:35:51.273 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config
2026-03-10T08:35:51.289 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:50 vm02 bash[17473]: cluster 2026-03-10T08:35:49.901414+0000 mgr.vm02.ttibzz (mgr.14162) 23 : cluster [DBG] pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T08:35:52.556 INFO:teuthology.orchestra.run.vm02.stdout:
2026-03-10T08:35:52.556 INFO:teuthology.orchestra.run.vm02.stdout:[{"addr": "192.168.123.102", "hostname": "vm02", "labels": [], "status": ""}, {"addr": "192.168.123.107", "hostname": "vm07", "labels": [], "status": ""}]
2026-03-10T08:35:52.658 INFO:tasks.cephadm:Setting crush tunables to default
2026-03-10T08:35:52.658 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph osd crush tunables default
2026-03-10T08:35:53.186 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:53 vm02 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-03-10T08:35:53.186 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:53 vm02 bash[17473]: cluster 2026-03-10T08:35:51.901626+0000 mgr.vm02.ttibzz (mgr.14162) 24 : cluster [DBG] pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T08:35:53.186 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:53 vm02 bash[17473]: audit 2026-03-10T08:35:52.552624+0000 mgr.vm02.ttibzz (mgr.14162) 25 : audit [DBG] from='client.14189 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-10T08:35:53.470 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:53 vm02 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-03-10T08:35:54.473 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:54 vm02 bash[17473]: audit 2026-03-10T08:35:53.269607+0000 mon.vm02 (mon.0) 141 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:54.473 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:54 vm02 bash[17473]: audit 2026-03-10T08:35:53.274057+0000 mon.vm02 (mon.0) 142 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:54.473 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:54 vm02 bash[17473]: audit 2026-03-10T08:35:53.277744+0000 mon.vm02 (mon.0) 143 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:54.473 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:54 vm02 bash[17473]: audit 2026-03-10T08:35:53.280455+0000 mon.vm02 (mon.0) 144 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:54.473 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:54 vm02 bash[17473]: audit 2026-03-10T08:35:53.283405+0000 mon.vm02 (mon.0) 145 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:54.473 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:54 vm02 bash[17473]: audit 2026-03-10T08:35:53.288079+0000 mon.vm02 (mon.0) 146 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:54.473 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:54 vm02 bash[17473]: audit 2026-03-10T08:35:53.292028+0000 mon.vm02 (mon.0) 147 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:54.473 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:54 vm02 bash[17473]: audit 2026-03-10T08:35:53.298809+0000 mon.vm02 (mon.0) 148 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:54.473 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:54 vm02 bash[17473]: cephadm 2026-03-10T08:35:53.481017+0000 mgr.vm02.ttibzz (mgr.14162) 26 : cephadm [INF] Deploying daemon prometheus.vm02 on vm02
2026-03-10T08:35:55.539 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:55 vm02 bash[17473]: cluster 2026-03-10T08:35:53.901848+0000 mgr.vm02.ttibzz (mgr.14162) 27 : cluster [DBG] pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T08:35:57.289 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:56 vm02 bash[17473]: cluster 2026-03-10T08:35:55.902028+0000 mgr.vm02.ttibzz (mgr.14162) 28 : cluster [DBG] pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T08:35:57.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:56 vm02 bash[17473]: audit 2026-03-10T08:35:55.968817+0000 mon.vm02 (mon.0) 149 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:35:57.302 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config
2026-03-10T08:35:58.006 INFO:teuthology.orchestra.run.vm02.stderr:adjusted tunables profile to default
2026-03-10T08:35:58.289 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:57 vm02 bash[17473]: audit 2026-03-10T08:35:57.781805+0000 mon.vm02 (mon.0) 150 : audit [INF] from='client.? 192.168.123.102:0/1741032227' entity='client.admin' cmd=[{"prefix": "osd crush tunables", "profile": "default"}]: dispatch
2026-03-10T08:35:58.314 INFO:tasks.cephadm:Adding mon.vm02 on vm02
2026-03-10T08:35:58.314 INFO:tasks.cephadm:Adding mon.vm07 on vm07
2026-03-10T08:35:58.314 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph orch apply mon '2;vm02:192.168.123.102=vm02;vm07:192.168.123.107=vm07'
2026-03-10T08:35:59.443 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T08:35:59.492 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:59 vm02 bash[17473]: cluster 2026-03-10T08:35:57.902220+0000 mgr.vm02.ttibzz (mgr.14162) 29 : cluster [DBG] pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T08:35:59.492 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:59 vm02 bash[17473]: audit 2026-03-10T08:35:57.998814+0000 mon.vm02 (mon.0) 151 : audit [INF] from='client.? 192.168.123.102:0/1741032227' entity='client.admin' cmd='[{"prefix": "osd crush tunables", "profile": "default"}]': finished
2026-03-10T08:35:59.492 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:59 vm02 bash[17473]: cluster 2026-03-10T08:35:58.000626+0000 mon.vm02 (mon.0) 152 : cluster [DBG] osdmap e4: 0 total, 0 up, 0 in
2026-03-10T08:35:59.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:35:59 vm02 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-03-10T08:36:00.468 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T08:36:00.731 INFO:teuthology.orchestra.run.vm07.stdout:Scheduled mon update...
2026-03-10T08:36:00.792 DEBUG:teuthology.orchestra.run.vm07:mon.vm07> sudo journalctl -f -n 0 -u ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@mon.vm07.service
2026-03-10T08:36:00.793 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T08:36:00.793 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph mon dump -f json
2026-03-10T08:36:01.109 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:00 vm02 bash[17473]: audit 2026-03-10T08:35:59.813171+0000 mon.vm02 (mon.0) 153 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:01.110 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:00 vm02 bash[17473]: audit 2026-03-10T08:35:59.817086+0000 mon.vm02 (mon.0) 154 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:01.110 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:00 vm02 bash[17473]: audit 2026-03-10T08:35:59.821584+0000 mon.vm02 (mon.0) 155 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:01.110 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:00 vm02 bash[17473]: audit 2026-03-10T08:35:59.823097+0000 mon.vm02 (mon.0) 156 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mgr module enable", "module": "prometheus"}]: dispatch
2026-03-10T08:36:01.110 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:00 vm02 bash[17473]: cluster 2026-03-10T08:35:59.902419+0000 mgr.vm02.ttibzz (mgr.14162) 30 : cluster [DBG] pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T08:36:01.110 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:00 vm02 bash[17473]: audit 2026-03-10T08:36:00.726847+0000 mon.vm02 (mon.0) 157 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:01.976 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T08:36:02.132 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:01 vm02 bash[17473]: audit 2026-03-10T08:36:00.824032+0000 mon.vm02 (mon.0) 158 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished
2026-03-10T08:36:02.132 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:01 vm02 bash[17473]: cluster 2026-03-10T08:36:00.825542+0000 mon.vm02 (mon.0) 159 : cluster [DBG] mgrmap e13: vm02.ttibzz(active, since 34s)
2026-03-10T08:36:02.997 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T08:36:03.281 INFO:teuthology.orchestra.run.vm07.stderr:dumped monmap epoch 1
2026-03-10T08:36:03.281 INFO:teuthology.orchestra.run.vm07.stdout:
2026-03-10T08:36:03.281 INFO:teuthology.orchestra.run.vm07.stdout:{"epoch":1,"fsid":"e750d050-1c5b-11f1-9e63-531fde0192f6","modified":"2026-03-10T08:35:02.208022Z","created":"2026-03-10T08:35:02.208022Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm02","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-03-10T08:36:03.492 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:03 vm02 bash[17473]: audit 2026-03-10T08:36:03.277703+0000 mon.vm02 (mon.0) 160 : audit [DBG] from='client.? 192.168.123.107:0/1930715989' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T08:36:04.338 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-03-10T08:36:04.339 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph mon dump -f json
2026-03-10T08:36:04.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:04 vm02 bash[17473]: cluster 2026-03-10T08:36:04.177896+0000 mon.vm02 (mon.0) 161 : cluster [INF] Active manager daemon vm02.ttibzz restarted
2026-03-10T08:36:04.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:04 vm02 bash[17473]: cluster 2026-03-10T08:36:04.178429+0000 mon.vm02 (mon.0) 162 : cluster [INF] Activating manager daemon vm02.ttibzz
2026-03-10T08:36:04.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:04 vm02 bash[17473]: cluster 2026-03-10T08:36:04.183401+0000 mon.vm02 (mon.0) 163 : cluster [DBG] osdmap e5: 0 total, 0 up, 0 in
2026-03-10T08:36:04.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:04 vm02 bash[17473]: cluster 2026-03-10T08:36:04.183535+0000 mon.vm02 (mon.0) 164 : cluster [DBG] mgrmap e14: vm02.ttibzz(active, starting, since 0.00525893s)
2026-03-10T08:36:04.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:04 vm02 bash[17473]: audit 2026-03-10T08:36:04.186849+0000 mon.vm02 (mon.0) 165 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata", "id": "vm02"}]: dispatch
2026-03-10T08:36:04.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:04 vm02 bash[17473]: audit 2026-03-10T08:36:04.187294+0000 mon.vm02 (mon.0) 166 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mgr metadata", "who": "vm02.ttibzz", "id": "vm02.ttibzz"}]: dispatch
2026-03-10T08:36:04.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:04 vm02 bash[17473]: audit 2026-03-10T08:36:04.189059+0000 mon.vm02 (mon.0) 167 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-10T08:36:04.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:04 vm02 bash[17473]: audit 2026-03-10T08:36:04.189439+0000 mon.vm02 (mon.0) 168 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-10T08:36:04.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:04 vm02 bash[17473]: audit 2026-03-10T08:36:04.189798+0000 mon.vm02 (mon.0) 169 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-10T08:36:04.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:04 vm02 bash[17473]: cluster 2026-03-10T08:36:04.196404+0000 mon.vm02 (mon.0) 170 : cluster [INF] Manager daemon vm02.ttibzz is now available
2026-03-10T08:36:04.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:04 vm02 bash[17473]: audit 2026-03-10T08:36:04.214537+0000 mon.vm02 (mon.0) 171 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:04.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:04 vm02 bash[17473]: audit 2026-03-10T08:36:04.219721+0000 mon.vm02 (mon.0) 172 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:36:04.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:04 vm02 bash[17473]: audit 2026-03-10T08:36:04.237609+0000 mon.vm02 (mon.0) 173 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:36:04.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:04 vm02 bash[17473]: audit 2026-03-10T08:36:04.243680+0000 mon.vm02 (mon.0) 174 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm02.ttibzz/mirror_snapshot_schedule"}]: dispatch
2026-03-10T08:36:04.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:04 vm02 bash[17473]: audit 2026-03-10T08:36:04.274287+0000 mon.vm02 (mon.0) 175 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm02.ttibzz/trash_purge_schedule"}]: dispatch
2026-03-10T08:36:05.456 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T08:36:06.039 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:05 vm02 bash[17473]: audit 2026-03-10T08:36:04.676654+0000 mon.vm02 (mon.0) 176 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:06.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:05 vm02 bash[17473]: cluster 2026-03-10T08:36:05.189254+0000 mon.vm02 (mon.0) 177 : cluster [DBG] mgrmap e15: vm02.ttibzz(active, since 1.01097s)
2026-03-10T08:36:06.479 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf
2026-03-10T08:36:06.802 INFO:teuthology.orchestra.run.vm07.stderr:dumped monmap epoch 1
2026-03-10T08:36:06.802 INFO:teuthology.orchestra.run.vm07.stdout:
2026-03-10T08:36:06.802
INFO:teuthology.orchestra.run.vm07.stdout:{"epoch":1,"fsid":"e750d050-1c5b-11f1-9e63-531fde0192f6","modified":"2026-03-10T08:35:02.208022Z","created":"2026-03-10T08:35:02.208022Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm02","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-10T08:36:07.039 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:06 vm02 bash[17473]: cephadm 2026-03-10T08:36:05.053604+0000 mgr.vm02.ttibzz (mgr.14195) 1 : cephadm [INF] [10/Mar/2026:08:36:05] ENGINE Bus STARTING 2026-03-10T08:36:07.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:06 vm02 bash[17473]: cephadm 2026-03-10T08:36:05.053604+0000 mgr.vm02.ttibzz (mgr.14195) 1 : cephadm [INF] [10/Mar/2026:08:36:05] ENGINE Bus STARTING 2026-03-10T08:36:07.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:06 vm02 bash[17473]: cephadm 2026-03-10T08:36:05.162188+0000 mgr.vm02.ttibzz (mgr.14195) 2 : cephadm [INF] [10/Mar/2026:08:36:05] ENGINE Serving on https://192.168.123.102:7150 2026-03-10T08:36:07.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:06 vm02 bash[17473]: cephadm 2026-03-10T08:36:05.162188+0000 mgr.vm02.ttibzz (mgr.14195) 2 : cephadm [INF] [10/Mar/2026:08:36:05] ENGINE Serving on https://192.168.123.102:7150 2026-03-10T08:36:07.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:06 vm02 bash[17473]: cephadm 2026-03-10T08:36:05.162735+0000 mgr.vm02.ttibzz (mgr.14195) 3 : cephadm [INF] [10/Mar/2026:08:36:05] ENGINE Client ('192.168.123.102', 57052) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T08:36:07.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:06 vm02 bash[17473]: cephadm 2026-03-10T08:36:05.162735+0000 mgr.vm02.ttibzz (mgr.14195) 3 : cephadm [INF] [10/Mar/2026:08:36:05] ENGINE Client ('192.168.123.102', 57052) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T08:36:07.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:06 vm02 bash[17473]: cephadm 2026-03-10T08:36:05.263805+0000 mgr.vm02.ttibzz (mgr.14195) 4 : cephadm [INF] [10/Mar/2026:08:36:05] ENGINE Serving on http://192.168.123.102:8765 2026-03-10T08:36:07.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:06 vm02 bash[17473]: cephadm 2026-03-10T08:36:05.263805+0000 mgr.vm02.ttibzz (mgr.14195) 4 : cephadm [INF] [10/Mar/2026:08:36:05] ENGINE Serving on http://192.168.123.102:8765 2026-03-10T08:36:07.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:06 vm02 bash[17473]: cephadm 2026-03-10T08:36:05.263849+0000 mgr.vm02.ttibzz (mgr.14195) 5 : cephadm [INF] [10/Mar/2026:08:36:05] ENGINE Bus STARTED 2026-03-10T08:36:07.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:06 vm02 bash[17473]: cephadm 2026-03-10T08:36:05.263849+0000 mgr.vm02.ttibzz (mgr.14195) 5 : cephadm [INF] [10/Mar/2026:08:36:05] ENGINE Bus STARTED 2026-03-10T08:36:07.040 
INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:06 vm02 bash[17473]: audit 2026-03-10T08:36:05.974994+0000 mon.vm02 (mon.0) 178 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:07.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:06 vm02 bash[17473]: audit 2026-03-10T08:36:05.974994+0000 mon.vm02 (mon.0) 178 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:07.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:06 vm02 bash[17473]: audit 2026-03-10T08:36:06.570903+0000 mon.vm02 (mon.0) 179 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:07.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:06 vm02 bash[17473]: audit 2026-03-10T08:36:06.570903+0000 mon.vm02 (mon.0) 179 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:07.868 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-10T08:36:07.868 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph mon dump -f json 2026-03-10T08:36:08.039 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:07 vm02 bash[17473]: audit 2026-03-10T08:36:06.798350+0000 mon.vm02 (mon.0) 180 : audit [DBG] from='client.? 192.168.123.107:0/2770521868' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T08:36:08.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:07 vm02 bash[17473]: audit 2026-03-10T08:36:06.798350+0000 mon.vm02 (mon.0) 180 : audit [DBG] from='client.? 
192.168.123.107:0/2770521868' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T08:36:08.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:07 vm02 bash[17473]: cluster 2026-03-10T08:36:06.978154+0000 mon.vm02 (mon.0) 181 : cluster [DBG] mgrmap e16: vm02.ttibzz(active, since 2s) 2026-03-10T08:36:08.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:07 vm02 bash[17473]: cluster 2026-03-10T08:36:06.978154+0000 mon.vm02 (mon.0) 181 : cluster [DBG] mgrmap e16: vm02.ttibzz(active, since 2s) 2026-03-10T08:36:09.941 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-10T08:36:10.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:10 vm02 bash[17473]: audit 2026-03-10T08:36:09.468780+0000 mon.vm02 (mon.0) 182 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:10.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:10 vm02 bash[17473]: audit 2026-03-10T08:36:09.468780+0000 mon.vm02 (mon.0) 182 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:10.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:10 vm02 bash[17473]: audit 2026-03-10T08:36:09.472054+0000 mon.vm02 (mon.0) 183 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:10.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:10 vm02 bash[17473]: audit 2026-03-10T08:36:09.472054+0000 mon.vm02 (mon.0) 183 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:10.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:10 vm02 bash[17473]: audit 2026-03-10T08:36:09.476860+0000 mon.vm02 (mon.0) 184 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:10.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:10 vm02 bash[17473]: audit 2026-03-10T08:36:09.476860+0000 mon.vm02 (mon.0) 184 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:10.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:10 vm02 bash[17473]: audit 2026-03-10T08:36:09.479823+0000 mon.vm02 (mon.0) 185 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:10.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:10 vm02 bash[17473]: audit 2026-03-10T08:36:09.479823+0000 mon.vm02 (mon.0) 185 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:10.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:10 vm02 bash[17473]: audit 2026-03-10T08:36:09.480726+0000 mon.vm02 (mon.0) 186 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config rm", "who": "osd/host:vm07", "name": "osd_memory_target"}]: dispatch 2026-03-10T08:36:10.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:10 vm02 bash[17473]: audit 2026-03-10T08:36:09.480726+0000 mon.vm02 (mon.0) 186 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config rm", "who": "osd/host:vm07", "name": "osd_memory_target"}]: dispatch 2026-03-10T08:36:10.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:10 vm02 bash[17473]: audit 2026-03-10T08:36:09.833858+0000 mon.vm02 (mon.0) 187 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:10.790 
INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:10 vm02 bash[17473]: audit 2026-03-10T08:36:09.833858+0000 mon.vm02 (mon.0) 187 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:10.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:10 vm02 bash[17473]: audit 2026-03-10T08:36:09.836955+0000 mon.vm02 (mon.0) 188 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:10.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:10 vm02 bash[17473]: audit 2026-03-10T08:36:09.836955+0000 mon.vm02 (mon.0) 188 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:10.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:10 vm02 bash[17473]: audit 2026-03-10T08:36:10.425454+0000 mon.vm02 (mon.0) 189 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:10.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:10 vm02 bash[17473]: audit 2026-03-10T08:36:10.425454+0000 mon.vm02 (mon.0) 189 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:10.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:10 vm02 bash[17473]: audit 2026-03-10T08:36:10.429591+0000 mon.vm02 (mon.0) 190 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:10.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:10 vm02 bash[17473]: audit 2026-03-10T08:36:10.429591+0000 mon.vm02 (mon.0) 190 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:10.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:10 vm02 bash[17473]: audit 2026-03-10T08:36:10.430829+0000 mon.vm02 (mon.0) 191 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T08:36:10.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:10 vm02 bash[17473]: audit 2026-03-10T08:36:10.430829+0000 mon.vm02 (mon.0) 191 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch 2026-03-10T08:36:10.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:10 vm02 bash[17473]: audit 2026-03-10T08:36:10.431586+0000 mon.vm02 (mon.0) 192 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:36:10.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:10 vm02 bash[17473]: audit 2026-03-10T08:36:10.431586+0000 mon.vm02 (mon.0) 192 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:36:10.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:10 vm02 bash[17473]: audit 2026-03-10T08:36:10.432085+0000 mon.vm02 (mon.0) 193 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T08:36:10.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:10 vm02 bash[17473]: audit 2026-03-10T08:36:10.432085+0000 mon.vm02 (mon.0) 193 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": 
"client.admin"}]: dispatch 2026-03-10T08:36:10.963 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/config/ceph.conf 2026-03-10T08:36:11.629 INFO:teuthology.orchestra.run.vm07.stderr:dumped monmap epoch 1 2026-03-10T08:36:11.630 INFO:teuthology.orchestra.run.vm07.stdout: 2026-03-10T08:36:11.630 INFO:teuthology.orchestra.run.vm07.stdout:{"epoch":1,"fsid":"e750d050-1c5b-11f1-9e63-531fde0192f6","modified":"2026-03-10T08:35:02.208022Z","created":"2026-03-10T08:35:02.208022Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm02","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-10T08:36:12.039 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: cephadm 2026-03-10T08:36:10.432764+0000 mgr.vm02.ttibzz (mgr.14195) 6 : cephadm [INF] Updating vm02:/etc/ceph/ceph.conf 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: cephadm 2026-03-10T08:36:10.432764+0000 mgr.vm02.ttibzz (mgr.14195) 6 : cephadm [INF] Updating vm02:/etc/ceph/ceph.conf 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: cephadm 2026-03-10T08:36:10.432868+0000 mgr.vm02.ttibzz (mgr.14195) 7 : cephadm [INF] Updating vm07:/etc/ceph/ceph.conf 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: cephadm 2026-03-10T08:36:10.432868+0000 mgr.vm02.ttibzz (mgr.14195) 7 : cephadm [INF] Updating vm07:/etc/ceph/ceph.conf 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: cephadm 2026-03-10T08:36:10.481183+0000 mgr.vm02.ttibzz (mgr.14195) 8 : cephadm [INF] Updating vm02:/var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/config/ceph.conf 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: cephadm 2026-03-10T08:36:10.481183+0000 mgr.vm02.ttibzz (mgr.14195) 8 : cephadm [INF] Updating vm02:/var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/config/ceph.conf 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: cephadm 2026-03-10T08:36:10.481411+0000 mgr.vm02.ttibzz (mgr.14195) 9 : cephadm [INF] Updating vm07:/var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/config/ceph.conf 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: cephadm 2026-03-10T08:36:10.481411+0000 mgr.vm02.ttibzz (mgr.14195) 9 : cephadm [INF] Updating vm07:/var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/config/ceph.conf 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: cephadm 2026-03-10T08:36:10.516397+0000 mgr.vm02.ttibzz (mgr.14195) 10 : cephadm [INF] Updating vm02:/etc/ceph/ceph.client.admin.keyring 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: cephadm 2026-03-10T08:36:10.516397+0000 mgr.vm02.ttibzz (mgr.14195) 10 : cephadm [INF] Updating 
vm02:/etc/ceph/ceph.client.admin.keyring 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: cephadm 2026-03-10T08:36:10.522917+0000 mgr.vm02.ttibzz (mgr.14195) 11 : cephadm [INF] Updating vm07:/etc/ceph/ceph.client.admin.keyring 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: cephadm 2026-03-10T08:36:10.522917+0000 mgr.vm02.ttibzz (mgr.14195) 11 : cephadm [INF] Updating vm07:/etc/ceph/ceph.client.admin.keyring 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: cephadm 2026-03-10T08:36:10.552867+0000 mgr.vm02.ttibzz (mgr.14195) 12 : cephadm [INF] Updating vm02:/var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/config/ceph.client.admin.keyring 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: cephadm 2026-03-10T08:36:10.552867+0000 mgr.vm02.ttibzz (mgr.14195) 12 : cephadm [INF] Updating vm02:/var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/config/ceph.client.admin.keyring 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: cephadm 2026-03-10T08:36:10.560217+0000 mgr.vm02.ttibzz (mgr.14195) 13 : cephadm [INF] Updating vm07:/var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/config/ceph.client.admin.keyring 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: cephadm 2026-03-10T08:36:10.560217+0000 mgr.vm02.ttibzz (mgr.14195) 13 : cephadm [INF] Updating vm07:/var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/config/ceph.client.admin.keyring 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: audit 2026-03-10T08:36:10.593910+0000 mon.vm02 (mon.0) 194 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: audit 2026-03-10T08:36:10.593910+0000 mon.vm02 (mon.0) 194 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: audit 2026-03-10T08:36:10.597431+0000 mon.vm02 (mon.0) 195 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: audit 2026-03-10T08:36:10.597431+0000 mon.vm02 (mon.0) 195 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: audit 2026-03-10T08:36:10.599852+0000 mon.vm02 (mon.0) 196 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: audit 2026-03-10T08:36:10.599852+0000 mon.vm02 (mon.0) 196 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: audit 2026-03-10T08:36:10.602087+0000 mon.vm02 (mon.0) 197 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: audit 2026-03-10T08:36:10.602087+0000 mon.vm02 (mon.0) 197 : audit [INF] from='mgr.14195 
192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: audit 2026-03-10T08:36:10.604760+0000 mon.vm02 (mon.0) 198 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: audit 2026-03-10T08:36:10.604760+0000 mon.vm02 (mon.0) 198 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: audit 2026-03-10T08:36:10.605994+0000 mon.vm02 (mon.0) 199 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm07", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: audit 2026-03-10T08:36:10.605994+0000 mon.vm02 (mon.0) 199 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm07", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: audit 2026-03-10T08:36:10.607083+0000 mon.vm02 (mon.0) 200 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm07", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: audit 2026-03-10T08:36:10.607083+0000 mon.vm02 (mon.0) 200 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm07", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: audit 2026-03-10T08:36:10.608502+0000 mon.vm02 (mon.0) 201 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: audit 2026-03-10T08:36:10.608502+0000 mon.vm02 (mon.0) 201 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: cephadm 2026-03-10T08:36:10.609139+0000 mgr.vm02.ttibzz (mgr.14195) 14 : cephadm [INF] Deploying daemon ceph-exporter.vm07 on vm07 2026-03-10T08:36:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:11 vm02 bash[17473]: cephadm 2026-03-10T08:36:10.609139+0000 mgr.vm02.ttibzz (mgr.14195) 14 : cephadm [INF] Deploying daemon ceph-exporter.vm07 on vm07 2026-03-10T08:36:12.836 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
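Note: the repeated `Waiting for 2 mons in monmap...` records come from the cephadm task polling `ceph mon dump -f json` (the DEBUG lines show the exact cephadm shell invocation it reruns) until the monmap reports both mons; the second mon, mon.vm07, only starts further down. A minimal Python sketch of such a poll loop follows; the helper name `wait_for_mons` and the timeout/interval values are illustrative assumptions, not teuthology's actual implementation.

    # Illustrative sketch only -- not teuthology's actual code. Re-runs the same
    # `cephadm shell ... ceph mon dump -f json` command seen in the DEBUG lines
    # above until the monmap lists the expected number of mons.
    import json
    import subprocess
    import time

    FSID = "e750d050-1c5b-11f1-9e63-531fde0192f6"   # fsid of this run
    IMAGE = "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df"

    def wait_for_mons(want, timeout=300, interval=3):
        deadline = time.time() + timeout
        while time.time() < deadline:
            out = subprocess.check_output([
                "sudo", "/home/ubuntu/cephtest/cephadm", "--image", IMAGE, "shell",
                "-c", "/etc/ceph/ceph.conf",
                "-k", "/etc/ceph/ceph.client.admin.keyring",
                "--fsid", FSID, "--", "ceph", "mon", "dump", "-f", "json",
            ])
            monmap = json.loads(out)
            # "mons" and "quorum" have the shape shown in the stdout dumps above
            if len(monmap["mons"]) >= want:
                return monmap
            time.sleep(interval)
        raise TimeoutError("monmap never reached %d mons" % want)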
2026-03-10T08:36:12.837 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph mon dump -f json
2026-03-10T08:36:13.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:12 vm02 bash[17473]: audit 2026-03-10T08:36:11.626098+0000 mon.vm02 (mon.0) 202 : audit [DBG] from='client.? 192.168.123.107:0/148547622' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-10T08:36:13.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:12 vm02 bash[17473]: audit 2026-03-10T08:36:12.006728+0000 mon.vm02 (mon.0) 203 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:13.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:12 vm02 bash[17473]: audit 2026-03-10T08:36:12.009215+0000 mon.vm02 (mon.0) 204 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:13.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:12 vm02 bash[17473]: audit 2026-03-10T08:36:12.011377+0000 mon.vm02 (mon.0) 205 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:13.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:12 vm02 bash[17473]: audit 2026-03-10T08:36:12.013518+0000 mon.vm02 (mon.0) 206 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:13.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:12 vm02 bash[17473]: audit 2026-03-10T08:36:12.014180+0000 mon.vm02 (mon.0) 207 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm07", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch
2026-03-10T08:36:13.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:12 vm02 bash[17473]: audit 2026-03-10T08:36:12.015124+0000 mon.vm02 (mon.0) 208 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm07", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished
2026-03-10T08:36:13.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:12 vm02 bash[17473]: audit 2026-03-10T08:36:12.016266+0000 mon.vm02 (mon.0) 209 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:36:14.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:13 vm02 bash[17473]: cephadm 2026-03-10T08:36:12.016809+0000 mgr.vm02.ttibzz (mgr.14195) 15 : cephadm [INF] Deploying daemon crash.vm07 on vm07
2026-03-10T08:36:14.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:13 vm02 bash[17473]: audit 2026-03-10T08:36:12.798787+0000 mon.vm02 (mon.0) 210 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:14.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:13 vm02 bash[17473]: audit 2026-03-10T08:36:12.801104+0000 mon.vm02 (mon.0) 211 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:14.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:13 vm02 bash[17473]: audit 2026-03-10T08:36:12.803187+0000 mon.vm02 (mon.0) 212 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:14.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:13 vm02 bash[17473]: audit 2026-03-10T08:36:12.805080+0000 mon.vm02 (mon.0) 213 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:14.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:13 vm02 bash[17473]: cephadm 2026-03-10T08:36:12.805874+0000 mgr.vm02.ttibzz (mgr.14195) 16 : cephadm [INF] Deploying daemon node-exporter.vm07 on vm07
2026-03-10T08:36:14.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:13 vm02 bash[17473]: audit 2026-03-10T08:36:13.531939+0000 mon.vm02 (mon.0) 214 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:14.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:13 vm02 bash[17473]: audit 2026-03-10T08:36:13.534899+0000 mon.vm02 (mon.0) 215 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:14.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:13 vm02 bash[17473]: audit 2026-03-10T08:36:13.537609+0000 mon.vm02 (mon.0) 216 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:14.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:13 vm02 bash[17473]: audit 2026-03-10T08:36:13.564147+0000 mon.vm02 (mon.0) 217 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:14.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:13 vm02 bash[17473]: audit 2026-03-10T08:36:13.565425+0000 mon.vm02 (mon.0) 218 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm07.aunzpk", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-10T08:36:14.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:13 vm02 bash[17473]: audit 2026-03-10T08:36:13.566356+0000 mon.vm02 (mon.0) 219 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.vm07.aunzpk", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished
2026-03-10T08:36:14.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:13 vm02 bash[17473]: audit 2026-03-10T08:36:13.567389+0000 mon.vm02 (mon.0) 220 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-10T08:36:14.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:13 vm02 bash[17473]: audit 2026-03-10T08:36:13.568203+0000 mon.vm02 (mon.0) 221 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:36:14.651 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:14 vm07 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
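Note: each `Deploying daemon ...` record above is immediately preceded by an `auth get-or-create` audit entry, because cephadm mints a keyring for the new daemon before placing it; the JSON `caps` array is a flat list of alternating cap-type/cap-value pairs. Purely for illustration (standard `ceph auth get-or-create` argument order, not a command the test itself runs), the mgr.vm07.aunzpk entry corresponds to:

    # Illustrative only: the CLI form of the audit entry that precedes
    # "Deploying daemon mgr.vm07.aunzpk"; caps are passed as type/value pairs.
    import subprocess

    subprocess.run(
        ["ceph", "auth", "get-or-create", "mgr.vm07.aunzpk",
         "mon", "profile mgr", "osd", "allow *", "mds", "allow *"],
        check=True,
    )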
2026-03-10T08:36:15.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:14 vm02 bash[17473]: cephadm 2026-03-10T08:36:13.568830+0000 mgr.vm02.ttibzz (mgr.14195) 17 : cephadm [INF] Deploying daemon mgr.vm07.aunzpk on vm07 2026-03-10T08:36:15.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:14 vm02 bash[17473]: cephadm 2026-03-10T08:36:13.568830+0000 mgr.vm02.ttibzz (mgr.14195) 17 : cephadm [INF] Deploying daemon mgr.vm07.aunzpk on vm07 2026-03-10T08:36:15.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:14 vm02 bash[17473]: audit 2026-03-10T08:36:14.218426+0000 mon.vm02 (mon.0) 222 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:15.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:14 vm02 bash[17473]: audit 2026-03-10T08:36:14.218426+0000 mon.vm02 (mon.0) 222 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:15.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:14 vm02 bash[17473]: audit 2026-03-10T08:36:14.355427+0000 mon.vm02 (mon.0) 223 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:15.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:14 vm02 bash[17473]: audit 2026-03-10T08:36:14.355427+0000 mon.vm02 (mon.0) 223 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:15.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:14 vm02 bash[17473]: audit 2026-03-10T08:36:14.358794+0000 mon.vm02 (mon.0) 224 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:15.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:14 vm02 bash[17473]: audit 2026-03-10T08:36:14.358794+0000 mon.vm02 (mon.0) 224 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:15.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:14 vm02 bash[17473]: audit 2026-03-10T08:36:14.362376+0000 mon.vm02 (mon.0) 225 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:15.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:14 vm02 bash[17473]: audit 2026-03-10T08:36:14.362376+0000 mon.vm02 (mon.0) 225 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:15.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:14 vm02 bash[17473]: audit 2026-03-10T08:36:14.365532+0000 mon.vm02 (mon.0) 226 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:15.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:14 vm02 bash[17473]: audit 2026-03-10T08:36:14.365532+0000 mon.vm02 (mon.0) 226 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:15.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:14 vm02 bash[17473]: audit 2026-03-10T08:36:14.366852+0000 mon.vm02 (mon.0) 227 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T08:36:15.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:14 vm02 bash[17473]: audit 2026-03-10T08:36:14.366852+0000 mon.vm02 (mon.0) 227 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-10T08:36:15.290 
INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:14 vm02 bash[17473]: audit 2026-03-10T08:36:14.367420+0000 mon.vm02 (mon.0) 228 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:36:15.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:14 vm02 bash[17473]: audit 2026-03-10T08:36:14.367420+0000 mon.vm02 (mon.0) 228 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:36:15.492 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-03-10T08:36:15.492 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-03-10T08:36:15.492 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-03-10T08:36:15.492 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-03-10T08:36:15.492 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 systemd[1]: Started Ceph mon.vm07 for e750d050-1c5b-11f1-9e63-531fde0192f6. 
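Note: the `KillMode=none` warning recurs for every daemon systemd launches from the cephadm unit template `ceph-<fsid>@.service`; the template ships with KillMode=none so that systemd does not kill the container-runtime-managed processes itself, so on this Ubuntu 22.04 systemd the warning is expected noise rather than a failure. Purely for illustration, on a unit you control yourself the warning would be silenced with a drop-in like the following (hypothetical path; do not apply this to a cephadm-managed cluster), followed by `systemctl daemon-reload`:

    # hypothetical drop-in: /etc/systemd/system/myservice.service.d/override.conf
    [Service]
    KillMode=mixed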
2026-03-10T08:36:15.794 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.770+0000 7f94eda63d80 0 set uid:gid to 167:167 (ceph:ceph)
2026-03-10T08:36:15.794 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.770+0000 7f94eda63d80 0 ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable), process ceph-mon, pid 7
2026-03-10T08:36:15.794 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.770+0000 7f94eda63d80 0 pidfile_write: ignore empty --pid-file
2026-03-10T08:36:15.794 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.770+0000 7f94eda63d80 0 load: jerasure load: lrc
2026-03-10T08:36:15.794 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: RocksDB version: 7.9.2
2026-03-10T08:36:15.794 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Git sha 0
2026-03-10T08:36:15.794 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Compile date 2026-02-25 18:11:04
2026-03-10T08:36:15.794 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: DB SUMMARY
2026-03-10T08:36:15.794 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: DB Session ID: R5H6VRFZK1FSHL80WBB8
2026-03-10T08:36:15.794 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: CURRENT file: CURRENT
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: IDENTITY file: IDENTITY
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: MANIFEST file: MANIFEST-000005 size: 59 Bytes
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: SST files in /var/lib/ceph/mon/ceph-vm07/store.db dir, Total Num: 0, files:
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-vm07/store.db: 000004.log size: 511 ;
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.error_if_exists: 0
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.create_if_missing: 0
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.paranoid_checks: 1
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.flush_verify_memtable_count: 1
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.track_and_verify_wals_in_manifest: 0
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.verify_sst_unique_id_in_manifest: 1
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.env: 0x558bb4e4bdc0
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.fs: PosixFileSystem
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.info_log: 0x558bc76b19a0
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.max_file_opening_threads: 16
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.statistics: (nil)
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.use_fsync: 0
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.max_log_file_size: 0
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.max_manifest_file_size: 1073741824
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.log_file_time_to_roll: 0
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.keep_log_file_num: 1000
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.recycle_log_file_num: 0
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.allow_fallocate: 1
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.allow_mmap_reads: 0
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.allow_mmap_writes: 0
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.use_direct_reads: 0
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.use_direct_io_for_flush_and_compaction: 0
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.create_missing_column_families: 0
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.db_log_dir:
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.wal_dir:
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.table_cache_numshardbits: 6
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.WAL_ttl_seconds: 0
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.WAL_size_limit_MB: 0
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.max_write_batch_group_size_bytes: 1048576
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.manifest_preallocation_size: 4194304
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.is_fd_close_on_exec: 1
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.advise_random_on_open: 1
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.db_write_buffer_size: 0
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.write_buffer_manager: 0x558bc76b5900
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.access_hint_on_compaction_start: 1
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.random_access_max_buffer_size: 1048576
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.use_adaptive_mutex: 0
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.rate_limiter: (nil)
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.wal_recovery_mode: 2
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.enable_thread_tracking: 0
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.enable_pipelined_write: 0
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.unordered_write: 0
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.allow_concurrent_memtable_write: 1
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.enable_write_thread_adaptive_yield: 1
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.write_thread_max_yield_usec: 100
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.write_thread_slow_yield_usec: 3
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.row_cache: None
2026-03-10T08:36:16.081 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.wal_filter: None
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.avoid_flush_during_recovery: 0
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.allow_ingest_behind: 0
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.two_write_queues: 0
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.manual_wal_flush: 0
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.wal_compression: 0
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.atomic_flush: 0
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.avoid_unnecessary_blocking_io: 0
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.persist_stats_to_disk: 0
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.write_dbid_to_manifest: 0
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.log_readahead_size: 0
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.file_checksum_gen_factory: Unknown
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.best_efforts_recovery: 0
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.max_bgerror_resume_count: 2147483647
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.bgerror_resume_retry_interval: 1000000
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.allow_data_in_errors: 0
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.db_host_id: __hostname__
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.enforce_single_del_contracts: true
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.max_background_jobs: 2
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.max_background_compactions: -1
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.max_subcompactions: 1
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.avoid_flush_during_shutdown: 0
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.writable_file_max_buffer_size: 1048576
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.delayed_write_rate : 16777216
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.max_total_wal_size: 0
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.delete_obsolete_files_period_micros: 21600000000
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.stats_dump_period_sec: 600
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.stats_persist_period_sec: 600
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.stats_history_buffer_size: 1048576
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.max_open_files: -1 2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.bytes_per_sync: 0 2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.wal_bytes_per_sync: 0 2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.strict_bytes_per_sync: 0 2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.compaction_readahead_size: 0 2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.max_background_flushes: -1 2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Compression algorithms supported: 2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: kZSTD supported: 0 2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: kXpressCompression supported: 0 2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: kBZip2Compression supported: 0 2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: kLZ4Compression supported: 1 2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: kZlibCompression supported: 1 2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: kLZ4HCCompression supported: 1 2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: kSnappyCompression supported: 1 2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Fast CRC32 supported: Supported on x86 2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: DMutex implementation: pthread_mutex_t 2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-vm07/store.db/MANIFEST-000005 2026-03-10T08:36:16.082 
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]:
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.comparator: leveldb.BytewiseComparator
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.merge_operator:
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.compaction_filter: None
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.compaction_filter_factory: None
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.sst_partitioner_factory: None
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.memtable_factory: SkipListFactory
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.table_factory: BlockBasedTable
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x558bc76b1600)
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cache_index_and_filter_blocks: 1
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cache_index_and_filter_blocks_with_high_priority: 0
2026-03-10T08:36:16.082 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: pin_l0_filter_and_index_blocks_in_cache: 0
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: pin_top_level_index_and_filter: 1
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: index_type: 0
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: data_block_index_type: 0
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: index_shortening: 1
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: data_block_hash_table_util_ratio: 0.750000
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: checksum: 4
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: no_block_cache: 0
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: block_cache: 0x558bc76d69b0
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: block_cache_name: BinnedLRUCache
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: block_cache_options:
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: capacity : 536870912
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: num_shard_bits : 4
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: strict_capacity_limit : 0
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: high_pri_pool_ratio: 0.000
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: block_cache_compressed: (nil)
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: persistent_cache: (nil)
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: block_size: 4096
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: block_size_deviation: 10
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: block_restart_interval: 16
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: index_block_restart_interval: 1
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: metadata_block_size: 4096
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: partition_filters: 0
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: use_delta_encoding: 1
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: filter_policy: bloomfilter
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: whole_key_filtering: 1
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: verify_compression: 0
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: read_amp_bytes_per_bit: 0
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: format_version: 5
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: enable_index_compression: 1
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: block_align: 0
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: max_auto_readahead_size: 262144
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: prepopulate_block_cache: 0
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: initial_auto_readahead_size: 8192
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: num_file_reads_for_auto_readahead: 2
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.write_buffer_size: 33554432
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.max_write_buffer_number: 2
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.compression: NoCompression
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.bottommost_compression: Disabled
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.prefix_extractor: nullptr
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.num_levels: 7
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.min_write_buffer_number_to_merge: 1
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.max_write_buffer_number_to_maintain: 0
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.max_write_buffer_size_to_maintain: 0
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.bottommost_compression_opts.window_bits: -14
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.bottommost_compression_opts.level: 32767
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.bottommost_compression_opts.strategy: 0
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.bottommost_compression_opts.parallel_threads: 1
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.bottommost_compression_opts.enabled: false
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.compression_opts.window_bits: -14
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.compression_opts.level: 32767
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.compression_opts.strategy: 0
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.compression_opts.max_dict_bytes: 0
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.compression_opts.zstd_max_train_bytes: 0
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.compression_opts.use_zstd_dict_trainer: true
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.compression_opts.parallel_threads: 1
2026-03-10T08:36:16.083 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.compression_opts.enabled: false
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.level0_file_num_compaction_trigger: 4
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.level0_slowdown_writes_trigger: 20
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.level0_stop_writes_trigger: 36
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.target_file_size_base: 67108864
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.target_file_size_multiplier: 1
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.max_bytes_for_level_base: 268435456
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.level_compaction_dynamic_level_bytes: 1
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.max_bytes_for_level_multiplier: 10.000000
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.max_sequential_skip_in_iterations: 8
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.max_compaction_bytes: 1677721600
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.ignore_max_compaction_bytes_for_input: true
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.arena_block_size: 1048576
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.disable_auto_compactions: 0
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.compaction_style: kCompactionStyleLevel
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.compaction_pri: kMinOverlappingRatio
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.compaction_options_universal.size_ratio: 1
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.compaction_options_universal.min_merge_width: 2
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.compaction_options_universal.compression_size_percent: -1
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.compaction_options_fifo.allow_compaction: 0
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.inplace_update_support: 0
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.inplace_update_num_locks: 10000
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.memtable_whole_key_filtering: 0
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.memtable_huge_page_size: 0
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.bloom_locality: 0
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.max_successive_merges: 0
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.optimize_filters_for_hits: 0
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.paranoid_file_checks: 0
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.force_consistency_checks: 1
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.report_bg_io_stats: 0
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.ttl: 2592000
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.periodic_compaction_seconds: 0
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.preclude_last_level_data_seconds: 0
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.preserve_internal_time_seconds: 0
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.enable_blob_files: false
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.min_blob_size: 0
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.blob_file_size: 268435456
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.blob_compression_type: NoCompression
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.enable_blob_garbage_collection: false
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.blob_compaction_readahead_size: 0
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.blob_file_starting_level: 0
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.782+0000 7f94eda63d80 4 rocksdb: Options.experimental_mempurge_threshold: 0.000000
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.786+0000 7f94eda63d80 4 rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-vm07/store.db/MANIFEST-000005 succeeded,manifest_file_number is 5, next_file_number is 7, last_sequence is 0, log_number is 0,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0
2026-03-10T08:36:16.084 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.786+0000 7f94eda63d80 4 rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 0
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.786+0000 7f94eda63d80 4 rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 53506a52-2878-4888-9af9-c61a974387d2
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.786+0000 7f94eda63d80 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1773131775791427, "job": 1, "event": "recovery_started", "wal_files": [4]}
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.786+0000 7f94eda63d80 4 rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #4 mode 2
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.798+0000 7f94eda63d80 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1773131775803798, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 8, "file_size": 1643, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 1, "largest_seqno": 5, "table_properties": {"data_size": 523, "index_size": 31, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 69, "raw_key_size": 115, "raw_average_key_size": 23, "raw_value_size": 401, "raw_average_value_size": 80, "num_data_blocks": 1, "num_entries": 5, "num_filter_entries": 5, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1773131775, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "53506a52-2878-4888-9af9-c61a974387d2", "db_session_id": "R5H6VRFZK1FSHL80WBB8", "orig_file_number": 8, "seqno_to_time_mapping": "N/A"}}
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.798+0000 7f94eda63d80 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1773131775803917, "job": 1, "event": "recovery_finished"}
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.798+0000 7f94eda63d80 4 rocksdb: [db/version_set.cc:5047] Creating manifest 10
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.802+0000 7f94eda63d80 4 rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-vm07/store.db/000004.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.802+0000 7f94eda63d80 4 rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x558bc76d8e00
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.802+0000 7f94eda63d80 4 rocksdb: DB pointer 0x558bc77ee000
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.802+0000 7f94eda63d80 0 mon.vm07 does not exist in monmap, will attempt to join an existing cluster
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.802+0000 7f94eda63d80 0 using public_addr v2:192.168.123.107:0/0 -> [v2:192.168.123.107:3300/0,v1:192.168.123.107:6789/0]
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.802+0000 7f94e382d640 4 rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.802+0000 7f94e382d640 4 rocksdb: [db/db_impl/db_impl.cc:1111]
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: ** DB Stats **
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: Uptime(secs): 0.0 total, 0.0 interval
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: Interval stall: 00:00:0.000 H:M:S, 0.0 percent
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: ** Compaction Stats [default] **
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: L0 1/0 1.60 KB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.1 0.01 0.00 1 0.012 0 0 0.0 0.0
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: Sum 1/0 1.60 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.1 0.01 0.00 1 0.012 0 0 0.0 0.0
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.1 0.01 0.00 1 0.012 0 0 0.0 0.0
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: ** Compaction Stats [default] **
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.1 0.01 0.00 1 0.012 0 0 0.0 0.0
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: Uptime(secs): 0.0 total, 0.0 interval
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: Flush(GB): cumulative 0.000, interval 0.000
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: AddFile(GB): cumulative 0.000, interval 0.000
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: AddFile(Total Files): cumulative 0, interval 0
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: AddFile(L0 Files): cumulative 0, interval 0
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: AddFile(Keys): cumulative 0, interval 0
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: Cumulative compaction: 0.00 GB write, 0.07 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: Interval compaction: 0.00 GB write, 0.07 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: Block cache BinnedLRUCache@0x558bc76d69b0#7 capacity: 512.00 MB usage: 0.86 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 5e-06 secs_since: 0
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: Block cache entry stats(count,size,portion): DataBlock(1,0.64 KB,0.00012219%) FilterBlock(1,0.11 KB,2.08616e-05%) IndexBlock(1,0.11 KB,2.08616e-05%) Misc(1,0.00 KB,0%)
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: ** File Read Latency Histogram By Level [default] **
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.810+0000 7f94eda63d80 0 starting mon.vm07 rank -1 at public addrs [v2:192.168.123.107:3300/0,v1:192.168.123.107:6789/0] at bind addrs [v2:192.168.123.107:3300/0,v1:192.168.123.107:6789/0] mon_data /var/lib/ceph/mon/ceph-vm07 fsid e750d050-1c5b-11f1-9e63-531fde0192f6
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.810+0000 7f94eda63d80 1 mon.vm07@-1(???) e0 preinit fsid e750d050-1c5b-11f1-9e63-531fde0192f6
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.862+0000 7f94e6833640 0 mon.vm07@-1(synchronizing).mds e1 new map
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.862+0000 7f94e6833640 0 mon.vm07@-1(synchronizing).mds e1 print_map
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: e1
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: btime 2026-03-10T08:35:03.450016+0000
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: enable_multiple, ever_enabled_multiple: 1,1
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2,11=minor log segments,12=quiesce subvolumes}
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: legacy client fscid: -1
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]:
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: No filesystems configured
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.862+0000 7f94e6833640 1 mon.vm07@-1(synchronizing).osd e0 _set_cache_ratios kv ratio 0.25 inc ratio 0.375 full ratio 0.375
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.862+0000 7f94e6833640 1 mon.vm07@-1(synchronizing).osd e0 register_cache_with_pcm pcm target: 2147483648 pcm max: 1020054732 pcm min: 134217728 inc_osd_cache size: 1
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.862+0000 7f94e6833640 1 mon.vm07@-1(synchronizing).osd e1 e1: 0 total, 0 up, 0 in
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.862+0000 7f94e6833640 1 mon.vm07@-1(synchronizing).osd e2 e2: 0 total, 0 up, 0 in
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.862+0000 7f94e6833640 1 mon.vm07@-1(synchronizing).osd e3 e3: 0 total, 0 up, 0 in
2026-03-10T08:36:16.085 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.862+0000 7f94e6833640 1 mon.vm07@-1(synchronizing).osd e4 e4: 0 total, 0 up, 0 in
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.862+0000 7f94e6833640 1 mon.vm07@-1(synchronizing).osd e5 e5: 0 total, 0 up, 0 in
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.862+0000 7f94e6833640 0 mon.vm07@-1(synchronizing).osd e5 crush map has features 3314932999778484224, adjusting msgr requires
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.862+0000 7f94e6833640 0 mon.vm07@-1(synchronizing).osd e5 crush map has features 288514050185494528, adjusting msgr requires
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.862+0000 7f94e6833640 0 mon.vm07@-1(synchronizing).osd e5 crush map has features 288514050185494528, adjusting msgr requires
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: debug 2026-03-10T08:36:15.862+0000 7f94e6833640 0 mon.vm07@-1(synchronizing).osd e5 crush map has features 288514050185494528, adjusting msgr requires
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:03.450456+0000 mon.vm02 (mon.0) 0 : cluster [INF] mkfs e750d050-1c5b-11f1-9e63-531fde0192f6
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:03.450456+0000 mon.vm02 (mon.0) 0 : cluster [INF] mkfs e750d050-1c5b-11f1-9e63-531fde0192f6
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:03.445440+0000 mon.vm02 (mon.0) 1 : cluster [INF] mon.vm02 is new leader, mons vm02 in quorum (ranks 0)
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:03.445440+0000 mon.vm02 (mon.0) 1 : cluster [INF] mon.vm02 is new leader, mons vm02 in quorum (ranks 0)
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:04.427873+0000 mon.vm02 (mon.0) 1 : cluster [INF] mon.vm02 is new leader, mons vm02 in quorum (ranks 0)
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:04.427873+0000 mon.vm02 (mon.0) 1 : cluster [INF] mon.vm02 is new leader, mons vm02 in quorum (ranks 0)
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:04.427912+0000 mon.vm02 (mon.0) 2 : cluster [DBG] monmap epoch 1
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:04.427912+0000 mon.vm02 (mon.0) 2 : cluster [DBG] monmap epoch 1
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:04.427925+0000 mon.vm02 (mon.0) 3 : cluster [DBG] fsid e750d050-1c5b-11f1-9e63-531fde0192f6
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:04.427925+0000 mon.vm02 (mon.0) 3 : cluster [DBG] fsid e750d050-1c5b-11f1-9e63-531fde0192f6
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:04.427928+0000 mon.vm02 (mon.0) 4 : cluster [DBG] last_changed 2026-03-10T08:35:02.208022+0000
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:04.427928+0000 mon.vm02 (mon.0) 4 : cluster [DBG] last_changed 2026-03-10T08:35:02.208022+0000
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:04.427936+0000 mon.vm02 (mon.0) 5 : cluster [DBG] created 2026-03-10T08:35:02.208022+0000
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:04.427936+0000 mon.vm02 (mon.0) 5 : cluster [DBG] created 2026-03-10T08:35:02.208022+0000
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:04.427940+0000 mon.vm02 (mon.0) 6 : cluster [DBG] min_mon_release 19 (squid)
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:04.427940+0000 mon.vm02 (mon.0) 6 : cluster [DBG] min_mon_release 19 (squid)
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:04.427944+0000 mon.vm02 (mon.0) 7 : cluster [DBG] election_strategy: 1
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:04.427944+0000 mon.vm02 (mon.0) 7 : cluster [DBG] election_strategy: 1
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:04.427949+0000 mon.vm02 (mon.0) 8 : cluster [DBG] 0: [v2:192.168.123.102:3300/0,v1:192.168.123.102:6789/0] mon.vm02
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:04.427949+0000 mon.vm02 (mon.0) 8 : cluster [DBG] 0: [v2:192.168.123.102:3300/0,v1:192.168.123.102:6789/0] mon.vm02
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:04.428183+0000 mon.vm02 (mon.0) 9 : cluster [DBG] fsmap
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:04.428183+0000 mon.vm02 (mon.0) 9 : cluster [DBG] fsmap
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:04.428195+0000 mon.vm02 (mon.0) 10 : cluster [DBG] osdmap e1: 0 total, 0 up, 0 in
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:04.428195+0000 mon.vm02 (mon.0) 10 : cluster [DBG] osdmap e1: 0 total, 0 up, 0 in
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:04.428855+0000 mon.vm02 (mon.0) 11 : cluster [DBG] mgrmap e1: no daemons active
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:04.428855+0000 mon.vm02 (mon.0) 11 : cluster [DBG] mgrmap e1: no daemons active
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:04.476248+0000 mon.vm02 (mon.0) 12 : audit [INF] from='client.? 192.168.123.102:0/1429830139' entity='client.admin'
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:04.476248+0000 mon.vm02 (mon.0) 12 : audit [INF] from='client.? 192.168.123.102:0/1429830139' entity='client.admin'
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:05.120808+0000 mon.vm02 (mon.0) 13 : audit [DBG] from='client.? 192.168.123.102:0/2467793842' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:05.120808+0000 mon.vm02 (mon.0) 13 : audit [DBG] from='client.? 192.168.123.102:0/2467793842' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:07.397830+0000 mon.vm02 (mon.0) 14 : audit [DBG] from='client.? 192.168.123.102:0/511057920' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:07.397830+0000 mon.vm02 (mon.0) 14 : audit [DBG] from='client.? 192.168.123.102:0/511057920' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:08.510712+0000 mon.vm02 (mon.0) 15 : cluster [INF] Activating manager daemon vm02.ttibzz
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:08.510712+0000 mon.vm02 (mon.0) 15 : cluster [INF] Activating manager daemon vm02.ttibzz
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:08.515550+0000 mon.vm02 (mon.0) 16 : cluster [DBG] mgrmap e2: vm02.ttibzz(active, starting, since 0.00497153s)
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:08.515550+0000 mon.vm02 (mon.0) 16 : cluster [DBG] mgrmap e2: vm02.ttibzz(active, starting, since 0.00497153s)
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:08.519229+0000 mon.vm02 (mon.0) 17 : audit [DBG] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:08.519229+0000 mon.vm02 (mon.0) 17 : audit [DBG] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:08.519733+0000 mon.vm02 (mon.0) 18 : audit [DBG] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:08.519733+0000 mon.vm02 (mon.0) 18 : audit [DBG] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:08.520120+0000 mon.vm02 (mon.0) 19 : audit [DBG] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:08.520120+0000 mon.vm02 (mon.0) 19 : audit [DBG] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:08.520478+0000 mon.vm02 (mon.0) 20 : audit [DBG] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:08.520478+0000 mon.vm02 (mon.0) 20 : audit [DBG] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:08.520858+0000 mon.vm02 (mon.0) 21 : audit [DBG] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:08.520858+0000 mon.vm02 (mon.0) 21 : audit [DBG] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:08.521214+0000 mon.vm02 (mon.0) 22 : audit [DBG] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-10T08:36:16.086 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:08.521214+0000 mon.vm02 (mon.0) 22 : audit [DBG] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:08.522434+0000 mon.vm02 (mon.0) 23 : audit [DBG] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata", "id": "vm02"}]: dispatch
2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:08.522434+0000 mon.vm02 (mon.0) 23 : audit [DBG] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata", "id": "vm02"}]: dispatch
2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:08.523295+0000 mon.vm02 (mon.0) 24 : audit [DBG] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mgr metadata", "who": "vm02.ttibzz", "id": "vm02.ttibzz"}]: dispatch
2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:08.523295+0000 mon.vm02 (mon.0) 24 : audit [DBG] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mgr metadata", "who": "vm02.ttibzz", "id": "vm02.ttibzz"}]: dispatch
2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:08.530931+0000 mon.vm02 (mon.0) 25 : cluster [INF] Manager daemon vm02.ttibzz is now available
2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:08.530931+0000 mon.vm02 (mon.0) 25 : cluster [INF] Manager daemon vm02.ttibzz is now available
2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:08.541005+0000 mon.vm02 (mon.0) 26 : audit [INF] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:08.541005+0000 mon.vm02 (mon.0) 26 : audit [INF] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:08.541862+0000 mon.vm02 (mon.0) 27 : audit [INF] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm02.ttibzz/mirror_snapshot_schedule"}]: dispatch
2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:08.541862+0000 mon.vm02 (mon.0) 27 : audit [INF] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm02.ttibzz/mirror_snapshot_schedule"}]: dispatch
2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:08.544761+0000 mon.vm02 (mon.0) 28 : audit [INF] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:08.544761+0000 mon.vm02 (mon.0) 28 : audit [INF] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:08.546694+0000 mon.vm02 (mon.0) 29 : audit [INF] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm02.ttibzz/trash_purge_schedule"}]: dispatch
2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:08.546694+0000 mon.vm02 (mon.0) 29 : audit [INF] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm02.ttibzz/trash_purge_schedule"}]: dispatch
2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:08.549490+0000 mon.vm02 (mon.0) 30 : audit [INF] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:08.549490+0000 mon.vm02 (mon.0) 30 : audit [INF] from='mgr.14100 192.168.123.102:0/89125631' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:09.519323+0000 mon.vm02 (mon.0) 31 : cluster [DBG] mgrmap e3: vm02.ttibzz(active, since 1.00875s)
2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:09.519323+0000 mon.vm02 (mon.0) 31 : cluster [DBG] mgrmap e3: vm02.ttibzz(active, since 1.00875s)
2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit
2026-03-10T08:35:09.744889+0000 mon.vm02 (mon.0) 32 : audit [DBG] from='client.? 192.168.123.102:0/4227904123' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:09.744889+0000 mon.vm02 (mon.0) 32 : audit [DBG] from='client.? 192.168.123.102:0/4227904123' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:10.040006+0000 mon.vm02 (mon.0) 33 : audit [INF] from='client.? 192.168.123.102:0/56060668' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:10.040006+0000 mon.vm02 (mon.0) 33 : audit [INF] from='client.? 192.168.123.102:0/56060668' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:10.328476+0000 mon.vm02 (mon.0) 34 : audit [INF] from='client.? 192.168.123.102:0/866742214' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "cephadm"}]: dispatch 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:10.328476+0000 mon.vm02 (mon.0) 34 : audit [INF] from='client.? 192.168.123.102:0/866742214' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "cephadm"}]: dispatch 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:10.524545+0000 mon.vm02 (mon.0) 35 : audit [INF] from='client.? 192.168.123.102:0/866742214' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:10.524545+0000 mon.vm02 (mon.0) 35 : audit [INF] from='client.? 192.168.123.102:0/866742214' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:10.528320+0000 mon.vm02 (mon.0) 36 : cluster [DBG] mgrmap e4: vm02.ttibzz(active, since 2s) 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:10.528320+0000 mon.vm02 (mon.0) 36 : cluster [DBG] mgrmap e4: vm02.ttibzz(active, since 2s) 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:10.899248+0000 mon.vm02 (mon.0) 37 : audit [DBG] from='client.? 192.168.123.102:0/115395052' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:10.899248+0000 mon.vm02 (mon.0) 37 : audit [DBG] from='client.? 
192.168.123.102:0/115395052' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:13.970758+0000 mon.vm02 (mon.0) 38 : cluster [INF] Active manager daemon vm02.ttibzz restarted 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:13.970758+0000 mon.vm02 (mon.0) 38 : cluster [INF] Active manager daemon vm02.ttibzz restarted 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:13.970985+0000 mon.vm02 (mon.0) 39 : cluster [INF] Activating manager daemon vm02.ttibzz 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:13.970985+0000 mon.vm02 (mon.0) 39 : cluster [INF] Activating manager daemon vm02.ttibzz 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:13.975582+0000 mon.vm02 (mon.0) 40 : cluster [DBG] osdmap e2: 0 total, 0 up, 0 in 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:13.975582+0000 mon.vm02 (mon.0) 40 : cluster [DBG] osdmap e2: 0 total, 0 up, 0 in 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:13.975711+0000 mon.vm02 (mon.0) 41 : cluster [DBG] mgrmap e5: vm02.ttibzz(active, starting, since 0.00482893s) 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:13.975711+0000 mon.vm02 (mon.0) 41 : cluster [DBG] mgrmap e5: vm02.ttibzz(active, starting, since 0.00482893s) 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:13.978897+0000 mon.vm02 (mon.0) 42 : audit [DBG] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata", "id": "vm02"}]: dispatch 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:13.978897+0000 mon.vm02 (mon.0) 42 : audit [DBG] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata", "id": "vm02"}]: dispatch 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:13.979183+0000 mon.vm02 (mon.0) 43 : audit [DBG] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mgr metadata", "who": "vm02.ttibzz", "id": "vm02.ttibzz"}]: dispatch 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:13.979183+0000 mon.vm02 (mon.0) 43 : audit [DBG] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mgr metadata", "who": "vm02.ttibzz", "id": "vm02.ttibzz"}]: dispatch 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:13.980115+0000 mon.vm02 (mon.0) 44 : audit [DBG] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:13.980115+0000 mon.vm02 (mon.0) 44 : audit 
[DBG] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:13.980322+0000 mon.vm02 (mon.0) 45 : audit [DBG] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:13.980322+0000 mon.vm02 (mon.0) 45 : audit [DBG] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:13.980524+0000 mon.vm02 (mon.0) 46 : audit [DBG] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:13.980524+0000 mon.vm02 (mon.0) 46 : audit [DBG] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:13.987234+0000 mon.vm02 (mon.0) 47 : cluster [INF] Manager daemon vm02.ttibzz is now available 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:13.987234+0000 mon.vm02 (mon.0) 47 : cluster [INF] Manager daemon vm02.ttibzz is now available 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:13.996714+0000 mon.vm02 (mon.0) 48 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:13.996714+0000 mon.vm02 (mon.0) 48 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:14.000504+0000 mon.vm02 (mon.0) 49 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:14.000504+0000 mon.vm02 (mon.0) 49 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:14.013453+0000 mon.vm02 (mon.0) 50 : audit [DBG] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:14.013453+0000 mon.vm02 (mon.0) 50 : audit [DBG] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:14.013633+0000 mon.vm02 (mon.0) 51 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix":"config 
rm","who":"mgr","name":"mgr/rbd_support/vm02.ttibzz/mirror_snapshot_schedule"}]: dispatch 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:14.013633+0000 mon.vm02 (mon.0) 51 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm02.ttibzz/mirror_snapshot_schedule"}]: dispatch 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:14.016094+0000 mon.vm02 (mon.0) 52 : audit [DBG] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:14.016094+0000 mon.vm02 (mon.0) 52 : audit [DBG] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:36:16.087 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:13.994191+0000 mgr.vm02.ttibzz (mgr.14118) 1 : cephadm [INF] Found migration_current of "None". Setting to last migration. 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:13.994191+0000 mgr.vm02.ttibzz (mgr.14118) 1 : cephadm [INF] Found migration_current of "None". Setting to last migration. 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:14.027953+0000 mon.vm02 (mon.0) 53 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm02.ttibzz/trash_purge_schedule"}]: dispatch 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:14.027953+0000 mon.vm02 (mon.0) 53 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm02.ttibzz/trash_purge_schedule"}]: dispatch 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:14.642072+0000 mon.vm02 (mon.0) 54 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:14.642072+0000 mon.vm02 (mon.0) 54 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:14.645075+0000 mon.vm02 (mon.0) 55 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:14.645075+0000 mon.vm02 (mon.0) 55 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:14.979402+0000 mon.vm02 (mon.0) 56 : cluster [DBG] mgrmap e6: vm02.ttibzz(active, since 1.00852s) 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 
2026-03-10T08:35:14.979402+0000 mon.vm02 (mon.0) 56 : cluster [DBG] mgrmap e6: vm02.ttibzz(active, since 1.00852s) 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:14.979954+0000 mgr.vm02.ttibzz (mgr.14118) 2 : audit [DBG] from='client.14122 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:14.979954+0000 mgr.vm02.ttibzz (mgr.14118) 2 : audit [DBG] from='client.14122 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:14.983865+0000 mgr.vm02.ttibzz (mgr.14118) 3 : audit [DBG] from='client.14122 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:14.983865+0000 mgr.vm02.ttibzz (mgr.14118) 3 : audit [DBG] from='client.14122 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:15.223374+0000 mgr.vm02.ttibzz (mgr.14118) 4 : cephadm [INF] [10/Mar/2026:08:35:15] ENGINE Bus STARTING 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:15.223374+0000 mgr.vm02.ttibzz (mgr.14118) 4 : cephadm [INF] [10/Mar/2026:08:35:15] ENGINE Bus STARTING 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:15.329311+0000 mgr.vm02.ttibzz (mgr.14118) 5 : audit [DBG] from='client.14130 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:15.329311+0000 mgr.vm02.ttibzz (mgr.14118) 5 : audit [DBG] from='client.14130 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:15.332622+0000 mon.vm02 (mon.0) 57 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:15.332622+0000 mon.vm02 (mon.0) 57 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:15.337143+0000 mon.vm02 (mon.0) 58 : audit [DBG] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:15.337143+0000 mon.vm02 (mon.0) 58 : audit [DBG] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:15.341161+0000 
mgr.vm02.ttibzz (mgr.14118) 6 : cephadm [INF] [10/Mar/2026:08:35:15] ENGINE Serving on https://192.168.123.102:7150 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:15.341161+0000 mgr.vm02.ttibzz (mgr.14118) 6 : cephadm [INF] [10/Mar/2026:08:35:15] ENGINE Serving on https://192.168.123.102:7150 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:15.341606+0000 mgr.vm02.ttibzz (mgr.14118) 7 : cephadm [INF] [10/Mar/2026:08:35:15] ENGINE Client ('192.168.123.102', 42084) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:15.341606+0000 mgr.vm02.ttibzz (mgr.14118) 7 : cephadm [INF] [10/Mar/2026:08:35:15] ENGINE Client ('192.168.123.102', 42084) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:15.442210+0000 mgr.vm02.ttibzz (mgr.14118) 8 : cephadm [INF] [10/Mar/2026:08:35:15] ENGINE Serving on http://192.168.123.102:8765 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:15.442210+0000 mgr.vm02.ttibzz (mgr.14118) 8 : cephadm [INF] [10/Mar/2026:08:35:15] ENGINE Serving on http://192.168.123.102:8765 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:15.442248+0000 mgr.vm02.ttibzz (mgr.14118) 9 : cephadm [INF] [10/Mar/2026:08:35:15] ENGINE Bus STARTED 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:15.442248+0000 mgr.vm02.ttibzz (mgr.14118) 9 : cephadm [INF] [10/Mar/2026:08:35:15] ENGINE Bus STARTED 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:15.442961+0000 mon.vm02 (mon.0) 59 : audit [DBG] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:15.442961+0000 mon.vm02 (mon.0) 59 : audit [DBG] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:15.885748+0000 mon.vm02 (mon.0) 60 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:15.885748+0000 mon.vm02 (mon.0) 60 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:15.888187+0000 mon.vm02 (mon.0) 61 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 
2026-03-10T08:35:15.888187+0000 mon.vm02 (mon.0) 61 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:15.606084+0000 mgr.vm02.ttibzz (mgr.14118) 10 : audit [DBG] from='client.14132 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "root", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:15.606084+0000 mgr.vm02.ttibzz (mgr.14118) 10 : audit [DBG] from='client.14132 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "root", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:15.867576+0000 mgr.vm02.ttibzz (mgr.14118) 11 : audit [DBG] from='client.14134 -' entity='client.admin' cmd=[{"prefix": "cephadm generate-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:15.867576+0000 mgr.vm02.ttibzz (mgr.14118) 11 : audit [DBG] from='client.14134 -' entity='client.admin' cmd=[{"prefix": "cephadm generate-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:15.867793+0000 mgr.vm02.ttibzz (mgr.14118) 12 : cephadm [INF] Generating ssh key... 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:15.867793+0000 mgr.vm02.ttibzz (mgr.14118) 12 : cephadm [INF] Generating ssh key... 
2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:16.145455+0000 mgr.vm02.ttibzz (mgr.14118) 13 : audit [DBG] from='client.14136 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:16.145455+0000 mgr.vm02.ttibzz (mgr.14118) 13 : audit [DBG] from='client.14136 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:16.426254+0000 mgr.vm02.ttibzz (mgr.14118) 14 : audit [DBG] from='client.14138 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm02", "addr": "192.168.123.102", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:16.426254+0000 mgr.vm02.ttibzz (mgr.14118) 14 : audit [DBG] from='client.14138 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm02", "addr": "192.168.123.102", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:16.892113+0000 mon.vm02 (mon.0) 62 : cluster [DBG] mgrmap e7: vm02.ttibzz(active, since 2s) 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:16.892113+0000 mon.vm02 (mon.0) 62 : cluster [DBG] mgrmap e7: vm02.ttibzz(active, since 2s) 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:16.989872+0000 mgr.vm02.ttibzz (mgr.14118) 15 : cephadm [INF] Deploying cephadm binary to vm02 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:16.989872+0000 mgr.vm02.ttibzz (mgr.14118) 15 : cephadm [INF] Deploying cephadm binary to vm02 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:18.287797+0000 mon.vm02 (mon.0) 63 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:18.287797+0000 mon.vm02 (mon.0) 63 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:18.288518+0000 mon.vm02 (mon.0) 64 : audit [DBG] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:18.288518+0000 mon.vm02 (mon.0) 64 : audit [DBG] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:18.288219+0000 mgr.vm02.ttibzz (mgr.14118) 16 : cephadm [INF] Added host vm02 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 
bash[20988]: cephadm 2026-03-10T08:35:18.288219+0000 mgr.vm02.ttibzz (mgr.14118) 16 : cephadm [INF] Added host vm02 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:18.619056+0000 mon.vm02 (mon.0) 65 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:18.619056+0000 mon.vm02 (mon.0) 65 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:18.885765+0000 mon.vm02 (mon.0) 66 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:18.885765+0000 mon.vm02 (mon.0) 66 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.088 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:19.173440+0000 mon.vm02 (mon.0) 67 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:19.173440+0000 mon.vm02 (mon.0) 67 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:18.615530+0000 mgr.vm02.ttibzz (mgr.14118) 17 : audit [DBG] from='client.14140 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:18.615530+0000 mgr.vm02.ttibzz (mgr.14118) 17 : audit [DBG] from='client.14140 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:18.616338+0000 mgr.vm02.ttibzz (mgr.14118) 18 : cephadm [INF] Saving service mon spec with placement count:5 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:18.616338+0000 mgr.vm02.ttibzz (mgr.14118) 18 : cephadm [INF] Saving service mon spec with placement count:5 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:18.882176+0000 mgr.vm02.ttibzz (mgr.14118) 19 : audit [DBG] from='client.14142 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:18.882176+0000 mgr.vm02.ttibzz (mgr.14118) 19 : audit [DBG] from='client.14142 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:18.882982+0000 mgr.vm02.ttibzz (mgr.14118) 20 : cephadm [INF] Saving service mgr spec with placement count:2 
2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:18.882982+0000 mgr.vm02.ttibzz (mgr.14118) 20 : cephadm [INF] Saving service mgr spec with placement count:2 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:19.168374+0000 mgr.vm02.ttibzz (mgr.14118) 21 : audit [DBG] from='client.14144 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "crash", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:19.168374+0000 mgr.vm02.ttibzz (mgr.14118) 21 : audit [DBG] from='client.14144 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "crash", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:19.169057+0000 mgr.vm02.ttibzz (mgr.14118) 22 : cephadm [INF] Saving service crash spec with placement * 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:19.169057+0000 mgr.vm02.ttibzz (mgr.14118) 22 : cephadm [INF] Saving service crash spec with placement * 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:19.467985+0000 mgr.vm02.ttibzz (mgr.14118) 23 : audit [DBG] from='client.14146 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "ceph-exporter", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:19.467985+0000 mgr.vm02.ttibzz (mgr.14118) 23 : audit [DBG] from='client.14146 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "ceph-exporter", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:19.468861+0000 mgr.vm02.ttibzz (mgr.14118) 24 : cephadm [INF] Saving service ceph-exporter spec with placement * 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:19.468861+0000 mgr.vm02.ttibzz (mgr.14118) 24 : cephadm [INF] Saving service ceph-exporter spec with placement * 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:19.471309+0000 mon.vm02 (mon.0) 68 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:19.471309+0000 mon.vm02 (mon.0) 68 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:19.799713+0000 mon.vm02 (mon.0) 69 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:19.799713+0000 mon.vm02 (mon.0) 69 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 
2026-03-10T08:35:19.836177+0000 mon.vm02 (mon.0) 70 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:19.836177+0000 mon.vm02 (mon.0) 70 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:20.146031+0000 mon.vm02 (mon.0) 71 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:20.146031+0000 mon.vm02 (mon.0) 71 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:20.247325+0000 mon.vm02 (mon.0) 72 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:20.247325+0000 mon.vm02 (mon.0) 72 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:19.795835+0000 mgr.vm02.ttibzz (mgr.14118) 25 : audit [DBG] from='client.14148 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:19.795835+0000 mgr.vm02.ttibzz (mgr.14118) 25 : audit [DBG] from='client.14148 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:19.796742+0000 mgr.vm02.ttibzz (mgr.14118) 26 : cephadm [INF] Saving service prometheus spec with placement count:1 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:19.796742+0000 mgr.vm02.ttibzz (mgr.14118) 26 : cephadm [INF] Saving service prometheus spec with placement count:1 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:20.140352+0000 mgr.vm02.ttibzz (mgr.14118) 27 : audit [DBG] from='client.14150 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:20.140352+0000 mgr.vm02.ttibzz (mgr.14118) 27 : audit [DBG] from='client.14150 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:36:16.089 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:20.141244+0000 mgr.vm02.ttibzz (mgr.14118) 28 : cephadm [INF] Saving service grafana spec with placement count:1 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:20.141244+0000 mgr.vm02.ttibzz (mgr.14118) 28 : cephadm [INF] 
Saving service grafana spec with placement count:1 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:20.545345+0000 mgr.vm02.ttibzz (mgr.14118) 29 : audit [DBG] from='client.14152 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "node-exporter", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:20.545345+0000 mgr.vm02.ttibzz (mgr.14118) 29 : audit [DBG] from='client.14152 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "node-exporter", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:20.546153+0000 mgr.vm02.ttibzz (mgr.14118) 30 : cephadm [INF] Saving service node-exporter spec with placement * 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:20.546153+0000 mgr.vm02.ttibzz (mgr.14118) 30 : cephadm [INF] Saving service node-exporter spec with placement * 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:20.549702+0000 mon.vm02 (mon.0) 73 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:20.549702+0000 mon.vm02 (mon.0) 73 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:20.846271+0000 mon.vm02 (mon.0) 74 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:20.846271+0000 mon.vm02 (mon.0) 74 : audit [INF] from='mgr.14118 192.168.123.102:0/2857905084' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:21.126813+0000 mon.vm02 (mon.0) 75 : audit [INF] from='client.? 192.168.123.102:0/1890823856' entity='client.admin' 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:21.126813+0000 mon.vm02 (mon.0) 75 : audit [INF] from='client.? 192.168.123.102:0/1890823856' entity='client.admin' 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:21.394331+0000 mon.vm02 (mon.0) 76 : audit [INF] from='client.? 192.168.123.102:0/2600197362' entity='client.admin' 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:21.394331+0000 mon.vm02 (mon.0) 76 : audit [INF] from='client.? 
192.168.123.102:0/2600197362' entity='client.admin' 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:20.841796+0000 mgr.vm02.ttibzz (mgr.14118) 31 : audit [DBG] from='client.14154 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:20.841796+0000 mgr.vm02.ttibzz (mgr.14118) 31 : audit [DBG] from='client.14154 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:20.842662+0000 mgr.vm02.ttibzz (mgr.14118) 32 : cephadm [INF] Saving service alertmanager spec with placement count:1 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:20.842662+0000 mgr.vm02.ttibzz (mgr.14118) 32 : cephadm [INF] Saving service alertmanager spec with placement count:1 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:21.702464+0000 mon.vm02 (mon.0) 77 : audit [INF] from='client.? 192.168.123.102:0/3190377713' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "dashboard"}]: dispatch 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:21.702464+0000 mon.vm02 (mon.0) 77 : audit [INF] from='client.? 192.168.123.102:0/3190377713' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "dashboard"}]: dispatch 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:22.552193+0000 mon.vm02 (mon.0) 78 : audit [INF] from='client.? 192.168.123.102:0/3190377713' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "dashboard"}]': finished 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:22.552193+0000 mon.vm02 (mon.0) 78 : audit [INF] from='client.? 192.168.123.102:0/3190377713' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "dashboard"}]': finished 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:22.557494+0000 mon.vm02 (mon.0) 79 : cluster [DBG] mgrmap e8: vm02.ttibzz(active, since 8s) 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:22.557494+0000 mon.vm02 (mon.0) 79 : cluster [DBG] mgrmap e8: vm02.ttibzz(active, since 8s) 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:22.972433+0000 mon.vm02 (mon.0) 80 : audit [DBG] from='client.? 192.168.123.102:0/3930567975' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:22.972433+0000 mon.vm02 (mon.0) 80 : audit [DBG] from='client.? 
192.168.123.102:0/3930567975' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:25.890469+0000 mon.vm02 (mon.0) 81 : cluster [INF] Active manager daemon vm02.ttibzz restarted 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:25.890469+0000 mon.vm02 (mon.0) 81 : cluster [INF] Active manager daemon vm02.ttibzz restarted 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:25.890709+0000 mon.vm02 (mon.0) 82 : cluster [INF] Activating manager daemon vm02.ttibzz 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:25.890709+0000 mon.vm02 (mon.0) 82 : cluster [INF] Activating manager daemon vm02.ttibzz 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:25.896594+0000 mon.vm02 (mon.0) 83 : cluster [DBG] osdmap e3: 0 total, 0 up, 0 in 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:25.896594+0000 mon.vm02 (mon.0) 83 : cluster [DBG] osdmap e3: 0 total, 0 up, 0 in 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:25.896735+0000 mon.vm02 (mon.0) 84 : cluster [DBG] mgrmap e9: vm02.ttibzz(active, starting, since 0.00613587s) 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:25.896735+0000 mon.vm02 (mon.0) 84 : cluster [DBG] mgrmap e9: vm02.ttibzz(active, starting, since 0.00613587s) 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:25.899065+0000 mon.vm02 (mon.0) 85 : audit [DBG] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata", "id": "vm02"}]: dispatch 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:25.899065+0000 mon.vm02 (mon.0) 85 : audit [DBG] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata", "id": "vm02"}]: dispatch 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:25.899141+0000 mon.vm02 (mon.0) 86 : audit [DBG] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mgr metadata", "who": "vm02.ttibzz", "id": "vm02.ttibzz"}]: dispatch 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:25.899141+0000 mon.vm02 (mon.0) 86 : audit [DBG] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mgr metadata", "who": "vm02.ttibzz", "id": "vm02.ttibzz"}]: dispatch 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:25.899500+0000 mon.vm02 (mon.0) 87 : audit [DBG] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:25.899500+0000 mon.vm02 (mon.0) 87 : audit 
[DBG] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:25.899558+0000 mon.vm02 (mon.0) 88 : audit [DBG] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:25.899558+0000 mon.vm02 (mon.0) 88 : audit [DBG] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:25.899604+0000 mon.vm02 (mon.0) 89 : audit [DBG] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:25.899604+0000 mon.vm02 (mon.0) 89 : audit [DBG] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:25.905263+0000 mon.vm02 (mon.0) 90 : cluster [INF] Manager daemon vm02.ttibzz is now available 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:25.905263+0000 mon.vm02 (mon.0) 90 : cluster [INF] Manager daemon vm02.ttibzz is now available 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:25.921047+0000 mon.vm02 (mon.0) 91 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm02.ttibzz/mirror_snapshot_schedule"}]: dispatch 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:25.921047+0000 mon.vm02 (mon.0) 91 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm02.ttibzz/mirror_snapshot_schedule"}]: dispatch 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:25.921779+0000 mon.vm02 (mon.0) 92 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm02.ttibzz/trash_purge_schedule"}]: dispatch 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:25.921779+0000 mon.vm02 (mon.0) 92 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm02.ttibzz/trash_purge_schedule"}]: dispatch 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:25.928585+0000 mon.vm02 (mon.0) 93 : audit [DBG] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 
2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:26.840125+0000 mgr.vm02.ttibzz (mgr.14162) 1 : cephadm [INF] [10/Mar/2026:08:35:26] ENGINE Bus STARTING
2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:26.900274+0000 mon.vm02 (mon.0) 94 : cluster [DBG] mgrmap e10: vm02.ttibzz(active, since 1.00967s)
2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:26.901345+0000 mgr.vm02.ttibzz (mgr.14162) 2 : audit [DBG] from='client.14166 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch
2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:26.905161+0000 mgr.vm02.ttibzz (mgr.14162) 3 : audit [DBG] from='client.14166 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch
2026-03-10T08:36:16.090 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:26.942110+0000 mgr.vm02.ttibzz (mgr.14162) 4 : cephadm [INF] [10/Mar/2026:08:35:26] ENGINE Serving on http://192.168.123.102:8765
2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:27.053675+0000 mgr.vm02.ttibzz (mgr.14162) 5 : cephadm [INF] [10/Mar/2026:08:35:27] ENGINE Serving on https://192.168.123.102:7150
2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:27.053713+0000 mgr.vm02.ttibzz (mgr.14162) 6 : cephadm [INF] [10/Mar/2026:08:35:27] ENGINE Bus STARTED
2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:27.054109+0000 mgr.vm02.ttibzz (mgr.14162) 7 : cephadm [INF] [10/Mar/2026:08:35:27] ENGINE Client ('192.168.123.102', 40748) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:27.160790+0000 mgr.vm02.ttibzz (mgr.14162) 8 : audit [DBG] from='client.14174 -' entity='client.admin' cmd=[{"prefix": "dashboard create-self-signed-cert", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:27.228502+0000 mon.vm02 (mon.0) 95 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:27.231354+0000 mon.vm02 (mon.0) 96 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:27.500881+0000 mgr.vm02.ttibzz (mgr.14162) 9 : audit [DBG] from='client.14176 -' entity='client.admin' cmd=[{"prefix": "dashboard ac-user-create", "username": "admin", "rolename": "administrator", "force_password": true, "pwd_update_required": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:27.657200+0000 mon.vm02 (mon.0) 97 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
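The two dashboard commands dispatched above are the ones cephadm bootstrap issues when it enables the dashboard module. A minimal sketch of replaying them by hand from a `cephadm shell`; the password file path and password here are illustrative, not taken from this run:

    ceph dashboard create-self-signed-cert
    printf '%s' 'Sup3rSecret!' > /tmp/dashboard-pass   # hypothetical throwaway password
    ceph dashboard ac-user-create admin -i /tmp/dashboard-pass administrator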
2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:27.956148+0000 mon.vm02 (mon.0) 98 : audit [DBG] from='client.? 192.168.123.102:0/2217454896' entity='client.admin' cmd=[{"prefix": "config get", "who": "mgr", "key": "mgr/dashboard/ssl_server_port"}]: dispatch
2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:28.284369+0000 mon.vm02 (mon.0) 99 : audit [INF] from='client.? 192.168.123.102:0/196270727' entity='client.admin'
2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:28.660467+0000 mon.vm02 (mon.0) 100 : cluster [DBG] mgrmap e11: vm02.ttibzz(active, since 2s)
2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:30.976401+0000 mon.vm02 (mon.0) 101 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:31.586671+0000 mon.vm02 (mon.0) 102 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cluster 2026-03-10T08:35:32.592481+0000 mon.vm02 (mon.0) 103 : cluster [DBG] mgrmap e12: vm02.ttibzz(active, since 6s)
2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:33.103116+0000 mon.vm02 (mon.0) 104 : audit [INF] from='client.? 192.168.123.102:0/796802570' entity='client.admin'
2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:37.375103+0000 mon.vm02 (mon.0) 105 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:37.378012+0000 mon.vm02 (mon.0) 106 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:37.378750+0000 mon.vm02 (mon.0) 107 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch
2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:37.381787+0000 mon.vm02 (mon.0) 108 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:37.382670+0000 mon.vm02 (mon.0) 109 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm02", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch
2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:37.383589+0000 mon.vm02 (mon.0) 110 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm02", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished
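The `auth get-or-create` above mints the keyring for the ceph-exporter daemon with the caps listed in the audit entry. If the created entity needs checking afterwards, something like the following works from any admin shell:

    ceph auth get client.ceph-exporter.vm02   # prints the key and its mon/mgr/osd caps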
get-or-create", "entity": "client.ceph-exporter.vm02", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished 2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:37.383589+0000 mon.vm02 (mon.0) 110 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm02", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished 2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:37.384950+0000 mon.vm02 (mon.0) 111 : audit [DBG] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:37.384950+0000 mon.vm02 (mon.0) 111 : audit [DBG] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:37.385498+0000 mgr.vm02.ttibzz (mgr.14162) 10 : cephadm [INF] Deploying daemon ceph-exporter.vm02 on vm02 2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:37.385498+0000 mgr.vm02.ttibzz (mgr.14162) 10 : cephadm [INF] Deploying daemon ceph-exporter.vm02 on vm02 2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:38.204582+0000 mon.vm02 (mon.0) 112 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:38.204582+0000 mon.vm02 (mon.0) 112 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:38.208477+0000 mon.vm02 (mon.0) 113 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:38.208477+0000 mon.vm02 (mon.0) 113 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:38.214161+0000 mon.vm02 (mon.0) 114 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:38.214161+0000 mon.vm02 (mon.0) 114 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:38.219335+0000 mon.vm02 (mon.0) 115 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:38.219335+0000 mon.vm02 (mon.0) 115 : audit [INF] 
from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:38.221962+0000 mon.vm02 (mon.0) 116 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm02", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:38.221962+0000 mon.vm02 (mon.0) 116 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm02", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:38.223192+0000 mon.vm02 (mon.0) 117 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm02", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished 2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:38.223192+0000 mon.vm02 (mon.0) 117 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm02", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished 2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:38.225551+0000 mon.vm02 (mon.0) 118 : audit [DBG] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:38.225551+0000 mon.vm02 (mon.0) 118 : audit [DBG] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:36:16.091 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:38.226227+0000 mgr.vm02.ttibzz (mgr.14162) 11 : cephadm [INF] Deploying daemon crash.vm02 on vm02 2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:38.226227+0000 mgr.vm02.ttibzz (mgr.14162) 11 : cephadm [INF] Deploying daemon crash.vm02 on vm02 2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:38.413006+0000 mgr.vm02.ttibzz (mgr.14162) 12 : audit [DBG] from='client.14184 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:38.413006+0000 mgr.vm02.ttibzz (mgr.14162) 12 : audit [DBG] from='client.14184 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:38.416184+0000 mon.vm02 (mon.0) 119 : 
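Each `Deploying daemon ...` line follows the same pattern visible here: an `auth get-or-create` for the daemon's keyring, then `config generate-minimal-conf` for the config shipped next to it. A sketch of the equivalent manual steps for the crash collector, with caps copied from the audit entry above:

    ceph auth get-or-create client.crash.vm02 mon 'profile crash' mgr 'profile crash'
    ceph config generate-minimal-conf   # the minimal ceph.conf cephadm installs alongside the key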
2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:39.180251+0000 mon.vm02 (mon.0) 120 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:39.189138+0000 mon.vm02 (mon.0) 121 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:39.192926+0000 mon.vm02 (mon.0) 122 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:39.200628+0000 mon.vm02 (mon.0) 123 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:39.201969+0000 mgr.vm02.ttibzz (mgr.14162) 13 : cephadm [INF] Deploying daemon node-exporter.vm02 on vm02
2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:39.936531+0000 mon.vm02 (mon.0) 124 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:39.940762+0000 mon.vm02 (mon.0) 125 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:39.944041+0000 mon.vm02 (mon.0) 126 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:39.946953+0000 mon.vm02 (mon.0) 127 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:39.952093+0000 mgr.vm02.ttibzz (mgr.14162) 14 : cephadm [INF] Deploying daemon alertmanager.vm02 on vm02
2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:40.954989+0000 mon.vm02 (mon.0) 128 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:44.424011+0000 mon.vm02 (mon.0) 129 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:44.428108+0000 mon.vm02 (mon.0) 130 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:44.431104+0000 mon.vm02 (mon.0) 131 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
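node-exporter and alertmanager are deployed here as part of cephadm's default monitoring stack. Progress for any of these can be watched with the orchestrator, for example (daemon and service names as shown above):

    ceph orch ps --daemon-type node-exporter
    ceph orch ls alertmanager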
2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:44.434568+0000 mon.vm02 (mon.0) 132 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:44.438436+0000 mon.vm02 (mon.0) 133 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:44.449174+0000 mon.vm02 (mon.0) 134 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:44.453671+0000 mgr.vm02.ttibzz (mgr.14162) 15 : cephadm [INF] Regenerating cephadm self-signed grafana TLS certificates
2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:44.520528+0000 mon.vm02 (mon.0) 135 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:44.524271+0000 mon.vm02 (mon.0) 136 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:44.526514+0000 mon.vm02 (mon.0) 137 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:44.526942+0000 mgr.vm02.ttibzz (mgr.14162) 16 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:44.533416+0000 mon.vm02 (mon.0) 138 : audit [INF] from='mgr.14162 192.168.123.102:0/2173644391' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:44.541013+0000 mgr.vm02.ttibzz (mgr.14162) 17 : cephadm [INF] Deploying daemon grafana.vm02 on vm02
2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:44.811844+0000 mgr.vm02.ttibzz (mgr.14162) 18 : audit [DBG] from='client.14187 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm07", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: cephadm 2026-03-10T08:35:45.354122+0000 mgr.vm02.ttibzz (mgr.14162) 19 : cephadm [INF] Deploying cephadm binary to vm07
2026-03-10T08:36:16.092 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:36:15 vm07 bash[20988]: audit 2026-03-10T08:35:45.961626+0000 mon.vm02 (mon.0) 139 : audit [INF] debug 2026-03-10T08:36:15.918+0000 7f94e6833640 1 mon.vm07@-1(synchronizing).paxosservice(auth 1..8) refresh upgraded, format 0 -> 3
2026-03-10T08:36:16.289 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:15 vm02 bash[17473]: cephadm 2026-03-10T08:36:14.368095+0000 mgr.vm02.ttibzz (mgr.14195) 18 : cephadm [INF] Deploying daemon mon.vm07 on vm07
2026-03-10T08:36:16.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:15 vm02 bash[17473]: audit 2026-03-10T08:36:15.508433+0000 mon.vm02 (mon.0) 229 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:15 vm02 bash[17473]: audit 2026-03-10T08:36:15.511783+0000 mon.vm02 (mon.0) 230 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:15 vm02 bash[17473]: audit 2026-03-10T08:36:15.515805+0000 mon.vm02 (mon.0) 231 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:15 vm02 bash[17473]: audit 2026-03-10T08:36:15.520232+0000 mon.vm02 (mon.0) 232 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:15 vm02 bash[17473]: audit 2026-03-10T08:36:15.523918+0000 mon.vm02 (mon.0) 233 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:16.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:15 vm02 bash[17473]: audit 2026-03-10T08:36:15.534880+0000 mon.vm02 (mon.0) 234 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:36:20.519 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm07/config
2026-03-10T08:36:21.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:20 vm02 bash[17473]: audit 2026-03-10T08:36:15.929701+0000 mon.vm02 (mon.0) 236 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata", "id": "vm02"}]: dispatch
2026-03-10T08:36:21.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:20 vm02 bash[17473]: audit 2026-03-10T08:36:15.929936+0000 mon.vm02 (mon.0) 237 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata", "id": "vm07"}]: dispatch
2026-03-10T08:36:21.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:20 vm02 bash[17473]: cluster 2026-03-10T08:36:15.930134+0000 mon.vm02 (mon.0) 238 : cluster [INF] mon.vm02 calling monitor election
2026-03-10T08:36:21.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:20 vm02 bash[17473]: audit 2026-03-10T08:36:16.925264+0000 mon.vm02 (mon.0) 239 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata", "id": "vm07"}]: dispatch
2026-03-10T08:36:21.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:20 vm02 bash[17473]: audit 2026-03-10T08:36:17.924767+0000 mon.vm02 (mon.0) 240 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata", "id": "vm07"}]: dispatch
2026-03-10T08:36:21.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:21 vm02 bash[17473]: cluster 2026-03-10T08:36:17.930342+0000 mon.vm07 (mon.1) 1 : cluster [INF] mon.vm07 calling monitor election
2026-03-10T08:36:21.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:21 vm02 bash[17473]: audit 2026-03-10T08:36:18.142482+0000 mon.vm02 (mon.0) 241 : audit [DBG] from='mgr.? 192.168.123.107:0/3295739687' entity='mgr.vm07.aunzpk' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm07.aunzpk/crt"}]: dispatch
2026-03-10T08:36:21.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:21 vm02 bash[17473]: audit 2026-03-10T08:36:18.923764+0000 mon.vm02 (mon.0) 242 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata", "id": "vm07"}]: dispatch
2026-03-10T08:36:21.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:21 vm02 bash[17473]: audit 2026-03-10T08:36:19.234549+0000 mon.vm02 (mon.0) 243 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:36:21.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:21 vm02 bash[17473]: audit 2026-03-10T08:36:19.923781+0000 mon.vm02 (mon.0) 244 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata", "id": "vm07"}]: dispatch
2026-03-10T08:36:21.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:21 vm02 bash[17473]: audit 2026-03-10T08:36:20.924202+0000 mon.vm02 (mon.0) 245 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata", "id": "vm07"}]: dispatch
2026-03-10T08:36:21.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:21 vm02 bash[17473]: cluster 2026-03-10T08:36:20.934873+0000 mon.vm02 (mon.0) 246 : cluster [INF] mon.vm02 is new leader, mons vm02,vm07 in quorum (ranks 0,1)
2026-03-10T08:36:21.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:21 vm02 bash[17473]: cluster 2026-03-10T08:36:20.938200+0000 mon.vm02 (mon.0) 247 : cluster [DBG] monmap epoch 2
2026-03-10T08:36:21.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:21 vm02 bash[17473]: cluster 2026-03-10T08:36:20.938216+0000 mon.vm02 (mon.0) 248 : cluster [DBG] fsid e750d050-1c5b-11f1-9e63-531fde0192f6
2026-03-10T08:36:21.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:21 vm02 bash[17473]: cluster 2026-03-10T08:36:20.938226+0000 mon.vm02 (mon.0) 249 : cluster [DBG] last_changed 2026-03-10T08:36:15.925983+0000
2026-03-10T08:36:21.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:21 vm02 bash[17473]: cluster 2026-03-10T08:36:20.938237+0000 mon.vm02 (mon.0) 250 : cluster [DBG] created 2026-03-10T08:35:02.208022+0000
2026-03-10T08:36:21.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:21 vm02 bash[17473]: cluster 2026-03-10T08:36:20.938247+0000 mon.vm02 (mon.0) 251 : cluster [DBG] min_mon_release 19 (squid)
2026-03-10T08:36:21.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:21 vm02 bash[17473]: cluster 2026-03-10T08:36:20.938257+0000 mon.vm02 (mon.0) 252 : cluster [DBG] election_strategy: 1
2026-03-10T08:36:21.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:21 vm02 bash[17473]: cluster 2026-03-10T08:36:20.938267+0000 mon.vm02 (mon.0) 253 : cluster [DBG] 0: [v2:192.168.123.102:3300/0,v1:192.168.123.102:6789/0] mon.vm02
2026-03-10T08:36:21.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:21 vm02 bash[17473]: cluster 2026-03-10T08:36:20.938276+0000 mon.vm02 (mon.0) 254 : cluster [DBG] 1: [v2:192.168.123.107:3300/0,v1:192.168.123.107:6789/0] mon.vm07
2026-03-10T08:36:21.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:21 vm02 bash[17473]: cluster 2026-03-10T08:36:20.938535+0000 mon.vm02 (mon.0) 255 : cluster [DBG] fsmap
2026-03-10T08:36:21.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:21 vm02 bash[17473]: cluster 2026-03-10T08:36:20.938556+0000 mon.vm02 (mon.0) 256 : cluster [DBG] osdmap e5: 0 total, 0 up, 0 in
2026-03-10T08:36:21.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:21 vm02 bash[17473]: cluster 2026-03-10T08:36:20.938696+0000 mon.vm02 (mon.0) 257 : cluster [DBG] mgrmap e16: vm02.ttibzz(active, since 16s)
2026-03-10T08:36:21.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:21 vm02 bash[17473]: cluster 2026-03-10T08:36:20.938776+0000 mon.vm02 (mon.0) 258 : cluster [INF] overall HEALTH_OK
2026-03-10T08:36:21.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:21 vm02 bash[17473]: cluster 2026-03-10T08:36:20.938925+0000 mon.vm02 (mon.0) 259 : cluster [DBG] Standby manager daemon vm07.aunzpk started
2026-03-10T08:36:21.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:21 vm02 bash[17473]: audit 2026-03-10T08:36:20.940520+0000 mon.vm02 (mon.0) 260 : audit [DBG] from='mgr.? 192.168.123.107:0/3295739687' entity='mgr.vm07.aunzpk' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch
2026-03-10T08:36:21.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:21 vm02 bash[17473]: audit 2026-03-10T08:36:20.941473+0000 mon.vm02 (mon.0) 261 : audit [DBG] from='mgr.? 192.168.123.107:0/3295739687' entity='mgr.vm07.aunzpk' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm07.aunzpk/key"}]: dispatch
2026-03-10T08:36:21.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:21 vm02 bash[17473]: audit 2026-03-10T08:36:20.941741+0000 mon.vm02 (mon.0) 262 : audit [DBG] from='mgr.? 192.168.123.107:0/3295739687' entity='mgr.vm07.aunzpk' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch
2026-03-10T08:36:21.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:21 vm02 bash[17473]: audit 2026-03-10T08:36:20.945122+0000 mon.vm02 (mon.0) 263 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:21.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:21 vm02 bash[17473]: audit 2026-03-10T08:36:20.948215+0000 mon.vm02 (mon.0) 264 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:21.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:21 vm02 bash[17473]: audit 2026-03-10T08:36:20.952166+0000 mon.vm02 (mon.0) 265 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:21.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:21 vm02 bash[17473]: audit 2026-03-10T08:36:20.953040+0000 mon.vm02 (mon.0) 266 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:36:21.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:21 vm02 bash[17473]: audit 2026-03-10T08:36:20.953804+0000 mon.vm02 (mon.0) 267 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
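The election sequence above ends with mon.vm02 leading a two-mon quorum (ranks 0,1) and overall HEALTH_OK. The same state can be confirmed from the CLI with:

    ceph mon stat                        # e.g. 'e2: 2 mons ..., quorum 0,1 vm02,vm07'
    ceph quorum_status -f json-pretty    # leader, quorum ranks, and monmap in one dump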
INFO:teuthology.orchestra.run.vm07.stdout:{"epoch":2,"fsid":"e750d050-1c5b-11f1-9e63-531fde0192f6","modified":"2026-03-10T08:36:15.925983Z","created":"2026-03-10T08:35:02.208022Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm02","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:3300","nonce":0},{"type":"v1","addr":"192.168.123.102:6789","nonce":0}]},"addr":"192.168.123.102:6789/0","public_addr":"192.168.123.102:6789/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":1,"name":"vm07","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:3300","nonce":0},{"type":"v1","addr":"192.168.123.107:6789","nonce":0}]},"addr":"192.168.123.107:6789/0","public_addr":"192.168.123.107:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0,1]} 2026-03-10T08:36:21.455 INFO:teuthology.orchestra.run.vm07.stderr:dumped monmap epoch 2 2026-03-10T08:36:21.515 INFO:tasks.cephadm:Generating final ceph.conf file... 2026-03-10T08:36:21.515 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph config generate-minimal-conf 2026-03-10T08:36:22.180 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: cephadm 2026-03-10T08:36:20.954579+0000 mgr.vm02.ttibzz (mgr.14195) 19 : cephadm [INF] Updating vm02:/etc/ceph/ceph.conf 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: cephadm 2026-03-10T08:36:20.954579+0000 mgr.vm02.ttibzz (mgr.14195) 19 : cephadm [INF] Updating vm02:/etc/ceph/ceph.conf 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: cephadm 2026-03-10T08:36:20.954680+0000 mgr.vm02.ttibzz (mgr.14195) 20 : cephadm [INF] Updating vm07:/etc/ceph/ceph.conf 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: cephadm 2026-03-10T08:36:20.954680+0000 mgr.vm02.ttibzz (mgr.14195) 20 : cephadm [INF] Updating vm07:/etc/ceph/ceph.conf 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: cephadm 2026-03-10T08:36:20.991745+0000 mgr.vm02.ttibzz (mgr.14195) 21 : cephadm [INF] Updating vm02:/var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/config/ceph.conf 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: cephadm 2026-03-10T08:36:20.991745+0000 mgr.vm02.ttibzz (mgr.14195) 21 : cephadm [INF] Updating vm02:/var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/config/ceph.conf 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: cephadm 2026-03-10T08:36:21.011308+0000 mgr.vm02.ttibzz (mgr.14195) 22 : cephadm [INF] Updating vm07:/var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/config/ceph.conf 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: cephadm 2026-03-10T08:36:21.011308+0000 mgr.vm02.ttibzz (mgr.14195) 22 : cephadm [INF] Updating vm07:/var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/config/ceph.conf 2026-03-10T08:36:22.181 
INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: cluster 2026-03-10T08:36:21.036740+0000 mon.vm02 (mon.0) 268 : cluster [DBG] mgrmap e17: vm02.ttibzz(active, since 16s), standbys: vm07.aunzpk 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: cluster 2026-03-10T08:36:21.036740+0000 mon.vm02 (mon.0) 268 : cluster [DBG] mgrmap e17: vm02.ttibzz(active, since 16s), standbys: vm07.aunzpk 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: audit 2026-03-10T08:36:21.036904+0000 mon.vm02 (mon.0) 269 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mgr metadata", "who": "vm07.aunzpk", "id": "vm07.aunzpk"}]: dispatch 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: audit 2026-03-10T08:36:21.036904+0000 mon.vm02 (mon.0) 269 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mgr metadata", "who": "vm07.aunzpk", "id": "vm07.aunzpk"}]: dispatch 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: audit 2026-03-10T08:36:21.061184+0000 mon.vm02 (mon.0) 270 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: audit 2026-03-10T08:36:21.061184+0000 mon.vm02 (mon.0) 270 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: audit 2026-03-10T08:36:21.065613+0000 mon.vm02 (mon.0) 271 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: audit 2026-03-10T08:36:21.065613+0000 mon.vm02 (mon.0) 271 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: audit 2026-03-10T08:36:21.070102+0000 mon.vm02 (mon.0) 272 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: audit 2026-03-10T08:36:21.070102+0000 mon.vm02 (mon.0) 272 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: audit 2026-03-10T08:36:21.074019+0000 mon.vm02 (mon.0) 273 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: audit 2026-03-10T08:36:21.074019+0000 mon.vm02 (mon.0) 273 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: audit 2026-03-10T08:36:21.077241+0000 mon.vm02 (mon.0) 274 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: audit 2026-03-10T08:36:21.077241+0000 mon.vm02 (mon.0) 274 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' 
entity='mgr.vm02.ttibzz' 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: cephadm 2026-03-10T08:36:21.087243+0000 mgr.vm02.ttibzz (mgr.14195) 23 : cephadm [INF] Reconfiguring grafana.vm02 (dependencies changed)... 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: cephadm 2026-03-10T08:36:21.087243+0000 mgr.vm02.ttibzz (mgr.14195) 23 : cephadm [INF] Reconfiguring grafana.vm02 (dependencies changed)... 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: cephadm 2026-03-10T08:36:21.123214+0000 mgr.vm02.ttibzz (mgr.14195) 24 : cephadm [INF] Reconfiguring daemon grafana.vm02 on vm02 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: cephadm 2026-03-10T08:36:21.123214+0000 mgr.vm02.ttibzz (mgr.14195) 24 : cephadm [INF] Reconfiguring daemon grafana.vm02 on vm02 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: audit 2026-03-10T08:36:21.451315+0000 mon.vm02 (mon.0) 275 : audit [DBG] from='client.? 192.168.123.107:0/1722530882' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: audit 2026-03-10T08:36:21.451315+0000 mon.vm02 (mon.0) 275 : audit [DBG] from='client.? 192.168.123.107:0/1722530882' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: audit 2026-03-10T08:36:21.810216+0000 mon.vm02 (mon.0) 276 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: audit 2026-03-10T08:36:21.810216+0000 mon.vm02 (mon.0) 276 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: audit 2026-03-10T08:36:21.815242+0000 mon.vm02 (mon.0) 277 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: audit 2026-03-10T08:36:21.815242+0000 mon.vm02 (mon.0) 277 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: cephadm 2026-03-10T08:36:21.816235+0000 mgr.vm02.ttibzz (mgr.14195) 25 : cephadm [INF] Reconfiguring alertmanager.vm02 (dependencies changed)... 2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: cephadm 2026-03-10T08:36:21.816235+0000 mgr.vm02.ttibzz (mgr.14195) 25 : cephadm [INF] Reconfiguring alertmanager.vm02 (dependencies changed)... 
2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: cephadm 2026-03-10T08:36:21.820880+0000 mgr.vm02.ttibzz (mgr.14195) 26 : cephadm [INF] Reconfiguring daemon alertmanager.vm02 on vm02
2026-03-10T08:36:22.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:22 vm02 bash[17473]: audit 2026-03-10T08:36:21.924246+0000 mon.vm02 (mon.0) 278 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata", "id": "vm07"}]: dispatch
2026-03-10T08:36:23.905 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:23 vm02 bash[17473]: audit 2026-03-10T08:36:22.588455+0000 mon.vm02 (mon.0) 279 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:23.905 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:23 vm02 bash[17473]: audit 2026-03-10T08:36:22.596391+0000 mon.vm02 (mon.0) 280 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:23.905 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:23 vm02 bash[17473]: cephadm 2026-03-10T08:36:22.597170+0000 mgr.vm02.ttibzz (mgr.14195) 27 : cephadm [INF] Reconfiguring mon.vm02 (unknown last config time)...
2026-03-10T08:36:23.905 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:23 vm02 bash[17473]: audit 2026-03-10T08:36:22.597389+0000 mon.vm02 (mon.0) 281 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-10T08:36:23.905 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:23 vm02 bash[17473]: audit 2026-03-10T08:36:22.597864+0000 mon.vm02 (mon.0) 282 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-10T08:36:23.905 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:23 vm02 bash[17473]: audit 2026-03-10T08:36:22.598291+0000 mon.vm02 (mon.0) 283 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:36:23.905 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:23 vm02 bash[17473]: cephadm 2026-03-10T08:36:22.598803+0000 mgr.vm02.ttibzz (mgr.14195) 28 : cephadm [INF] Reconfiguring daemon mon.vm02 on vm02
2026-03-10T08:36:23.906 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:23 vm02 bash[17473]: audit 2026-03-10T08:36:23.015762+0000 mon.vm02 (mon.0) 284 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:23.906 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:23 vm02 bash[17473]: audit 2026-03-10T08:36:23.021347+0000 mon.vm02 (mon.0) 285 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:23.906 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:23 vm02 bash[17473]: audit 2026-03-10T08:36:23.022663+0000 mon.vm02 (mon.0) 286 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm02", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch
2026-03-10T08:36:23.906 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:23 vm02 bash[17473]: audit 2026-03-10T08:36:23.023299+0000 mon.vm02 (mon.0) 287 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:36:23.906 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:23 vm02 bash[17473]: audit 2026-03-10T08:36:23.423873+0000 mon.vm02 (mon.0) 288 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:23.906 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:23 vm02 bash[17473]: audit 2026-03-10T08:36:23.428650+0000 mon.vm02 (mon.0) 289 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:24.736 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:24 vm02 bash[17473]: cephadm 2026-03-10T08:36:23.022366+0000 mgr.vm02.ttibzz (mgr.14195) 29 : cephadm [INF] Reconfiguring crash.vm02 (monmap changed)...
2026-03-10T08:36:24.736 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:24 vm02 bash[17473]: cephadm 2026-03-10T08:36:23.023949+0000 mgr.vm02.ttibzz (mgr.14195) 30 : cephadm [INF] Reconfiguring daemon crash.vm02 on vm02
2026-03-10T08:36:24.736 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:24 vm02 bash[17473]: cephadm 2026-03-10T08:36:23.429582+0000 mgr.vm02.ttibzz (mgr.14195) 31 : cephadm [INF] Reconfiguring prometheus.vm02 (dependencies changed)...
2026-03-10T08:36:24.736 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:24 vm02 bash[17473]: cephadm 2026-03-10T08:36:23.598359+0000 mgr.vm02.ttibzz (mgr.14195) 32 : cephadm [INF] Reconfiguring daemon prometheus.vm02 on vm02
2026-03-10T08:36:24.736 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:24 vm02 bash[17473]: audit 2026-03-10T08:36:24.225326+0000 mon.vm02 (mon.0) 290 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:24.736 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:24 vm02 bash[17473]: audit 2026-03-10T08:36:24.230353+0000 mon.vm02 (mon.0) 291 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:24.736 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:24 vm02 bash[17473]: audit 2026-03-10T08:36:24.232293+0000 mon.vm02 (mon.0) 292 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm02.ttibzz", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-10T08:36:24.736 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:24 vm02 bash[17473]: audit 2026-03-10T08:36:24.232946+0000 mon.vm02 (mon.0) 293 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-10T08:36:24.736 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:24 vm02 bash[17473]: audit 2026-03-10T08:36:24.233381+0000 mon.vm02 (mon.0) 294 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:36:26.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:25 vm02 bash[17473]: cluster 2026-03-10T08:36:24.193358+0000 mgr.vm02.ttibzz (mgr.14195) 33 : cluster [DBG] pgmap v3: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T08:36:26.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:25 vm02 bash[17473]: cephadm 2026-03-10T08:36:24.231955+0000 mgr.vm02.ttibzz (mgr.14195) 34 : cephadm [INF] Reconfiguring mgr.vm02.ttibzz (unknown last config time)...
2026-03-10T08:36:26.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:25 vm02 bash[17473]: cephadm 2026-03-10T08:36:24.233895+0000 mgr.vm02.ttibzz (mgr.14195) 35 : cephadm [INF] Reconfiguring daemon mgr.vm02.ttibzz on vm02
2026-03-10T08:36:26.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:25 vm02 bash[17473]: audit 2026-03-10T08:36:24.656464+0000 mon.vm02 (mon.0) 295 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:26.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:25 vm02 bash[17473]: audit 2026-03-10T08:36:24.662196+0000 mon.vm02 (mon.0) 296 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:26.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:25 vm02 bash[17473]: cephadm 2026-03-10T08:36:24.663166+0000 mgr.vm02.ttibzz (mgr.14195) 36 : cephadm [INF] Reconfiguring ceph-exporter.vm02 (monmap changed)...
2026-03-10T08:36:26.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:25 vm02 bash[17473]: audit 2026-03-10T08:36:24.663877+0000 mon.vm02 (mon.0) 297 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm02", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch
2026-03-10T08:36:26.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:25 vm02 bash[17473]: audit 2026-03-10T08:36:24.664602+0000 mon.vm02 (mon.0) 298 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:36:26.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:25 vm02 bash[17473]: cephadm 2026-03-10T08:36:24.665353+0000 mgr.vm02.ttibzz (mgr.14195) 37 : cephadm [INF] Reconfiguring daemon ceph-exporter.vm02 on vm02
2026-03-10T08:36:26.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:25 vm02 bash[17473]: audit 2026-03-10T08:36:25.089914+0000 mon.vm02 (mon.0) 299 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:26.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:25 vm02 bash[17473]: audit 2026-03-10T08:36:25.096272+0000 mon.vm02 (mon.0) 300 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:26.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:25 vm02 bash[17473]: audit 2026-03-10T08:36:25.097738+0000 mon.vm02 (mon.0) 301 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch
2026-03-10T08:36:26.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:25 vm02 bash[17473]: audit 2026-03-10T08:36:25.098436+0000 mon.vm02 (mon.0) 302 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch
2026-03-10T08:36:26.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:25 vm02 bash[17473]: audit 2026-03-10T08:36:25.099000+0000 mon.vm02 (mon.0) 303 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:36:26.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:25 vm02 bash[17473]: audit 2026-03-10T08:36:25.477821+0000 mon.vm02 (mon.0) 304 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:26.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:25 vm02 bash[17473]: audit 2026-03-10T08:36:25.483618+0000 mon.vm02 (mon.0) 305 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:26.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:25 vm02 bash[17473]: audit 2026-03-10T08:36:25.484741+0000 mon.vm02 (mon.0) 306 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm07.aunzpk", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-10T08:36:26.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:25 vm02 bash[17473]: audit 2026-03-10T08:36:25.485361+0000 mon.vm02 (mon.0) 307 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-10T08:36:26.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:25 vm02 bash[17473]: audit 2026-03-10T08:36:25.485800+0000 mon.vm02 (mon.0) 308 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:36:27.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:26 vm02 bash[17473]: cephadm 2026-03-10T08:36:25.097478+0000 mgr.vm02.ttibzz (mgr.14195) 38 : cephadm [INF] Reconfiguring mon.vm07 (monmap changed)...
2026-03-10T08:36:27.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:26 vm02 bash[17473]: cephadm 2026-03-10T08:36:25.099780+0000 mgr.vm02.ttibzz (mgr.14195) 39 : cephadm [INF] Reconfiguring daemon mon.vm07 on vm07
2026-03-10T08:36:27.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:26 vm02 bash[17473]: cephadm 2026-03-10T08:36:25.484394+0000 mgr.vm02.ttibzz (mgr.14195) 40 : cephadm [INF] Reconfiguring mgr.vm07.aunzpk (monmap changed)...
2026-03-10T08:36:27.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:26 vm02 bash[17473]: cephadm 2026-03-10T08:36:25.486415+0000 mgr.vm02.ttibzz (mgr.14195) 41 : cephadm [INF] Reconfiguring daemon mgr.vm07.aunzpk on vm07
2026-03-10T08:36:27.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:26 vm02 bash[17473]: audit 2026-03-10T08:36:25.861021+0000 mon.vm02 (mon.0) 309 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:27.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:26 vm02 bash[17473]: audit 2026-03-10T08:36:25.864329+0000 mon.vm02 (mon.0) 310 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:27.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:26 vm02 bash[17473]: cephadm 2026-03-10T08:36:25.865161+0000 mgr.vm02.ttibzz (mgr.14195) 42 : cephadm [INF] Reconfiguring crash.vm07 (monmap changed)...
2026-03-10T08:36:27.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:26 vm02 bash[17473]: audit 2026-03-10T08:36:25.865488+0000 mon.vm02 (mon.0) 311 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm07", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch
2026-03-10T08:36:27.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:26 vm02 bash[17473]: audit 2026-03-10T08:36:25.866206+0000 mon.vm02 (mon.0) 312 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:36:27.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:26 vm02 bash[17473]: cephadm 2026-03-10T08:36:25.866819+0000 mgr.vm02.ttibzz (mgr.14195) 43 : cephadm [INF] Reconfiguring daemon crash.vm07 on vm07
2026-03-10T08:36:27.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:26 vm02 bash[17473]: audit 2026-03-10T08:36:26.306730+0000 mon.vm02 (mon.0) 313 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:27.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:26 vm02 bash[17473]: audit 2026-03-10T08:36:26.311164+0000 mon.vm02 (mon.0) 314 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:27.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:26 vm02 bash[17473]: audit 2026-03-10T08:36:26.311976+0000 mon.vm02 (mon.0) 315 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm07", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch
2026-03-10T08:36:27.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:26 vm02 bash[17473]: audit 2026-03-10T08:36:26.312516+0000 mon.vm02 (mon.0) 316 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:36:27.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:26 vm02 bash[17473]: audit 2026-03-10T08:36:26.697988+0000 mon.vm02 (mon.0) 317 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:27.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:26 vm02 bash[17473]: audit 2026-03-10T08:36:26.701316+0000 mon.vm02 (mon.0) 318 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:27.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:26 vm02 bash[17473]: audit 2026-03-10T08:36:26.703733+0000 mon.vm02 (mon.0) 319 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch
2026-03-10T08:36:27.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:26 vm02 bash[17473]: audit 2026-03-10T08:36:26.704603+0000 mon.vm02 (mon.0) 320 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm02.local:3000"}]: dispatch
2026-03-10T08:36:27.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:26 vm02 bash[17473]: audit 2026-03-10T08:36:26.707208+0000 mon.vm02 (mon.0) 321 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:27.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:26 vm02 bash[17473]: audit 2026-03-10T08:36:26.714816+0000 mon.vm02 (mon.0) 322 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch
2026-03-10T08:36:27.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:26 vm02 bash[17473]: audit 2026-03-10T08:36:26.715445+0000 mon.vm02 (mon.0) 323 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm02.local:9093"}]: dispatch
2026-03-10T08:36:27.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:26 vm02 bash[17473]: audit 2026-03-10T08:36:26.717870+0000 mon.vm02 (mon.0) 324 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:27.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:26 vm02 bash[17473]: audit 2026-03-10T08:36:26.723345+0000 mon.vm02 (mon.0) 325 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T08:36:27.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:26 vm02 bash[17473]: audit 2026-03-10T08:36:26.724013+0000 mon.vm02 (mon.0) 326 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm02.local:9095"}]: dispatch
2026-03-10T08:36:27.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:26 vm02 bash[17473]: audit 2026-03-10T08:36:26.726303+0000 mon.vm02 (mon.0) 327 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:27.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:26 vm02 bash[17473]: audit 2026-03-10T08:36:26.757646+0000 mon.vm02 (mon.0) 328 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:36:28.190 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config
2026-03-10T08:36:28.205 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:27 vm02 bash[17473]: cluster 2026-03-10T08:36:26.193555+0000 mgr.vm02.ttibzz (mgr.14195) 44 : cluster [DBG] pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T08:36:28.205 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:27 vm02 bash[17473]: cephadm 2026-03-10T08:36:26.311767+0000 mgr.vm02.ttibzz (mgr.14195) 45 : cephadm [INF] Reconfiguring ceph-exporter.vm07 (monmap changed)...
2026-03-10T08:36:28.205 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:27 vm02 bash[17473]: cephadm 2026-03-10T08:36:26.312984+0000 mgr.vm02.ttibzz (mgr.14195) 46 : cephadm [INF] Reconfiguring daemon ceph-exporter.vm07 on vm07
2026-03-10T08:36:28.205 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:27 vm02 bash[17473]: audit 2026-03-10T08:36:26.704027+0000 mgr.vm02.ttibzz (mgr.14195) 47 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch
2026-03-10T08:36:28.205 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:27 vm02 bash[17473]: audit 2026-03-10T08:36:26.704762+0000 mgr.vm02.ttibzz (mgr.14195) 48 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm02.local:3000"}]: dispatch
2026-03-10T08:36:28.205 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:27 vm02 bash[17473]: audit 2026-03-10T08:36:26.714969+0000 mgr.vm02.ttibzz (mgr.14195) 49 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch
2026-03-10T08:36:28.205 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:27 vm02 bash[17473]: audit 2026-03-10T08:36:26.715623+0000 mgr.vm02.ttibzz (mgr.14195) 50 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm02.local:9093"}]: dispatch
2026-03-10T08:36:28.205 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:27 vm02 bash[17473]: audit 2026-03-10T08:36:26.723571+0000 mgr.vm02.ttibzz (mgr.14195) 51 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T08:36:28.205 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:27 vm02 bash[17473]: audit 2026-03-10T08:36:26.724169+0000 mgr.vm02.ttibzz (mgr.14195) 52 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm02.local:9095"}]: dispatch
2026-03-10T08:36:28.474 INFO:teuthology.orchestra.run.vm02.stdout:# minimal ceph.conf for e750d050-1c5b-11f1-9e63-531fde0192f6
2026-03-10T08:36:28.474 INFO:teuthology.orchestra.run.vm02.stdout:[global]
2026-03-10T08:36:28.474 INFO:teuthology.orchestra.run.vm02.stdout: fsid = e750d050-1c5b-11f1-9e63-531fde0192f6
2026-03-10T08:36:28.474 INFO:teuthology.orchestra.run.vm02.stdout: mon_host = [v2:192.168.123.102:3300/0,v1:192.168.123.102:6789/0] [v2:192.168.123.107:3300/0,v1:192.168.123.107:6789/0]
2026-03-10T08:36:28.548 INFO:tasks.cephadm:Distributing (final) config and client.admin keyring...
2026-03-10T08:36:28.548 DEBUG:teuthology.orchestra.run.vm02:> set -ex
2026-03-10T08:36:28.548 DEBUG:teuthology.orchestra.run.vm02:> sudo dd of=/etc/ceph/ceph.conf
2026-03-10T08:36:28.555 DEBUG:teuthology.orchestra.run.vm02:> set -ex
2026-03-10T08:36:28.555 DEBUG:teuthology.orchestra.run.vm02:> sudo dd of=/etc/ceph/ceph.client.admin.keyring
2026-03-10T08:36:28.606 DEBUG:teuthology.orchestra.run.vm07:> set -ex
2026-03-10T08:36:28.606 DEBUG:teuthology.orchestra.run.vm07:> sudo dd of=/etc/ceph/ceph.conf
2026-03-10T08:36:28.613 DEBUG:teuthology.orchestra.run.vm07:> set -ex
2026-03-10T08:36:28.613 DEBUG:teuthology.orchestra.run.vm07:> sudo dd of=/etc/ceph/ceph.client.admin.keyring
2026-03-10T08:36:28.663 INFO:tasks.cephadm:Deploying OSDs...
2026-03-10T08:36:28.663 DEBUG:teuthology.orchestra.run.vm02:> set -ex 2026-03-10T08:36:28.663 DEBUG:teuthology.orchestra.run.vm02:> dd if=/scratch_devs of=/dev/stdout 2026-03-10T08:36:28.667 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-10T08:36:28.667 DEBUG:teuthology.orchestra.run.vm02:> ls /dev/[sv]d? 2026-03-10T08:36:28.713 INFO:teuthology.orchestra.run.vm02.stdout:/dev/vda 2026-03-10T08:36:28.714 INFO:teuthology.orchestra.run.vm02.stdout:/dev/vdb 2026-03-10T08:36:28.714 INFO:teuthology.orchestra.run.vm02.stdout:/dev/vdc 2026-03-10T08:36:28.714 INFO:teuthology.orchestra.run.vm02.stdout:/dev/vdd 2026-03-10T08:36:28.714 INFO:teuthology.orchestra.run.vm02.stdout:/dev/vde 2026-03-10T08:36:28.714 WARNING:teuthology.misc:Removing root device: /dev/vda from device list 2026-03-10T08:36:28.714 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'] 2026-03-10T08:36:28.714 DEBUG:teuthology.orchestra.run.vm02:> stat /dev/vdb 2026-03-10T08:36:28.758 INFO:teuthology.orchestra.run.vm02.stdout: File: /dev/vdb 2026-03-10T08:36:28.759 INFO:teuthology.orchestra.run.vm02.stdout: Size: 0 Blocks: 0 IO Block: 4096 block special file 2026-03-10T08:36:28.759 INFO:teuthology.orchestra.run.vm02.stdout:Device: 5h/5d Inode: 24 Links: 1 Device type: fe,10 2026-03-10T08:36:28.759 INFO:teuthology.orchestra.run.vm02.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-10T08:36:28.759 INFO:teuthology.orchestra.run.vm02.stdout:Access: 2026-03-10 08:31:09.726465145 +0000 2026-03-10T08:36:28.759 INFO:teuthology.orchestra.run.vm02.stdout:Modify: 2026-03-10 08:31:08.666465145 +0000 2026-03-10T08:36:28.759 INFO:teuthology.orchestra.run.vm02.stdout:Change: 2026-03-10 08:31:08.666465145 +0000 2026-03-10T08:36:28.759 INFO:teuthology.orchestra.run.vm02.stdout: Birth: - 2026-03-10T08:36:28.759 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/dev/vdb of=/dev/null count=1 2026-03-10T08:36:28.811 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records in 2026-03-10T08:36:28.811 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records out 2026-03-10T08:36:28.811 INFO:teuthology.orchestra.run.vm02.stderr:512 bytes copied, 0.000195195 s, 2.6 MB/s 2026-03-10T08:36:28.812 DEBUG:teuthology.orchestra.run.vm02:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdb
2026-03-10T08:36:28.864 DEBUG:teuthology.orchestra.run.vm02:> stat /dev/vdc
2026-03-10T08:36:28.910 INFO:teuthology.orchestra.run.vm02.stdout: File: /dev/vdc
2026-03-10T08:36:28.910 INFO:teuthology.orchestra.run.vm02.stdout: Size: 0 Blocks: 0 IO Block: 4096 block special file
2026-03-10T08:36:28.910 INFO:teuthology.orchestra.run.vm02.stdout:Device: 5h/5d Inode: 25 Links: 1 Device type: fe,20
2026-03-10T08:36:28.910 INFO:teuthology.orchestra.run.vm02.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-10T08:36:28.910 INFO:teuthology.orchestra.run.vm02.stdout:Access: 2026-03-10 08:31:09.742465145 +0000
2026-03-10T08:36:28.910 INFO:teuthology.orchestra.run.vm02.stdout:Modify: 2026-03-10 08:31:08.670465145 +0000
2026-03-10T08:36:28.910 INFO:teuthology.orchestra.run.vm02.stdout:Change: 2026-03-10 08:31:08.670465145 +0000
2026-03-10T08:36:28.910 INFO:teuthology.orchestra.run.vm02.stdout: Birth: -
2026-03-10T08:36:28.910 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/dev/vdc of=/dev/null count=1
2026-03-10T08:36:28.959 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records in
2026-03-10T08:36:28.959 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records out
2026-03-10T08:36:28.959 INFO:teuthology.orchestra.run.vm02.stderr:512 bytes copied, 0.000177833 s, 2.9 MB/s
2026-03-10T08:36:28.960 DEBUG:teuthology.orchestra.run.vm02:> ! mount | grep -v devtmpfs | grep -q /dev/vdc
2026-03-10T08:36:29.007 DEBUG:teuthology.orchestra.run.vm02:> stat /dev/vdd
2026-03-10T08:36:29.053 INFO:teuthology.orchestra.run.vm02.stdout: File: /dev/vdd
2026-03-10T08:36:29.053 INFO:teuthology.orchestra.run.vm02.stdout: Size: 0 Blocks: 0 IO Block: 4096 block special file
2026-03-10T08:36:29.053 INFO:teuthology.orchestra.run.vm02.stdout:Device: 5h/5d Inode: 26 Links: 1 Device type: fe,30
2026-03-10T08:36:29.053 INFO:teuthology.orchestra.run.vm02.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-10T08:36:29.053 INFO:teuthology.orchestra.run.vm02.stdout:Access: 2026-03-10 08:31:09.726465145 +0000
2026-03-10T08:36:29.053 INFO:teuthology.orchestra.run.vm02.stdout:Modify: 2026-03-10 08:31:08.646465145 +0000
2026-03-10T08:36:29.053 INFO:teuthology.orchestra.run.vm02.stdout:Change: 2026-03-10 08:31:08.646465145 +0000
2026-03-10T08:36:29.053 INFO:teuthology.orchestra.run.vm02.stdout: Birth: -
2026-03-10T08:36:29.053 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/dev/vdd of=/dev/null count=1
2026-03-10T08:36:29.099 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:28 vm02 bash[17473]: audit 2026-03-10T08:36:28.470740+0000 mon.vm02 (mon.0) 329 : audit [DBG] from='client.? 192.168.123.102:0/52039943' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:36:29.100 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records in
2026-03-10T08:36:29.100 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records out
2026-03-10T08:36:29.100 INFO:teuthology.orchestra.run.vm02.stderr:512 bytes copied, 0.000174116 s, 2.9 MB/s
2026-03-10T08:36:29.100 DEBUG:teuthology.orchestra.run.vm02:> ! mount | grep -v devtmpfs | grep -q /dev/vdd
2026-03-10T08:36:29.145 DEBUG:teuthology.orchestra.run.vm02:> stat /dev/vde
2026-03-10T08:36:29.188 INFO:teuthology.orchestra.run.vm02.stdout: File: /dev/vde
2026-03-10T08:36:29.189 INFO:teuthology.orchestra.run.vm02.stdout: Size: 0 Blocks: 0 IO Block: 4096 block special file
2026-03-10T08:36:29.189 INFO:teuthology.orchestra.run.vm02.stdout:Device: 5h/5d Inode: 27 Links: 1 Device type: fe,40
2026-03-10T08:36:29.189 INFO:teuthology.orchestra.run.vm02.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-10T08:36:29.189 INFO:teuthology.orchestra.run.vm02.stdout:Access: 2026-03-10 08:31:09.738465145 +0000
2026-03-10T08:36:29.189 INFO:teuthology.orchestra.run.vm02.stdout:Modify: 2026-03-10 08:31:08.674465145 +0000
2026-03-10T08:36:29.189 INFO:teuthology.orchestra.run.vm02.stdout:Change: 2026-03-10 08:31:08.674465145 +0000
2026-03-10T08:36:29.189 INFO:teuthology.orchestra.run.vm02.stdout: Birth: -
2026-03-10T08:36:29.189 DEBUG:teuthology.orchestra.run.vm02:> sudo dd if=/dev/vde of=/dev/null count=1
2026-03-10T08:36:29.236 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records in
2026-03-10T08:36:29.236 INFO:teuthology.orchestra.run.vm02.stderr:1+0 records out
2026-03-10T08:36:29.236 INFO:teuthology.orchestra.run.vm02.stderr:512 bytes copied, 0.00015535 s, 3.3 MB/s
2026-03-10T08:36:29.237 DEBUG:teuthology.orchestra.run.vm02:> ! mount | grep -v devtmpfs | grep -q /dev/vde
2026-03-10T08:36:29.281 DEBUG:teuthology.orchestra.run.vm07:> set -ex
2026-03-10T08:36:29.281 DEBUG:teuthology.orchestra.run.vm07:> dd if=/scratch_devs of=/dev/stdout
2026-03-10T08:36:29.284 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T08:36:29.284 DEBUG:teuthology.orchestra.run.vm07:> ls /dev/[sv]d?
2026-03-10T08:36:29.328 INFO:teuthology.orchestra.run.vm07.stdout:/dev/vda
2026-03-10T08:36:29.328 INFO:teuthology.orchestra.run.vm07.stdout:/dev/vdb
2026-03-10T08:36:29.328 INFO:teuthology.orchestra.run.vm07.stdout:/dev/vdc
2026-03-10T08:36:29.328 INFO:teuthology.orchestra.run.vm07.stdout:/dev/vdd
2026-03-10T08:36:29.328 INFO:teuthology.orchestra.run.vm07.stdout:/dev/vde
2026-03-10T08:36:29.328 WARNING:teuthology.misc:Removing root device: /dev/vda from device list
2026-03-10T08:36:29.328 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde']
2026-03-10T08:36:29.328 DEBUG:teuthology.orchestra.run.vm07:> stat /dev/vdb
2026-03-10T08:36:29.372 INFO:teuthology.orchestra.run.vm07.stdout: File: /dev/vdb
2026-03-10T08:36:29.372 INFO:teuthology.orchestra.run.vm07.stdout: Size: 0 Blocks: 0 IO Block: 4096 block special file
2026-03-10T08:36:29.372 INFO:teuthology.orchestra.run.vm07.stdout:Device: 5h/5d Inode: 24 Links: 1 Device type: fe,10
2026-03-10T08:36:29.372 INFO:teuthology.orchestra.run.vm07.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-10T08:36:29.372 INFO:teuthology.orchestra.run.vm07.stdout:Access: 2026-03-10 08:30:38.675870077 +0000
2026-03-10T08:36:29.372 INFO:teuthology.orchestra.run.vm07.stdout:Modify: 2026-03-10 08:30:37.583870077 +0000
2026-03-10T08:36:29.372 INFO:teuthology.orchestra.run.vm07.stdout:Change: 2026-03-10 08:30:37.583870077 +0000
2026-03-10T08:36:29.372 INFO:teuthology.orchestra.run.vm07.stdout: Birth: -
2026-03-10T08:36:29.372 DEBUG:teuthology.orchestra.run.vm07:> sudo dd if=/dev/vdb of=/dev/null count=1
2026-03-10T08:36:29.419 INFO:teuthology.orchestra.run.vm07.stderr:1+0 records in
2026-03-10T08:36:29.419 INFO:teuthology.orchestra.run.vm07.stderr:1+0 records out
2026-03-10T08:36:29.419 INFO:teuthology.orchestra.run.vm07.stderr:512 bytes copied, 0.000172323 s, 3.0 MB/s
2026-03-10T08:36:29.420 DEBUG:teuthology.orchestra.run.vm07:> ! mount | grep -v devtmpfs | grep -q /dev/vdb
2026-03-10T08:36:29.465 DEBUG:teuthology.orchestra.run.vm07:> stat /dev/vdc
2026-03-10T08:36:29.508 INFO:teuthology.orchestra.run.vm07.stdout: File: /dev/vdc
2026-03-10T08:36:29.508 INFO:teuthology.orchestra.run.vm07.stdout: Size: 0 Blocks: 0 IO Block: 4096 block special file
2026-03-10T08:36:29.508 INFO:teuthology.orchestra.run.vm07.stdout:Device: 5h/5d Inode: 25 Links: 1 Device type: fe,20
2026-03-10T08:36:29.508 INFO:teuthology.orchestra.run.vm07.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-10T08:36:29.508 INFO:teuthology.orchestra.run.vm07.stdout:Access: 2026-03-10 08:30:38.695870077 +0000
2026-03-10T08:36:29.508 INFO:teuthology.orchestra.run.vm07.stdout:Modify: 2026-03-10 08:30:37.563870077 +0000
2026-03-10T08:36:29.508 INFO:teuthology.orchestra.run.vm07.stdout:Change: 2026-03-10 08:30:37.563870077 +0000
2026-03-10T08:36:29.508 INFO:teuthology.orchestra.run.vm07.stdout: Birth: -
2026-03-10T08:36:29.508 DEBUG:teuthology.orchestra.run.vm07:> sudo dd if=/dev/vdc of=/dev/null count=1
2026-03-10T08:36:29.556 INFO:teuthology.orchestra.run.vm07.stderr:1+0 records in
2026-03-10T08:36:29.556 INFO:teuthology.orchestra.run.vm07.stderr:1+0 records out
2026-03-10T08:36:29.556 INFO:teuthology.orchestra.run.vm07.stderr:512 bytes copied, 0.000154019 s, 3.3 MB/s
2026-03-10T08:36:29.557 DEBUG:teuthology.orchestra.run.vm07:> ! mount | grep -v devtmpfs | grep -q /dev/vdc
2026-03-10T08:36:29.605 DEBUG:teuthology.orchestra.run.vm07:> stat /dev/vdd
2026-03-10T08:36:29.652 INFO:teuthology.orchestra.run.vm07.stdout: File: /dev/vdd
2026-03-10T08:36:29.652 INFO:teuthology.orchestra.run.vm07.stdout: Size: 0 Blocks: 0 IO Block: 4096 block special file
2026-03-10T08:36:29.652 INFO:teuthology.orchestra.run.vm07.stdout:Device: 5h/5d Inode: 26 Links: 1 Device type: fe,30
2026-03-10T08:36:29.652 INFO:teuthology.orchestra.run.vm07.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-10T08:36:29.652 INFO:teuthology.orchestra.run.vm07.stdout:Access: 2026-03-10 08:30:38.675870077 +0000
2026-03-10T08:36:29.652 INFO:teuthology.orchestra.run.vm07.stdout:Modify: 2026-03-10 08:30:37.575870077 +0000
2026-03-10T08:36:29.652 INFO:teuthology.orchestra.run.vm07.stdout:Change: 2026-03-10 08:30:37.575870077 +0000
2026-03-10T08:36:29.652 INFO:teuthology.orchestra.run.vm07.stdout: Birth: -
2026-03-10T08:36:29.652 DEBUG:teuthology.orchestra.run.vm07:> sudo dd if=/dev/vdd of=/dev/null count=1
2026-03-10T08:36:29.700 INFO:teuthology.orchestra.run.vm07.stderr:1+0 records in
2026-03-10T08:36:29.700 INFO:teuthology.orchestra.run.vm07.stderr:1+0 records out
2026-03-10T08:36:29.700 INFO:teuthology.orchestra.run.vm07.stderr:512 bytes copied, 0.000147095 s, 3.5 MB/s
2026-03-10T08:36:29.700 DEBUG:teuthology.orchestra.run.vm07:> ! mount | grep -v devtmpfs | grep -q /dev/vdd
2026-03-10T08:36:29.744 DEBUG:teuthology.orchestra.run.vm07:> stat /dev/vde
2026-03-10T08:36:29.788 INFO:teuthology.orchestra.run.vm07.stdout: File: /dev/vde
2026-03-10T08:36:29.788 INFO:teuthology.orchestra.run.vm07.stdout: Size: 0 Blocks: 0 IO Block: 4096 block special file
2026-03-10T08:36:29.788 INFO:teuthology.orchestra.run.vm07.stdout:Device: 5h/5d Inode: 27 Links: 1 Device type: fe,40
2026-03-10T08:36:29.788 INFO:teuthology.orchestra.run.vm07.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-03-10T08:36:29.788 INFO:teuthology.orchestra.run.vm07.stdout:Access: 2026-03-10 08:30:38.695870077 +0000
2026-03-10T08:36:29.788 INFO:teuthology.orchestra.run.vm07.stdout:Modify: 2026-03-10 08:30:37.563870077 +0000
2026-03-10T08:36:29.788 INFO:teuthology.orchestra.run.vm07.stdout:Change: 2026-03-10 08:30:37.563870077 +0000
2026-03-10T08:36:29.788 INFO:teuthology.orchestra.run.vm07.stdout: Birth: -
2026-03-10T08:36:29.788 DEBUG:teuthology.orchestra.run.vm07:> sudo dd if=/dev/vde of=/dev/null count=1
2026-03-10T08:36:29.835 INFO:teuthology.orchestra.run.vm07.stderr:1+0 records in
2026-03-10T08:36:29.835 INFO:teuthology.orchestra.run.vm07.stderr:1+0 records out
2026-03-10T08:36:29.835 INFO:teuthology.orchestra.run.vm07.stderr:512 bytes copied, 0.000139691 s, 3.7 MB/s
2026-03-10T08:36:29.836 DEBUG:teuthology.orchestra.run.vm07:> ! mount | grep -v devtmpfs | grep -q /dev/vde
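The sequence above is teuthology's scratch-device vetting pass: every candidate /dev/vd? on each target (the root device /dev/vda is dropped from the list) must stat as a block special file, survive a one-sector read, and be absent from the mount table before it is offered to cephadm as an OSD device. A minimal sketch of that probe, assuming a hypothetical device_is_usable() helper rather than teuthology's actual implementation:

    import subprocess

    def device_is_usable(dev: str) -> bool:
        """Approximate the per-device probe seen above: stat, a one-sector
        read, and a check that the device is not mounted anywhere."""
        # The device node must exist and be stat-able.
        if subprocess.run(["stat", dev], capture_output=True).returncode != 0:
            return False
        # A single 512-byte read proves the device is actually readable.
        if subprocess.run(["sudo", "dd", f"if={dev}", "of=/dev/null", "count=1"],
                          capture_output=True).returncode != 0:
            return False
        # The device must not appear in the mount table (devtmpfs lines excluded),
        # mirroring `! mount | grep -v devtmpfs | grep -q <dev>` in the log.
        mounts = subprocess.run(["mount"], capture_output=True, text=True).stdout
        return not any(dev in line and "devtmpfs" not in line
                       for line in mounts.splitlines())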
2026-03-10T08:36:29.881 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph orch apply osd --all-available-devices
2026-03-10T08:36:30.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:29 vm02 bash[17473]: cluster 2026-03-10T08:36:28.194165+0000 mgr.vm02.ttibzz (mgr.14195) 53 : cluster [DBG] pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T08:36:31.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:30 vm02 bash[17473]: cluster 2026-03-10T08:36:30.194359+0000 mgr.vm02.ttibzz (mgr.14195) 54 : cluster [DBG] pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T08:36:33.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:32 vm02 bash[17473]: audit 2026-03-10T08:36:31.731794+0000 mon.vm02 (mon.0) 330 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:33.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:32 vm02 bash[17473]: audit 2026-03-10T08:36:31.739220+0000 mon.vm02 (mon.0) 331 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:33.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:32 vm02 bash[17473]: audit 2026-03-10T08:36:31.757946+0000 mon.vm02 (mon.0) 332 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:33.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:32 vm02 bash[17473]: audit 2026-03-10T08:36:31.763062+0000 mon.vm02 (mon.0) 333 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:33.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:32 vm02 bash[17473]: audit 2026-03-10T08:36:31.764025+0000 mon.vm02 (mon.0) 334 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:36:33.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:32 vm02 bash[17473]: audit 2026-03-10T08:36:31.764476+0000 mon.vm02 (mon.0) 335 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T08:36:33.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:32 vm02 bash[17473]: audit 2026-03-10T08:36:31.768524+0000 mon.vm02 (mon.0) 336 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:33.789 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:33 vm02 bash[17473]: cluster 2026-03-10T08:36:32.194598+0000 mgr.vm02.ttibzz (mgr.14195) 55 : cluster [DBG] pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T08:36:34.547 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm07/config
2026-03-10T08:36:34.808 INFO:teuthology.orchestra.run.vm07.stdout:Scheduled osd.all-available-devices update...
2026-03-10T08:36:34.878 INFO:tasks.cephadm:Waiting for 8 OSDs to come up...
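The `ceph orch apply osd --all-available-devices` call only records the service spec; OSD creation then happens asynchronously in the cephadm mgr module, so the task polls `ceph osd stat -f json` until the expected count is reached (8 here: two hosts times the four scratch devices enumerated above). A sketch of such a polling loop, reusing the same cephadm shell invocation seen in the log; the real tasks.cephadm logic may differ in detail:

    import json, subprocess, time

    # Command prefix copied from the DEBUG lines below.
    CEPHADM = ["sudo", "/home/ubuntu/cephtest/cephadm",
               "--image", "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df",
               "shell", "-c", "/etc/ceph/ceph.conf",
               "-k", "/etc/ceph/ceph.client.admin.keyring",
               "--fsid", "e750d050-1c5b-11f1-9e63-531fde0192f6", "--"]

    def wait_for_osds(want: int, timeout: float = 1800.0, interval: float = 5.0) -> dict:
        """Poll `ceph osd stat -f json` until `want` OSDs are up, as the
        'Waiting for 8 OSDs to come up...' loop below does (sketch only)."""
        stat = {}
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            out = subprocess.run(CEPHADM + ["ceph", "osd", "stat", "-f", "json"],
                                 capture_output=True, text=True, check=True).stdout
            stat = json.loads(out.strip() or "{}")
            if stat.get("num_up_osds", 0) >= want:
                return stat
            time.sleep(interval)
        raise TimeoutError(f"only saw {stat.get('num_up_osds', 0)}/{want} OSDs up")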
2026-03-10T08:36:34.879 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph osd stat -f json
2026-03-10T08:36:35.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:34 vm02 bash[17473]: audit 2026-03-10T08:36:34.234656+0000 mon.vm02 (mon.0) 337 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:36:36.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:35 vm02 bash[17473]: cluster 2026-03-10T08:36:34.194775+0000 mgr.vm02.ttibzz (mgr.14195) 56 : cluster [DBG] pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T08:36:36.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:35 vm02 bash[17473]: audit 2026-03-10T08:36:34.799755+0000 mgr.vm02.ttibzz (mgr.14195) 57 : audit [DBG] from='client.24103 -' entity='client.admin' cmd=[{"prefix": "orch apply osd", "all_available_devices": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:36:36.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:35 vm02 bash[17473]: cephadm 2026-03-10T08:36:34.800734+0000 mgr.vm02.ttibzz (mgr.14195) 58 : cephadm [INF] Marking host: vm02 for OSDSpec preview refresh.
2026-03-10T08:36:36.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:35 vm02 bash[17473]: cephadm 2026-03-10T08:36:34.800757+0000 mgr.vm02.ttibzz (mgr.14195) 59 : cephadm [INF] Marking host: vm07 for OSDSpec preview refresh.
2026-03-10T08:36:36.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:35 vm02 bash[17473]: cephadm 2026-03-10T08:36:34.800904+0000 mgr.vm02.ttibzz (mgr.14195) 60 : cephadm [INF] Saving service osd.all-available-devices spec with placement *
2026-03-10T08:36:36.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:35 vm02 bash[17473]: audit 2026-03-10T08:36:34.803834+0000 mon.vm02 (mon.0) 338 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:36.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:35 vm02 bash[17473]: audit 2026-03-10T08:36:34.804532+0000 mon.vm02 (mon.0) 339 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:36:38.289 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:37 vm02 bash[17473]: cluster 2026-03-10T08:36:36.194974+0000 mgr.vm02.ttibzz (mgr.14195) 61 : cluster [DBG] pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T08:36:39.511 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config
2026-03-10T08:36:39.834 INFO:teuthology.orchestra.run.vm02.stdout:
2026-03-10T08:36:39.913 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0}
2026-03-10T08:36:40.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:39 vm02 bash[17473]: cluster 2026-03-10T08:36:38.195178+0000 mgr.vm02.ttibzz (mgr.14195) 62 : cluster [DBG] pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T08:36:40.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:39 vm02 bash[17473]: audit 2026-03-10T08:36:39.795883+0000 mon.vm02 (mon.0) 340 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:40.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:39 vm02 bash[17473]: audit 2026-03-10T08:36:39.801511+0000 mon.vm02 (mon.0) 341 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:40.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:39 vm02 bash[17473]: audit 2026-03-10T08:36:39.810229+0000 mon.vm02 (mon.0) 342 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:40.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:39 vm02 bash[17473]: audit 2026-03-10T08:36:39.813656+0000 mon.vm02 (mon.0) 343 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:40.914 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph osd stat -f json
2026-03-10T08:36:41.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:40 vm02 bash[17473]: audit 2026-03-10T08:36:39.830287+0000 mon.vm02 (mon.0) 344 : audit [DBG] from='client.? 192.168.123.102:0/4012478334' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T08:36:41.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:40 vm02 bash[17473]: audit 2026-03-10T08:36:40.156640+0000 mon.vm02 (mon.0) 345 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:41.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:40 vm02 bash[17473]: audit 2026-03-10T08:36:40.161083+0000 mon.vm02 (mon.0) 346 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:41.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:40 vm02 bash[17473]: audit 2026-03-10T08:36:40.165210+0000 mon.vm02 (mon.0) 347 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:41.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:40 vm02 bash[17473]: audit 2026-03-10T08:36:40.169352+0000 mon.vm02 (mon.0) 348 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:41.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:40 vm02 bash[17473]: audit 2026-03-10T08:36:40.170062+0000 mon.vm02 (mon.0) 349 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:36:41.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:40 vm02 bash[17473]: audit 2026-03-10T08:36:40.170481+0000 mon.vm02 (mon.0) 350 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T08:36:41.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:40 vm02 bash[17473]: audit 2026-03-10T08:36:40.173943+0000 mon.vm02 (mon.0) 351 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:36:41.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:40 vm02 bash[17473]: audit 2026-03-10T08:36:40.175496+0000 mon.vm02 (mon.0) 352 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T08:36:41.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:40 vm02 bash[17473]: audit 2026-03-10T08:36:40.177375+0000 mon.vm02 (mon.0) 353 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-10T08:36:41.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:40 vm02 bash[17473]: audit 2026-03-10T08:36:40.177746+0000 mon.vm02 (mon.0) 354 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:36:41.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:40 vm02 bash[17473]: audit 2026-03-10T08:36:40.179351+0000 mon.vm02 (mon.0) 355 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch
2026-03-10T08:36:41.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:40 vm02 bash[17473]: audit 2026-03-10T08:36:40.179821+0000 mon.vm02 (mon.0) 356 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:36:42.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:41 vm02 bash[17473]: cluster 2026-03-10T08:36:40.195366+0000 mgr.vm02.ttibzz (mgr.14195) 63 : cluster [DBG] pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T08:36:44.289 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:43 vm02 bash[17473]: cluster 2026-03-10T08:36:42.195634+0000 mgr.vm02.ttibzz (mgr.14195) 64 : cluster [DBG] pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T08:36:44.972 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config
2026-03-10T08:36:45.267 INFO:teuthology.orchestra.run.vm02.stdout:
2026-03-10T08:36:45.328 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0}
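Both polls so far still report "num_osds":0: the spec has been saved but ceph-volume has not yet created anything. The JSON fields worth watching are num_osds (OSDs that exist in the osdmap) versus num_up_osds (OSD daemons that have actually booted). A small illustration of reading those fields; the osd_progress() helper is hypothetical:

    import json

    # Sample copied from the poll output above.
    sample = ('{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,'
              '"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0}')

    def osd_progress(stat_json: str) -> tuple[int, int, int]:
        """Return (created, up, in) OSD counts from `ceph osd stat -f json`."""
        s = json.loads(stat_json)
        return s["num_osds"], s["num_up_osds"], s["num_in_osds"]

    assert osd_progress(sample) == (0, 0, 0)  # nothing created yet at epoch 5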
2026-03-10T08:36:46.127 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:45 vm02 bash[17473]: cluster 2026-03-10T08:36:44.195875+0000 mgr.vm02.ttibzz (mgr.14195) 65 : cluster [DBG] pgmap v13: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T08:36:46.127 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:45 vm02 bash[17473]: audit 2026-03-10T08:36:45.262036+0000 mon.vm02 (mon.0) 357 : audit [DBG] from='client.? 192.168.123.102:0/75900207' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T08:36:46.330 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph osd stat -f json
2026-03-10T08:36:47.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:46 vm02 bash[17473]: audit 2026-03-10T08:36:46.027736+0000 mon.vm02 (mon.0) 358 : audit [INF] from='client.? 192.168.123.102:0/4220473845' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "1dafd277-7c1e-4421-9e79-1d111d605b64"}]: dispatch
2026-03-10T08:36:47.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:46 vm02 bash[17473]: audit 2026-03-10T08:36:46.031290+0000 mon.vm02 (mon.0) 359 : audit [INF] from='client.? 192.168.123.102:0/4220473845' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "1dafd277-7c1e-4421-9e79-1d111d605b64"}]': finished
2026-03-10T08:36:47.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:46 vm02 bash[17473]: cluster 2026-03-10T08:36:46.033383+0000 mon.vm02 (mon.0) 360 : cluster [DBG] osdmap e6: 1 total, 0 up, 1 in
2026-03-10T08:36:47.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:46 vm02 bash[17473]: audit 2026-03-10T08:36:46.033819+0000 mon.vm02 (mon.0) 361 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T08:36:47.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:46 vm02 bash[17473]: audit 2026-03-10T08:36:46.054539+0000 mon.vm02 (mon.0) 362 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "1b1ad213-cbed-4d89-b55b-35bf90079e6d"}]: dispatch
2026-03-10T08:36:47.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:46 vm02 bash[17473]: audit 2026-03-10T08:36:46.057023+0000 mon.vm07 (mon.1) 2 : audit [INF] from='client.? 192.168.123.107:0/2026212271' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "1b1ad213-cbed-4d89-b55b-35bf90079e6d"}]: dispatch
2026-03-10T08:36:47.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:46 vm02 bash[17473]: audit 2026-03-10T08:36:46.058155+0000 mon.vm02 (mon.0) 363 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "1b1ad213-cbed-4d89-b55b-35bf90079e6d"}]': finished
2026-03-10T08:36:47.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:46 vm02 bash[17473]: cluster 2026-03-10T08:36:46.060137+0000 mon.vm02 (mon.0) 364 : cluster [DBG] osdmap e7: 2 total, 0 up, 2 in
2026-03-10T08:36:47.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:46 vm02 bash[17473]: audit 2026-03-10T08:36:46.060495+0000 mon.vm02 (mon.0) 365 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T08:36:47.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:46 vm02 bash[17473]: audit 2026-03-10T08:36:46.060567+0000 mon.vm02 (mon.0) 366 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T08:36:47.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:46 vm02 bash[17473]: audit 2026-03-10T08:36:46.623716+0000 mon.vm02 (mon.0) 367 : audit [DBG] from='client.? 192.168.123.102:0/404296783' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T08:36:47.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:46 vm02 bash[17473]: audit 2026-03-10T08:36:46.651150+0000 mon.vm07 (mon.1) 3 : audit [DBG] from='client.? 192.168.123.107:0/1036577194' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T08:36:48.289 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:47 vm02 bash[17473]: cluster 2026-03-10T08:36:46.196092+0000 mgr.vm02.ttibzz (mgr.14195) 66 : cluster [DBG] pgmap v16: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T08:36:50.039 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:49 vm02 bash[17473]: cluster 2026-03-10T08:36:48.196280+0000 mgr.vm02.ttibzz (mgr.14195) 67 : cluster [DBG] pgmap v17: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T08:36:50.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:49 vm02 bash[17473]: audit 2026-03-10T08:36:49.235023+0000 mon.vm02 (mon.0) 368 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:36:50.970 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config
2026-03-10T08:36:51.229 INFO:teuthology.orchestra.run.vm02.stdout:
2026-03-10T08:36:51.247 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:50 vm02 bash[17473]: audit 2026-03-10T08:36:49.961723+0000 mon.vm02 (mon.0) 369 : audit [INF] from='client.? 192.168.123.102:0/3874185523' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "0b4900c4-461a-401b-8901-8eb5567ddaa3"}]: dispatch
2026-03-10T08:36:51.247 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:50 vm02 bash[17473]: audit 2026-03-10T08:36:49.963984+0000 mon.vm02 (mon.0) 370 : audit [INF] from='client.? 192.168.123.102:0/3874185523' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "0b4900c4-461a-401b-8901-8eb5567ddaa3"}]': finished
2026-03-10T08:36:51.247 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:50 vm02 bash[17473]: cluster 2026-03-10T08:36:49.966454+0000 mon.vm02 (mon.0) 371 : cluster [DBG] osdmap e8: 3 total, 0 up, 3 in
2026-03-10T08:36:51.247 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:50 vm02 bash[17473]: audit 2026-03-10T08:36:49.966587+0000 mon.vm02 (mon.0) 372 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T08:36:51.247 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:50 vm02 bash[17473]: audit 2026-03-10T08:36:49.966662+0000 mon.vm02 (mon.0) 373 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T08:36:51.247 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:50 vm02 bash[17473]: audit 2026-03-10T08:36:49.966714+0000 mon.vm02 (mon.0) 374 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T08:36:51.247 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:50 vm02 bash[17473]: audit 2026-03-10T08:36:50.009676+0000 mon.vm02 (mon.0) 375 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "1f0eade6-7d8c-4351-968a-f172320dc16e"}]: dispatch
2026-03-10T08:36:51.248 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:50 vm02 bash[17473]: audit 2026-03-10T08:36:50.012087+0000 mon.vm02 (mon.0) 376 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "1f0eade6-7d8c-4351-968a-f172320dc16e"}]': finished
2026-03-10T08:36:51.248 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:50 vm02 bash[17473]: audit 2026-03-10T08:36:50.012669+0000 mon.vm07 (mon.1) 4 : audit [INF] from='client.? 192.168.123.107:0/1686225145' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "1f0eade6-7d8c-4351-968a-f172320dc16e"}]: dispatch
2026-03-10T08:36:51.248 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:50 vm02 bash[17473]: cluster 2026-03-10T08:36:50.014577+0000 mon.vm02 (mon.0) 377 : cluster [DBG] osdmap e9: 4 total, 0 up, 4 in
2026-03-10T08:36:51.248 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:50 vm02 bash[17473]: audit 2026-03-10T08:36:50.014712+0000 mon.vm02 (mon.0) 378 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T08:36:51.248 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:50 vm02 bash[17473]: audit 2026-03-10T08:36:50.014776+0000 mon.vm02 (mon.0) 379 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T08:36:51.248 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:50 vm02 bash[17473]: audit 2026-03-10T08:36:50.014831+0000 mon.vm02 (mon.0) 380 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T08:36:51.248 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:50 vm02 bash[17473]: audit 2026-03-10T08:36:50.014880+0000 mon.vm02 (mon.0) 381 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T08:36:51.248 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:50 vm02 bash[17473]: audit 2026-03-10T08:36:50.573174+0000 mon.vm02 (mon.0) 382 : audit [DBG] from='client.? 192.168.123.102:0/3996250158' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T08:36:51.248 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:50 vm02 bash[17473]: audit 2026-03-10T08:36:50.592271+0000 mon.vm07 (mon.1) 5 : audit [DBG] from='client.? 192.168.123.107:0/1741419168' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T08:36:51.294 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":9,"num_osds":4,"num_up_osds":0,"osd_up_since":0,"num_in_osds":4,"osd_in_since":1773131810,"num_remapped_pgs":0}
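The audit trail above shows the creation handshake: on each host, ceph-volume authenticates as client.bootstrap-osd and calls `ceph osd new <uuid>` to reserve an OSD id, and each 'finished' commits a new osdmap epoch, so the map grows one OSD at a time (osdmap e6: 1 total through e9: 4 total so far). A sketch that recovers this progression from the mon cluster-log lines; OSDMAP_RE and osdmap_progression() are illustrative helpers, not teuthology code:

    import re

    # Matches the mon cluster-log lines seen above,
    # e.g. "osdmap e9: 4 total, 0 up, 4 in".
    OSDMAP_RE = re.compile(r"osdmap e(\d+): (\d+) total, (\d+) up, (\d+) in")

    def osdmap_progression(log_text: str) -> list[tuple[int, int, int, int]]:
        """Return (epoch, total, up, in) tuples in the order they were logged."""
        return [tuple(map(int, m.groups())) for m in OSDMAP_RE.finditer(log_text)]

    # Each `osd new ... finished` audit entry commits a new osdmap epoch,
    # so `total` should climb monotonically toward the 8 expected OSDs.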
192.168.123.102:0/2919566460' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "046bd431-09bd-47e8-9f2b-13ec28873f26"}]: dispatch 2026-03-10T08:36:54.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:53 vm02 bash[17473]: audit 2026-03-10T08:36:53.714849+0000 mon.vm02 (mon.0) 384 : audit [INF] from='client.? 192.168.123.102:0/2919566460' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "046bd431-09bd-47e8-9f2b-13ec28873f26"}]: dispatch 2026-03-10T08:36:54.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:53 vm02 bash[17473]: audit 2026-03-10T08:36:53.718158+0000 mon.vm02 (mon.0) 385 : audit [INF] from='client.? 192.168.123.102:0/2919566460' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "046bd431-09bd-47e8-9f2b-13ec28873f26"}]': finished 2026-03-10T08:36:54.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:53 vm02 bash[17473]: audit 2026-03-10T08:36:53.718158+0000 mon.vm02 (mon.0) 385 : audit [INF] from='client.? 192.168.123.102:0/2919566460' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "046bd431-09bd-47e8-9f2b-13ec28873f26"}]': finished 2026-03-10T08:36:54.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:53 vm02 bash[17473]: cluster 2026-03-10T08:36:53.720637+0000 mon.vm02 (mon.0) 386 : cluster [DBG] osdmap e10: 5 total, 0 up, 5 in 2026-03-10T08:36:54.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:53 vm02 bash[17473]: cluster 2026-03-10T08:36:53.720637+0000 mon.vm02 (mon.0) 386 : cluster [DBG] osdmap e10: 5 total, 0 up, 5 in 2026-03-10T08:36:54.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:53 vm02 bash[17473]: audit 2026-03-10T08:36:53.721159+0000 mon.vm02 (mon.0) 387 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T08:36:54.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:53 vm02 bash[17473]: audit 2026-03-10T08:36:53.721159+0000 mon.vm02 (mon.0) 387 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T08:36:54.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:53 vm02 bash[17473]: audit 2026-03-10T08:36:53.721628+0000 mon.vm02 (mon.0) 388 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T08:36:54.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:53 vm02 bash[17473]: audit 2026-03-10T08:36:53.721628+0000 mon.vm02 (mon.0) 388 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T08:36:54.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:53 vm02 bash[17473]: audit 2026-03-10T08:36:53.721963+0000 mon.vm02 (mon.0) 389 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T08:36:54.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:53 vm02 bash[17473]: audit 2026-03-10T08:36:53.721963+0000 mon.vm02 (mon.0) 389 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T08:36:54.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:53 vm02 bash[17473]: audit 2026-03-10T08:36:53.722297+0000 mon.vm02 (mon.0) 390 : audit [DBG] from='mgr.14195 
192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T08:36:54.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:53 vm02 bash[17473]: audit 2026-03-10T08:36:53.722297+0000 mon.vm02 (mon.0) 390 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T08:36:54.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:53 vm02 bash[17473]: audit 2026-03-10T08:36:53.722636+0000 mon.vm02 (mon.0) 391 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T08:36:54.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:53 vm02 bash[17473]: audit 2026-03-10T08:36:53.722636+0000 mon.vm02 (mon.0) 391 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T08:36:55.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:54 vm02 bash[17473]: audit 2026-03-10T08:36:54.202291+0000 mon.vm02 (mon.0) 392 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "121b84e4-6673-42a6-a5d5-5b1356bedbf6"}]: dispatch 2026-03-10T08:36:55.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:54 vm02 bash[17473]: audit 2026-03-10T08:36:54.202291+0000 mon.vm02 (mon.0) 392 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "121b84e4-6673-42a6-a5d5-5b1356bedbf6"}]: dispatch 2026-03-10T08:36:55.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:54 vm02 bash[17473]: audit 2026-03-10T08:36:54.205305+0000 mon.vm07 (mon.1) 6 : audit [INF] from='client.? 192.168.123.107:0/505641357' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "121b84e4-6673-42a6-a5d5-5b1356bedbf6"}]: dispatch 2026-03-10T08:36:55.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:54 vm02 bash[17473]: audit 2026-03-10T08:36:54.205305+0000 mon.vm07 (mon.1) 6 : audit [INF] from='client.? 192.168.123.107:0/505641357' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "121b84e4-6673-42a6-a5d5-5b1356bedbf6"}]: dispatch 2026-03-10T08:36:55.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:54 vm02 bash[17473]: audit 2026-03-10T08:36:54.205417+0000 mon.vm02 (mon.0) 393 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "121b84e4-6673-42a6-a5d5-5b1356bedbf6"}]': finished 2026-03-10T08:36:55.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:54 vm02 bash[17473]: audit 2026-03-10T08:36:54.205417+0000 mon.vm02 (mon.0) 393 : audit [INF] from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "121b84e4-6673-42a6-a5d5-5b1356bedbf6"}]': finished 2026-03-10T08:36:55.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:54 vm02 bash[17473]: cluster 2026-03-10T08:36:54.208389+0000 mon.vm02 (mon.0) 394 : cluster [DBG] osdmap e11: 6 total, 0 up, 6 in 2026-03-10T08:36:55.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:54 vm02 bash[17473]: cluster 2026-03-10T08:36:54.208389+0000 mon.vm02 (mon.0) 394 : cluster [DBG] osdmap e11: 6 total, 0 up, 6 in 2026-03-10T08:36:55.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:54 vm02 bash[17473]: audit 2026-03-10T08:36:54.208571+0000 mon.vm02 (mon.0) 395 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T08:36:55.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:54 vm02 bash[17473]: audit 2026-03-10T08:36:54.208571+0000 mon.vm02 (mon.0) 395 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T08:36:55.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:54 vm02 bash[17473]: audit 2026-03-10T08:36:54.208682+0000 mon.vm02 (mon.0) 396 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T08:36:55.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:54 vm02 bash[17473]: audit 2026-03-10T08:36:54.208682+0000 mon.vm02 (mon.0) 396 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T08:36:55.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:54 vm02 bash[17473]: audit 2026-03-10T08:36:54.208752+0000 mon.vm02 (mon.0) 397 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T08:36:55.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:54 vm02 bash[17473]: audit 2026-03-10T08:36:54.208752+0000 mon.vm02 (mon.0) 397 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T08:36:55.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:54 vm02 bash[17473]: audit 2026-03-10T08:36:54.208816+0000 mon.vm02 (mon.0) 398 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T08:36:55.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:54 vm02 bash[17473]: audit 2026-03-10T08:36:54.208816+0000 mon.vm02 (mon.0) 398 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T08:36:55.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:54 vm02 bash[17473]: audit 2026-03-10T08:36:54.208880+0000 mon.vm02 (mon.0) 399 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T08:36:55.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:54 vm02 bash[17473]: audit 2026-03-10T08:36:54.208880+0000 mon.vm02 (mon.0) 399 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T08:36:55.290 
2026-03-10T08:36:55.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:54 vm02 bash[17473]: audit 2026-03-10T08:36:54.208944+0000 mon.vm02 (mon.0) 400 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T08:36:55.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:54 vm02 bash[17473]: audit 2026-03-10T08:36:54.334969+0000 mon.vm02 (mon.0) 401 : audit [DBG] from='client.? 192.168.123.102:0/1010164165' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T08:36:55.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:54 vm02 bash[17473]: audit 2026-03-10T08:36:54.846638+0000 mon.vm07 (mon.1) 7 : audit [DBG] from='client.? 192.168.123.107:0/1823515045' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T08:36:56.289 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:55 vm02 bash[17473]: cluster 2026-03-10T08:36:54.196870+0000 mgr.vm02.ttibzz (mgr.14195) 70 : cluster [DBG] pgmap v23: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T08:36:56.923 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config
2026-03-10T08:36:57.162 INFO:teuthology.orchestra.run.vm02.stdout:
2026-03-10T08:36:57.181 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:56 vm02 bash[17473]: cluster 2026-03-10T08:36:56.197065+0000 mgr.vm02.ttibzz (mgr.14195) 71 : cluster [DBG] pgmap v25: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T08:36:57.229 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":11,"num_osds":6,"num_up_osds":0,"osd_up_since":0,"num_in_osds":6,"osd_in_since":1773131814,"num_remapped_pgs":0}
2026-03-10T08:36:58.229 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph osd stat -f json
2026-03-10T08:36:58.238 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:57 vm02 bash[17473]: audit 2026-03-10T08:36:57.159217+0000 mon.vm02 (mon.0) 402 : audit [DBG] from='client.? 192.168.123.102:0/2531758956' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T08:36:58.238 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:57 vm02 bash[17473]: audit 2026-03-10T08:36:57.700503+0000 mon.vm02 (mon.0) 403 : audit [INF] from='client.? 192.168.123.102:0/1461457541' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "2846d411-a5e4-48a3-92a9-ede09394307e"}]: dispatch
2026-03-10T08:36:58.238 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:57 vm02 bash[17473]: audit 2026-03-10T08:36:57.703847+0000 mon.vm02 (mon.0) 404 : audit [INF] from='client.? 192.168.123.102:0/1461457541' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "2846d411-a5e4-48a3-92a9-ede09394307e"}]': finished
2026-03-10T08:36:58.238 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:57 vm02 bash[17473]: cluster 2026-03-10T08:36:57.705883+0000 mon.vm02 (mon.0) 405 : cluster [DBG] osdmap e12: 7 total, 0 up, 7 in
2026-03-10T08:36:58.238 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:57 vm02 bash[17473]: audit 2026-03-10T08:36:57.706216+0000 mon.vm02 (mon.0) 406 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T08:36:58.239 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:57 vm02 bash[17473]: audit 2026-03-10T08:36:57.706358+0000 mon.vm02 (mon.0) 407 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T08:36:58.239 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:57 vm02 bash[17473]: audit 2026-03-10T08:36:57.706479+0000 mon.vm02 (mon.0) 408 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T08:36:58.239 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:57 vm02 bash[17473]: audit 2026-03-10T08:36:57.706592+0000 mon.vm02 (mon.0) 409 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-10T08:36:58.239 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:57 vm02 bash[17473]: audit 2026-03-10T08:36:57.706709+0000 mon.vm02 (mon.0) 410 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T08:36:58.239 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:57 vm02 bash[17473]: audit 2026-03-10T08:36:57.706829+0000 mon.vm02 (mon.0) 411 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T08:36:58.239 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:57 vm02 bash[17473]: audit 2026-03-10T08:36:57.706946+0000 mon.vm02 (mon.0) 412 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T08:36:59.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:58 vm02 bash[17473]: audit 2026-03-10T08:36:58.181092+0000 mon.vm02 (mon.0) 413 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "94f52a63-655e-4086-bdc2-473506492269"}]: dispatch
2026-03-10T08:36:59.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:58 vm02 bash[17473]: audit 2026-03-10T08:36:58.184069+0000 mon.vm02 (mon.0) 414 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "94f52a63-655e-4086-bdc2-473506492269"}]': finished
2026-03-10T08:36:59.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:58 vm02 bash[17473]: audit 2026-03-10T08:36:58.184128+0000 mon.vm07 (mon.1) 8 : audit [INF] from='client.? 192.168.123.107:0/3466930583' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "94f52a63-655e-4086-bdc2-473506492269"}]: dispatch
2026-03-10T08:36:59.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:58 vm02 bash[17473]: cluster 2026-03-10T08:36:58.186786+0000 mon.vm02 (mon.0) 415 : cluster [DBG] osdmap e13: 8 total, 0 up, 8 in
2026-03-10T08:36:59.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:58 vm02 bash[17473]: audit 2026-03-10T08:36:58.186932+0000 mon.vm02 (mon.0) 416 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T08:36:59.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:58 vm02 bash[17473]: audit 2026-03-10T08:36:58.187034+0000 mon.vm02 (mon.0) 417 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T08:36:59.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:58 vm02 bash[17473]: audit 2026-03-10T08:36:58.187137+0000 mon.vm02 (mon.0) 418 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T08:36:59.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:58 vm02 bash[17473]: audit 2026-03-10T08:36:58.187213+0000 mon.vm02 (mon.0) 419 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T08:36:59.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:58 vm02 bash[17473]: audit 2026-03-10T08:36:58.187213+0000 mon.vm02 (mon.0) 419 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T08:36:59.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:58 vm02 bash[17473]: audit 2026-03-10T08:36:58.187285+0000 mon.vm02 (mon.0) 420 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T08:36:59.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:58 vm02 bash[17473]: audit 2026-03-10T08:36:58.187285+0000 mon.vm02 (mon.0) 420 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T08:36:59.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:58 vm02 bash[17473]: audit 2026-03-10T08:36:58.187355+0000 mon.vm02 (mon.0) 421 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T08:36:59.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:58 vm02 bash[17473]: audit 2026-03-10T08:36:58.187355+0000 mon.vm02 (mon.0) 421 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T08:36:59.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:58 vm02 bash[17473]: audit 2026-03-10T08:36:58.187425+0000 mon.vm02 (mon.0) 422 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T08:36:59.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:58 vm02 bash[17473]: audit 2026-03-10T08:36:58.187425+0000 mon.vm02 (mon.0) 422 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T08:36:59.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:58 vm02 bash[17473]: audit 2026-03-10T08:36:58.187514+0000 mon.vm02 (mon.0) 423 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T08:36:59.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:58 vm02 bash[17473]: audit 2026-03-10T08:36:58.187514+0000 mon.vm02 (mon.0) 423 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T08:36:59.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:58 vm02 bash[17473]: cluster 2026-03-10T08:36:58.197222+0000 mgr.vm02.ttibzz (mgr.14195) 72 : cluster [DBG] pgmap v28: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T08:36:59.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:58 vm02 bash[17473]: cluster 2026-03-10T08:36:58.197222+0000 mgr.vm02.ttibzz (mgr.14195) 72 : cluster [DBG] pgmap v28: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T08:36:59.290 
2026-03-10T08:36:59.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:58 vm02 bash[17473]: audit 2026-03-10T08:36:58.391657+0000 mon.vm07 (mon.1) 9 : audit [DBG] from='client.? 192.168.123.102:0/3375282373' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T08:36:59.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:36:58 vm02 bash[17473]: audit 2026-03-10T08:36:58.764390+0000 mon.vm07 (mon.1) 10 : audit [DBG] from='client.? 192.168.123.107:0/624792352' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-10T08:37:01.539 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:01 vm02 bash[17473]: cluster 2026-03-10T08:37:00.197387+0000 mgr.vm02.ttibzz (mgr.14195) 73 : cluster [DBG] pgmap v29: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T08:37:02.858 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config
2026-03-10T08:37:03.101 INFO:teuthology.orchestra.run.vm02.stdout:
2026-03-10T08:37:03.170 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773131818,"num_remapped_pgs":0}
2026-03-10T08:37:03.539 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:03 vm02 bash[17473]: cluster 2026-03-10T08:37:02.197597+0000 mgr.vm02.ttibzz (mgr.14195) 74 : cluster [DBG] pgmap v30: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T08:37:03.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:03 vm02 bash[17473]: audit 2026-03-10T08:37:03.097673+0000 mon.vm02 (mon.0) 424 : audit [DBG] from='client.? 192.168.123.102:0/2236517379' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T08:37:04.171 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph osd stat -f json
2026-03-10T08:37:04.539 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:04 vm02 bash[17473]: audit 2026-03-10T08:37:04.235200+0000 mon.vm02 (mon.0) 425 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:37:05.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:05 vm02 bash[17473]: cluster 2026-03-10T08:37:04.197810+0000 mgr.vm02.ttibzz (mgr.14195) 75 : cluster [DBG] pgmap v31: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T08:37:06.539 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:06 vm02 bash[17473]: audit 2026-03-10T08:37:06.299613+0000 mon.vm02 (mon.0) 426 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch
2026-03-10T08:37:06.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:06 vm02 bash[17473]: audit 2026-03-10T08:37:06.300163+0000 mon.vm02 (mon.0) 427 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:37:07.167 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:07 vm02 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-03-10T08:37:07.429 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:07 vm02 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-03-10T08:37:07.754 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:07 vm02 bash[17473]: cluster 2026-03-10T08:37:06.197993+0000 mgr.vm02.ttibzz (mgr.14195) 76 : cluster [DBG] pgmap v32: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T08:37:07.754 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:07 vm02 bash[17473]: cephadm 2026-03-10T08:37:06.300597+0000 mgr.vm02.ttibzz (mgr.14195) 77 : cephadm [INF] Deploying daemon osd.0 on vm02
2026-03-10T08:37:07.754 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:07 vm02 bash[17473]: audit 2026-03-10T08:37:07.064977+0000 mon.vm02 (mon.0) 428 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch
2026-03-10T08:37:07.754 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:07 vm02 bash[17473]: audit 2026-03-10T08:37:07.068089+0000 mon.vm02 (mon.0) 429 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:37:07.889 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config
2026-03-10T08:37:07.968 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:37:07 vm07 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-03-10T08:37:08.242 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:37:08 vm07 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-03-10T08:37:08.433 INFO:teuthology.orchestra.run.vm02.stdout:
2026-03-10T08:37:08.585 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:08 vm02 bash[17473]: cephadm 2026-03-10T08:37:07.069616+0000 mgr.vm02.ttibzz (mgr.14195) 78 : cephadm [INF] Deploying daemon osd.1 on vm07
2026-03-10T08:37:08.585 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:08 vm02 bash[17473]: audit 2026-03-10T08:37:07.433042+0000 mon.vm02 (mon.0) 430 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:37:08.585 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:08 vm02 bash[17473]: audit 2026-03-10T08:37:07.443793+0000 mon.vm02 (mon.0) 431 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:37:08.585 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:08 vm02 bash[17473]: audit 2026-03-10T08:37:07.444613+0000 mon.vm02 (mon.0) 432 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch
2026-03-10T08:37:08.585 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:08 vm02 bash[17473]: audit 2026-03-10T08:37:07.445194+0000 mon.vm02 (mon.0) 433 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:37:08.585 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:08 vm02 bash[17473]: cephadm 2026-03-10T08:37:07.445643+0000 mgr.vm02.ttibzz (mgr.14195) 79 : cephadm [INF] Deploying daemon osd.2 on vm02
2026-03-10T08:37:08.585 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:08 vm02 bash[17473]: audit 2026-03-10T08:37:08.334594+0000 mon.vm02 (mon.0) 434 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:37:08.585 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:08 vm02 bash[17473]: audit 2026-03-10T08:37:08.342855+0000 mon.vm02 (mon.0) 435 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:37:08.585 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:08 vm02 bash[17473]: audit 2026-03-10T08:37:08.344060+0000 mon.vm02 (mon.0) 436 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch
2026-03-10T08:37:08.585 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:08 vm02 bash[17473]: audit 2026-03-10T08:37:08.344842+0000 mon.vm02 (mon.0) 437 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:37:08.586 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:08 vm02 bash[17473]: audit 2026-03-10T08:37:08.427875+0000 mon.vm02 (mon.0) 438 : audit [DBG] from='client.? 192.168.123.102:0/42592651' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T08:37:08.742 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1773131818,"num_remapped_pgs":0}
2026-03-10T08:37:08.837 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:08 vm02 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-03-10T08:37:08.996 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:08 vm02 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-03-10T08:37:09.616 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:37:09 vm07 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-03-10T08:37:09.617 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:37:09 vm07 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-03-10T08:37:09.698 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:09 vm02 bash[17473]: cluster 2026-03-10T08:37:08.198191+0000 mgr.vm02.ttibzz (mgr.14195) 80 : cluster [DBG] pgmap v33: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T08:37:09.698 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:09 vm02 bash[17473]: cephadm 2026-03-10T08:37:08.345281+0000 mgr.vm02.ttibzz (mgr.14195) 81 : cephadm [INF] Deploying daemon osd.3 on vm07
2026-03-10T08:37:09.698 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:09 vm02 bash[17473]: audit 2026-03-10T08:37:08.942499+0000 mon.vm02 (mon.0) 439 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:37:09.698 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:09 vm02 bash[17473]: audit 2026-03-10T08:37:08.948262+0000 mon.vm02 (mon.0) 440 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:37:09.698 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:09 vm02 bash[17473]: audit 2026-03-10T08:37:08.949080+0000 mon.vm02 (mon.0) 441 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch
2026-03-10T08:37:09.698 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:09 vm02 bash[17473]: audit 2026-03-10T08:37:08.950508+0000 mon.vm02 (mon.0) 442 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:37:09.743 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph osd stat -f json
2026-03-10T08:37:10.288 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:10 vm02 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-03-10T08:37:10.539 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:10 vm02 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-03-10T08:37:10.805 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:10 vm02 bash[17473]: cephadm 2026-03-10T08:37:08.950933+0000 mgr.vm02.ttibzz (mgr.14195) 82 : cephadm [INF] Deploying daemon osd.4 on vm02
2026-03-10T08:37:10.805 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:10 vm02 bash[17473]: audit 2026-03-10T08:37:09.706709+0000 mon.vm02 (mon.0) 443 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:37:10.805 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:10 vm02 bash[17473]: audit 2026-03-10T08:37:09.712047+0000 mon.vm02 (mon.0) 444 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:37:10.805 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:10 vm02 bash[17473]: audit 2026-03-10T08:37:09.712750+0000 mon.vm02 (mon.0) 445 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch
2026-03-10T08:37:10.805 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:10 vm02 bash[17473]: audit 2026-03-10T08:37:09.713249+0000 mon.vm02 (mon.0) 446 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:37:10.805 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:10 vm02 bash[17473]: cephadm 2026-03-10T08:37:09.713635+0000 mgr.vm02.ttibzz (mgr.14195) 83 : cephadm [INF] Deploying daemon osd.5 on vm07
2026-03-10T08:37:10.805 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:10 vm02 bash[17473]: audit 2026-03-10T08:37:10.494042+0000 mon.vm02 (mon.0) 447 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:37:10.805 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:10 vm02 bash[17473]: audit 2026-03-10T08:37:10.499192+0000 mon.vm02 (mon.0) 448 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:37:10.805 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:10 vm02 bash[17473]: audit 2026-03-10T08:37:10.499884+0000 mon.vm02 (mon.0) 449 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch
2026-03-10T08:37:10.805 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:10 vm02 bash[17473]: audit 2026-03-10T08:37:10.500394+0000 mon.vm02 (mon.0) 450 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:37:10.983 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:37:10 vm07 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-03-10T08:37:11.247 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:37:11 vm07 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-03-10T08:37:11.729 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:11 vm02 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-03-10T08:37:11.993 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:11 vm02 bash[17473]: cluster 2026-03-10T08:37:10.198408+0000 mgr.vm02.ttibzz (mgr.14195) 84 : cluster [DBG] pgmap v34: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-10T08:37:11.993 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:11 vm02 bash[17473]: cephadm 2026-03-10T08:37:10.500789+0000 mgr.vm02.ttibzz (mgr.14195) 85 : cephadm [INF] Deploying daemon osd.6 on vm02
2026-03-10T08:37:11.993 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:11 vm02 bash[17473]: audit 2026-03-10T08:37:11.143918+0000 mon.vm02 (mon.0) 451 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:37:11.993 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:11 vm02 bash[17473]: audit 2026-03-10T08:37:11.156097+0000 mon.vm02 (mon.0) 452 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:37:11.993 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:11 vm02 bash[17473]: audit 2026-03-10T08:37:11.159016+0000 mon.vm02 (mon.0) 453 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch
2026-03-10T08:37:11.993 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:11 vm02 bash[17473]: audit 2026-03-10T08:37:11.163726+0000 mon.vm02 (mon.0) 454 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:37:11.993 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:11 vm02 bash[17473]: audit 2026-03-10T08:37:11.523234+0000 mon.vm02 (mon.0) 455 : audit [INF] from='osd.0 [v2:192.168.123.102:6802/1698099286,v1:192.168.123.102:6803/1698099286]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-10T08:37:11.993 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:11 vm02 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-03-10T08:37:12.446 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:37:12 vm07 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-03-10T08:37:12.698 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:37:12 vm07 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-03-10T08:37:13.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:12 vm02 bash[17473]: cephadm 2026-03-10T08:37:11.164794+0000 mgr.vm02.ttibzz (mgr.14195) 86 : cephadm [INF] Deploying daemon osd.7 on vm07
2026-03-10T08:37:13.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:12 vm02 bash[17473]: audit 2026-03-10T08:37:11.999724+0000 mon.vm02 (mon.0) 456 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:37:13.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:12 vm02 bash[17473]: audit 2026-03-10T08:37:12.009336+0000 mon.vm02 (mon.0) 457 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:37:13.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:12 vm02 bash[17473]: audit 2026-03-10T08:37:12.152987+0000 mon.vm02 (mon.0) 458 : audit [INF] from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch
2026-03-10T08:37:13.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:12 vm02 bash[17473]: audit 2026-03-10T08:37:12.156031+0000 mon.vm07 (mon.1) 11 : audit [INF] from='osd.1 [v2:192.168.123.107:6800/847448647,v1:192.168.123.107:6801/847448647]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch
2026-03-10T08:37:13.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:12 vm02 bash[17473]: audit 2026-03-10T08:37:12.157410+0000 mon.vm02 (mon.0) 459 : audit [INF] from='osd.0 [v2:192.168.123.102:6802/1698099286,v1:192.168.123.102:6803/1698099286]' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished
2026-03-10T08:37:13.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:12 vm02 bash[17473]: audit 2026-03-10T08:37:12.157457+0000 mon.vm02 (mon.0) 460 : audit [INF] from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished
2026-03-10T08:37:13.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:12 vm02 bash[17473]: cluster 2026-03-10T08:37:12.159658+0000 mon.vm02 (mon.0) 461 : cluster [DBG] osdmap e14: 8 total, 0 up, 8 in
2026-03-10T08:37:13.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:12 vm02 bash[17473]: audit 2026-03-10T08:37:12.159765+0000 mon.vm02 (mon.0) 462 : audit [INF] from='osd.0 [v2:192.168.123.102:6802/1698099286,v1:192.168.123.102:6803/1698099286]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch
2026-03-10T08:37:13.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:12 vm02 bash[17473]: audit 2026-03-10T08:37:12.160129+0000 mon.vm02 (mon.0) 463 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-10T08:37:13.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:12 vm02 bash[17473]: audit 2026-03-10T08:37:12.160172+0000 mon.vm02 (mon.0) 464 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-10T08:37:13.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:12 vm02 bash[17473]: audit 2026-03-10T08:37:12.160197+0000 mon.vm02 (mon.0) 465 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
bash[17473]: audit 2026-03-10T08:37:12.160223+0000 mon.vm02 (mon.0) 466 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T08:37:13.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:12 vm02 bash[17473]: audit 2026-03-10T08:37:12.160223+0000 mon.vm02 (mon.0) 466 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T08:37:13.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:12 vm02 bash[17473]: audit 2026-03-10T08:37:12.160248+0000 mon.vm02 (mon.0) 467 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T08:37:13.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:12 vm02 bash[17473]: audit 2026-03-10T08:37:12.160248+0000 mon.vm02 (mon.0) 467 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T08:37:13.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:12 vm02 bash[17473]: audit 2026-03-10T08:37:12.160272+0000 mon.vm02 (mon.0) 468 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T08:37:13.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:12 vm02 bash[17473]: audit 2026-03-10T08:37:12.160272+0000 mon.vm02 (mon.0) 468 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T08:37:13.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:12 vm02 bash[17473]: audit 2026-03-10T08:37:12.160295+0000 mon.vm02 (mon.0) 469 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T08:37:13.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:12 vm02 bash[17473]: audit 2026-03-10T08:37:12.160295+0000 mon.vm02 (mon.0) 469 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T08:37:13.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:12 vm02 bash[17473]: audit 2026-03-10T08:37:12.160323+0000 mon.vm02 (mon.0) 470 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T08:37:13.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:12 vm02 bash[17473]: audit 2026-03-10T08:37:12.160323+0000 mon.vm02 (mon.0) 470 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T08:37:13.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:12 vm02 bash[17473]: audit 2026-03-10T08:37:12.162873+0000 mon.vm02 (mon.0) 471 : audit [INF] from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch 2026-03-10T08:37:13.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:12 vm02 bash[17473]: audit 2026-03-10T08:37:12.162873+0000 mon.vm02 (mon.0) 471 : audit [INF] from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch 2026-03-10T08:37:13.041 
INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:12 vm02 bash[17473]: audit 2026-03-10T08:37:12.164244+0000 mon.vm07 (mon.1) 12 : audit [INF] from='osd.1 [v2:192.168.123.107:6800/847448647,v1:192.168.123.107:6801/847448647]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch 2026-03-10T08:37:13.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:12 vm02 bash[17473]: audit 2026-03-10T08:37:12.164244+0000 mon.vm07 (mon.1) 12 : audit [INF] from='osd.1 [v2:192.168.123.107:6800/847448647,v1:192.168.123.107:6801/847448647]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch 2026-03-10T08:37:13.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:12 vm02 bash[17473]: audit 2026-03-10T08:37:12.563349+0000 mon.vm02 (mon.0) 472 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:37:13.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:12 vm02 bash[17473]: audit 2026-03-10T08:37:12.563349+0000 mon.vm02 (mon.0) 472 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:37:13.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:12 vm02 bash[17473]: audit 2026-03-10T08:37:12.569599+0000 mon.vm02 (mon.0) 473 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:37:13.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:12 vm02 bash[17473]: audit 2026-03-10T08:37:12.569599+0000 mon.vm02 (mon.0) 473 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: cluster 2026-03-10T08:37:12.198759+0000 mgr.vm02.ttibzz (mgr.14195) 87 : cluster [DBG] pgmap v36: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: cluster 2026-03-10T08:37:12.198759+0000 mgr.vm02.ttibzz (mgr.14195) 87 : cluster [DBG] pgmap v36: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: audit 2026-03-10T08:37:13.245079+0000 mon.vm02 (mon.0) 474 : audit [INF] from='osd.0 [v2:192.168.123.102:6802/1698099286,v1:192.168.123.102:6803/1698099286]' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm02", "root=default"]}]': finished 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: audit 2026-03-10T08:37:13.245079+0000 mon.vm02 (mon.0) 474 : audit [INF] from='osd.0 [v2:192.168.123.102:6802/1698099286,v1:192.168.123.102:6803/1698099286]' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm02", "root=default"]}]': finished 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: audit 2026-03-10T08:37:13.245234+0000 mon.vm02 (mon.0) 475 : audit [INF] from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm07", "root=default"]}]': finished 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: audit 2026-03-10T08:37:13.245234+0000 mon.vm02 (mon.0) 475 : audit [INF] from='osd.1 ' entity='osd.1' cmd='[{"prefix": 
"osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm07", "root=default"]}]': finished 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: cluster 2026-03-10T08:37:13.248551+0000 mon.vm02 (mon.0) 476 : cluster [DBG] osdmap e15: 8 total, 0 up, 8 in 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: cluster 2026-03-10T08:37:13.248551+0000 mon.vm02 (mon.0) 476 : cluster [DBG] osdmap e15: 8 total, 0 up, 8 in 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: audit 2026-03-10T08:37:13.249252+0000 mon.vm02 (mon.0) 477 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: audit 2026-03-10T08:37:13.249252+0000 mon.vm02 (mon.0) 477 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: audit 2026-03-10T08:37:13.249615+0000 mon.vm02 (mon.0) 478 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: audit 2026-03-10T08:37:13.249615+0000 mon.vm02 (mon.0) 478 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: audit 2026-03-10T08:37:13.249657+0000 mon.vm02 (mon.0) 479 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: audit 2026-03-10T08:37:13.249657+0000 mon.vm02 (mon.0) 479 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: audit 2026-03-10T08:37:13.249684+0000 mon.vm02 (mon.0) 480 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: audit 2026-03-10T08:37:13.249684+0000 mon.vm02 (mon.0) 480 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: audit 2026-03-10T08:37:13.249710+0000 mon.vm02 (mon.0) 481 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: audit 2026-03-10T08:37:13.249710+0000 mon.vm02 (mon.0) 481 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 
10 08:37:13 vm02 bash[17473]: audit 2026-03-10T08:37:13.249737+0000 mon.vm02 (mon.0) 482 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: audit 2026-03-10T08:37:13.249737+0000 mon.vm02 (mon.0) 482 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: audit 2026-03-10T08:37:13.249764+0000 mon.vm02 (mon.0) 483 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: audit 2026-03-10T08:37:13.249764+0000 mon.vm02 (mon.0) 483 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: audit 2026-03-10T08:37:13.249791+0000 mon.vm02 (mon.0) 484 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: audit 2026-03-10T08:37:13.249791+0000 mon.vm02 (mon.0) 484 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: audit 2026-03-10T08:37:13.252277+0000 mon.vm02 (mon.0) 485 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: audit 2026-03-10T08:37:13.252277+0000 mon.vm02 (mon.0) 485 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: audit 2026-03-10T08:37:13.260592+0000 mon.vm02 (mon.0) 486 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: audit 2026-03-10T08:37:13.260592+0000 mon.vm02 (mon.0) 486 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: audit 2026-03-10T08:37:13.271073+0000 mon.vm02 (mon.0) 487 : audit [INF] from='osd.3 [v2:192.168.123.107:6808/2259832732,v1:192.168.123.107:6809/2259832732]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: audit 2026-03-10T08:37:13.271073+0000 mon.vm02 (mon.0) 487 : audit [INF] from='osd.3 [v2:192.168.123.107:6808/2259832732,v1:192.168.123.107:6809/2259832732]' entity='osd.3' cmd=[{"prefix": "osd crush 
set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: audit 2026-03-10T08:37:13.661028+0000 mon.vm02 (mon.0) 488 : audit [INF] from='osd.2 [v2:192.168.123.102:6810/707108903,v1:192.168.123.102:6811/707108903]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-10T08:37:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:13 vm02 bash[17473]: audit 2026-03-10T08:37:13.661028+0000 mon.vm02 (mon.0) 488 : audit [INF] from='osd.2 [v2:192.168.123.102:6810/707108903,v1:192.168.123.102:6811/707108903]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: cluster 2026-03-10T08:37:12.499692+0000 osd.0 (osd.0) 1 : cluster [DBG] purged_snaps scrub starts 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: cluster 2026-03-10T08:37:12.499692+0000 osd.0 (osd.0) 1 : cluster [DBG] purged_snaps scrub starts 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: cluster 2026-03-10T08:37:12.499738+0000 osd.0 (osd.0) 2 : cluster [DBG] purged_snaps scrub ok 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: cluster 2026-03-10T08:37:12.499738+0000 osd.0 (osd.0) 2 : cluster [DBG] purged_snaps scrub ok 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 2026-03-10T08:37:14.252292+0000 mon.vm02 (mon.0) 489 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 2026-03-10T08:37:14.252292+0000 mon.vm02 (mon.0) 489 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 2026-03-10T08:37:14.258323+0000 mon.vm02 (mon.0) 490 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 2026-03-10T08:37:14.258323+0000 mon.vm02 (mon.0) 490 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 2026-03-10T08:37:14.264647+0000 mon.vm02 (mon.0) 491 : audit [INF] from='osd.3 [v2:192.168.123.107:6808/2259832732,v1:192.168.123.107:6809/2259832732]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 2026-03-10T08:37:14.264647+0000 mon.vm02 (mon.0) 491 : audit [INF] from='osd.3 [v2:192.168.123.107:6808/2259832732,v1:192.168.123.107:6809/2259832732]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 
bash[17473]: audit 2026-03-10T08:37:14.264745+0000 mon.vm02 (mon.0) 492 : audit [INF] from='osd.2 [v2:192.168.123.102:6810/707108903,v1:192.168.123.102:6811/707108903]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 2026-03-10T08:37:14.264745+0000 mon.vm02 (mon.0) 492 : audit [INF] from='osd.2 [v2:192.168.123.102:6810/707108903,v1:192.168.123.102:6811/707108903]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: cluster 2026-03-10T08:37:14.268857+0000 mon.vm02 (mon.0) 493 : cluster [INF] osd.0 [v2:192.168.123.102:6802/1698099286,v1:192.168.123.102:6803/1698099286] boot 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: cluster 2026-03-10T08:37:14.268857+0000 mon.vm02 (mon.0) 493 : cluster [INF] osd.0 [v2:192.168.123.102:6802/1698099286,v1:192.168.123.102:6803/1698099286] boot 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: cluster 2026-03-10T08:37:14.268890+0000 mon.vm02 (mon.0) 494 : cluster [DBG] osdmap e16: 8 total, 1 up, 8 in 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: cluster 2026-03-10T08:37:14.268890+0000 mon.vm02 (mon.0) 494 : cluster [DBG] osdmap e16: 8 total, 1 up, 8 in 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 2026-03-10T08:37:14.269059+0000 mon.vm02 (mon.0) 495 : audit [INF] from='osd.3 [v2:192.168.123.107:6808/2259832732,v1:192.168.123.107:6809/2259832732]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 2026-03-10T08:37:14.269059+0000 mon.vm02 (mon.0) 495 : audit [INF] from='osd.3 [v2:192.168.123.107:6808/2259832732,v1:192.168.123.107:6809/2259832732]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 2026-03-10T08:37:14.269155+0000 mon.vm02 (mon.0) 496 : audit [INF] from='osd.2 [v2:192.168.123.102:6810/707108903,v1:192.168.123.102:6811/707108903]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 2026-03-10T08:37:14.269155+0000 mon.vm02 (mon.0) 496 : audit [INF] from='osd.2 [v2:192.168.123.102:6810/707108903,v1:192.168.123.102:6811/707108903]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 2026-03-10T08:37:14.269211+0000 mon.vm02 (mon.0) 497 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 
2026-03-10T08:37:14.269211+0000 mon.vm02 (mon.0) 497 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 2026-03-10T08:37:14.269300+0000 mon.vm02 (mon.0) 498 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 2026-03-10T08:37:14.269300+0000 mon.vm02 (mon.0) 498 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 2026-03-10T08:37:14.269349+0000 mon.vm02 (mon.0) 499 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 2026-03-10T08:37:14.269349+0000 mon.vm02 (mon.0) 499 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 2026-03-10T08:37:14.269380+0000 mon.vm02 (mon.0) 500 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 2026-03-10T08:37:14.269380+0000 mon.vm02 (mon.0) 500 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 2026-03-10T08:37:14.269415+0000 mon.vm02 (mon.0) 501 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 2026-03-10T08:37:14.269415+0000 mon.vm02 (mon.0) 501 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 2026-03-10T08:37:14.269451+0000 mon.vm02 (mon.0) 502 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 2026-03-10T08:37:14.269451+0000 mon.vm02 (mon.0) 502 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 2026-03-10T08:37:14.269489+0000 mon.vm02 (mon.0) 503 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 
2026-03-10T08:37:14.269489+0000 mon.vm02 (mon.0) 503 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 2026-03-10T08:37:14.269523+0000 mon.vm02 (mon.0) 504 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 2026-03-10T08:37:14.269523+0000 mon.vm02 (mon.0) 504 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 2026-03-10T08:37:14.495288+0000 mon.vm02 (mon.0) 505 : audit [INF] from='osd.1 ' entity='osd.1' 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 2026-03-10T08:37:14.495288+0000 mon.vm02 (mon.0) 505 : audit [INF] from='osd.1 ' entity='osd.1' 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 2026-03-10T08:37:14.599998+0000 mon.vm02 (mon.0) 506 : audit [INF] from='osd.4 [v2:192.168.123.102:6818/3617463997,v1:192.168.123.102:6819/3617463997]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T08:37:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:14 vm02 bash[17473]: audit 2026-03-10T08:37:14.599998+0000 mon.vm02 (mon.0) 506 : audit [INF] from='osd.4 [v2:192.168.123.102:6818/3617463997,v1:192.168.123.102:6819/3617463997]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: cluster 2026-03-10T08:37:13.184255+0000 osd.1 (osd.1) 1 : cluster [DBG] purged_snaps scrub starts 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: cluster 2026-03-10T08:37:13.184255+0000 osd.1 (osd.1) 1 : cluster [DBG] purged_snaps scrub starts 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: cluster 2026-03-10T08:37:13.184301+0000 osd.1 (osd.1) 2 : cluster [DBG] purged_snaps scrub ok 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: cluster 2026-03-10T08:37:13.184301+0000 osd.1 (osd.1) 2 : cluster [DBG] purged_snaps scrub ok 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: cluster 2026-03-10T08:37:14.199164+0000 mgr.vm02.ttibzz (mgr.14195) 88 : cluster [DBG] pgmap v38: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: cluster 2026-03-10T08:37:14.199164+0000 mgr.vm02.ttibzz (mgr.14195) 88 : cluster [DBG] pgmap v38: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 2026-03-10T08:37:15.258309+0000 mon.vm02 (mon.0) 507 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 
vm02 bash[17473]: audit 2026-03-10T08:37:15.258309+0000 mon.vm02 (mon.0) 507 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 2026-03-10T08:37:15.267321+0000 mon.vm02 (mon.0) 508 : audit [INF] from='osd.3 [v2:192.168.123.107:6808/2259832732,v1:192.168.123.107:6809/2259832732]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm07", "root=default"]}]': finished 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 2026-03-10T08:37:15.267321+0000 mon.vm02 (mon.0) 508 : audit [INF] from='osd.3 [v2:192.168.123.107:6808/2259832732,v1:192.168.123.107:6809/2259832732]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm07", "root=default"]}]': finished 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 2026-03-10T08:37:15.267418+0000 mon.vm02 (mon.0) 509 : audit [INF] from='osd.2 [v2:192.168.123.102:6810/707108903,v1:192.168.123.102:6811/707108903]' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm02", "root=default"]}]': finished 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 2026-03-10T08:37:15.267418+0000 mon.vm02 (mon.0) 509 : audit [INF] from='osd.2 [v2:192.168.123.102:6810/707108903,v1:192.168.123.102:6811/707108903]' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm02", "root=default"]}]': finished 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 2026-03-10T08:37:15.267494+0000 mon.vm02 (mon.0) 510 : audit [INF] from='osd.4 [v2:192.168.123.102:6818/3617463997,v1:192.168.123.102:6819/3617463997]' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 2026-03-10T08:37:15.267494+0000 mon.vm02 (mon.0) 510 : audit [INF] from='osd.4 [v2:192.168.123.102:6818/3617463997,v1:192.168.123.102:6819/3617463997]' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: cluster 2026-03-10T08:37:15.269574+0000 mon.vm02 (mon.0) 511 : cluster [INF] osd.1 [v2:192.168.123.107:6800/847448647,v1:192.168.123.107:6801/847448647] boot 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: cluster 2026-03-10T08:37:15.269574+0000 mon.vm02 (mon.0) 511 : cluster [INF] osd.1 [v2:192.168.123.107:6800/847448647,v1:192.168.123.107:6801/847448647] boot 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: cluster 2026-03-10T08:37:15.269617+0000 mon.vm02 (mon.0) 512 : cluster [DBG] osdmap e17: 8 total, 2 up, 8 in 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: cluster 2026-03-10T08:37:15.269617+0000 mon.vm02 (mon.0) 512 : cluster [DBG] osdmap e17: 8 total, 2 up, 8 in 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 
2026-03-10T08:37:15.270236+0000 mon.vm02 (mon.0) 513 : audit [INF] from='osd.4 [v2:192.168.123.102:6818/3617463997,v1:192.168.123.102:6819/3617463997]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 2026-03-10T08:37:15.270236+0000 mon.vm02 (mon.0) 513 : audit [INF] from='osd.4 [v2:192.168.123.102:6818/3617463997,v1:192.168.123.102:6819/3617463997]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 2026-03-10T08:37:15.270849+0000 mon.vm02 (mon.0) 514 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 2026-03-10T08:37:15.270849+0000 mon.vm02 (mon.0) 514 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 2026-03-10T08:37:15.270920+0000 mon.vm02 (mon.0) 515 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 2026-03-10T08:37:15.270920+0000 mon.vm02 (mon.0) 515 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 2026-03-10T08:37:15.270951+0000 mon.vm02 (mon.0) 516 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 2026-03-10T08:37:15.270951+0000 mon.vm02 (mon.0) 516 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 2026-03-10T08:37:15.270977+0000 mon.vm02 (mon.0) 517 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 2026-03-10T08:37:15.270977+0000 mon.vm02 (mon.0) 517 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 2026-03-10T08:37:15.271003+0000 mon.vm02 (mon.0) 518 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 2026-03-10T08:37:15.271003+0000 mon.vm02 (mon.0) 518 : audit [DBG] from='mgr.14195 
192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 2026-03-10T08:37:15.271031+0000 mon.vm02 (mon.0) 519 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 2026-03-10T08:37:15.271031+0000 mon.vm02 (mon.0) 519 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 2026-03-10T08:37:15.271099+0000 mon.vm02 (mon.0) 520 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 2026-03-10T08:37:15.271099+0000 mon.vm02 (mon.0) 520 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 2026-03-10T08:37:15.273368+0000 mon.vm02 (mon.0) 521 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T08:37:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 2026-03-10T08:37:15.273368+0000 mon.vm02 (mon.0) 521 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T08:37:16.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 2026-03-10T08:37:15.273603+0000 mon.vm02 (mon.0) 522 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T08:37:16.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 2026-03-10T08:37:15.273603+0000 mon.vm02 (mon.0) 522 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T08:37:16.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 2026-03-10T08:37:15.525575+0000 mon.vm02 (mon.0) 523 : audit [INF] from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-10T08:37:16.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 2026-03-10T08:37:15.525575+0000 mon.vm02 (mon.0) 523 : audit [INF] from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-10T08:37:16.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 2026-03-10T08:37:15.527702+0000 mon.vm07 (mon.1) 13 : audit [INF] from='osd.5 [v2:192.168.123.107:6816/2199389295,v1:192.168.123.107:6817/2199389295]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-10T08:37:16.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:15 vm02 bash[17473]: audit 2026-03-10T08:37:15.527702+0000 
mon.vm07 (mon.1) 13 : audit [INF] from='osd.5 [v2:192.168.123.107:6816/2199389295,v1:192.168.123.107:6817/2199389295]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: cluster 2026-03-10T08:37:14.280535+0000 osd.3 (osd.3) 1 : cluster [DBG] purged_snaps scrub starts 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: cluster 2026-03-10T08:37:14.280535+0000 osd.3 (osd.3) 1 : cluster [DBG] purged_snaps scrub starts 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: cluster 2026-03-10T08:37:14.280581+0000 osd.3 (osd.3) 2 : cluster [DBG] purged_snaps scrub ok 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: cluster 2026-03-10T08:37:14.280581+0000 osd.3 (osd.3) 2 : cluster [DBG] purged_snaps scrub ok 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: cluster 2026-03-10T08:37:14.690967+0000 osd.2 (osd.2) 1 : cluster [DBG] purged_snaps scrub starts 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: cluster 2026-03-10T08:37:14.690967+0000 osd.2 (osd.2) 1 : cluster [DBG] purged_snaps scrub starts 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: cluster 2026-03-10T08:37:14.691073+0000 osd.2 (osd.2) 2 : cluster [DBG] purged_snaps scrub ok 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: cluster 2026-03-10T08:37:14.691073+0000 osd.2 (osd.2) 2 : cluster [DBG] purged_snaps scrub ok 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:15.772708+0000 mon.vm02 (mon.0) 524 : audit [INF] from='osd.6 [v2:192.168.123.102:6826/468752798,v1:192.168.123.102:6827/468752798]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:15.772708+0000 mon.vm02 (mon.0) 524 : audit [INF] from='osd.6 [v2:192.168.123.102:6826/468752798,v1:192.168.123.102:6827/468752798]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.066017+0000 mon.vm02 (mon.0) 525 : audit [INF] from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.066017+0000 mon.vm02 (mon.0) 525 : audit [INF] from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.069062+0000 mon.vm07 (mon.1) 14 : audit [INF] from='osd.7 [v2:192.168.123.107:6824/1605519816,v1:192.168.123.107:6825/1605519816]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 
2026-03-10T08:37:16.069062+0000 mon.vm07 (mon.1) 14 : audit [INF] from='osd.7 [v2:192.168.123.107:6824/1605519816,v1:192.168.123.107:6825/1605519816]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.271335+0000 mon.vm02 (mon.0) 526 : audit [INF] from='osd.4 [v2:192.168.123.102:6818/3617463997,v1:192.168.123.102:6819/3617463997]' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm02", "root=default"]}]': finished 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.271335+0000 mon.vm02 (mon.0) 526 : audit [INF] from='osd.4 [v2:192.168.123.102:6818/3617463997,v1:192.168.123.102:6819/3617463997]' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm02", "root=default"]}]': finished 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.271388+0000 mon.vm02 (mon.0) 527 : audit [INF] from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.271388+0000 mon.vm02 (mon.0) 527 : audit [INF] from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.271414+0000 mon.vm02 (mon.0) 528 : audit [INF] from='osd.6 [v2:192.168.123.102:6826/468752798,v1:192.168.123.102:6827/468752798]' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.271414+0000 mon.vm02 (mon.0) 528 : audit [INF] from='osd.6 [v2:192.168.123.102:6826/468752798,v1:192.168.123.102:6827/468752798]' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.271504+0000 mon.vm02 (mon.0) 529 : audit [INF] from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.271504+0000 mon.vm02 (mon.0) 529 : audit [INF] from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: cluster 2026-03-10T08:37:16.274855+0000 mon.vm02 (mon.0) 530 : cluster [INF] osd.3 [v2:192.168.123.107:6808/2259832732,v1:192.168.123.107:6809/2259832732] boot 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: cluster 2026-03-10T08:37:16.274855+0000 mon.vm02 (mon.0) 530 : cluster [INF] osd.3 [v2:192.168.123.107:6808/2259832732,v1:192.168.123.107:6809/2259832732] boot 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: cluster 
2026-03-10T08:37:16.274882+0000 mon.vm02 (mon.0) 531 : cluster [DBG] osdmap e18: 8 total, 3 up, 8 in 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: cluster 2026-03-10T08:37:16.274882+0000 mon.vm02 (mon.0) 531 : cluster [DBG] osdmap e18: 8 total, 3 up, 8 in 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.275780+0000 mon.vm02 (mon.0) 532 : audit [INF] from='osd.6 [v2:192.168.123.102:6826/468752798,v1:192.168.123.102:6827/468752798]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.275780+0000 mon.vm02 (mon.0) 532 : audit [INF] from='osd.6 [v2:192.168.123.102:6826/468752798,v1:192.168.123.102:6827/468752798]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm02", "root=default"]}]: dispatch 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.275872+0000 mon.vm02 (mon.0) 533 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.275872+0000 mon.vm02 (mon.0) 533 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.275922+0000 mon.vm02 (mon.0) 534 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.275922+0000 mon.vm02 (mon.0) 534 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-10T08:37:17.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.275976+0000 mon.vm02 (mon.0) 535 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T08:37:17.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.275976+0000 mon.vm02 (mon.0) 535 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T08:37:17.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.276005+0000 mon.vm02 (mon.0) 536 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T08:37:17.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.276005+0000 mon.vm02 (mon.0) 536 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-10T08:37:17.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 
bash[17473]: audit 2026-03-10T08:37:16.276033+0000 mon.vm02 (mon.0) 537 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T08:37:17.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.276033+0000 mon.vm02 (mon.0) 537 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-10T08:37:17.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.278215+0000 mon.vm02 (mon.0) 538 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T08:37:17.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.278215+0000 mon.vm02 (mon.0) 538 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-10T08:37:17.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.280539+0000 mon.vm02 (mon.0) 539 : audit [INF] from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch 2026-03-10T08:37:17.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.280539+0000 mon.vm02 (mon.0) 539 : audit [INF] from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch 2026-03-10T08:37:17.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.280990+0000 mon.vm02 (mon.0) 540 : audit [INF] from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch 2026-03-10T08:37:17.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.280990+0000 mon.vm02 (mon.0) 540 : audit [INF] from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch 2026-03-10T08:37:17.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.281435+0000 mon.vm02 (mon.0) 541 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T08:37:17.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.281435+0000 mon.vm02 (mon.0) 541 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-10T08:37:17.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.283382+0000 mon.vm07 (mon.1) 15 : audit [INF] from='osd.5 [v2:192.168.123.107:6816/2199389295,v1:192.168.123.107:6817/2199389295]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch 2026-03-10T08:37:17.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.283382+0000 mon.vm07 (mon.1) 15 : audit [INF] from='osd.5 
[v2:192.168.123.107:6816/2199389295,v1:192.168.123.107:6817/2199389295]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch 2026-03-10T08:37:17.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.284045+0000 mon.vm07 (mon.1) 16 : audit [INF] from='osd.7 [v2:192.168.123.107:6824/1605519816,v1:192.168.123.107:6825/1605519816]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch 2026-03-10T08:37:17.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.284045+0000 mon.vm07 (mon.1) 16 : audit [INF] from='osd.7 [v2:192.168.123.107:6824/1605519816,v1:192.168.123.107:6825/1605519816]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm07", "root=default"]}]: dispatch 2026-03-10T08:37:17.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.300891+0000 mon.vm02 (mon.0) 542 : audit [INF] from='osd.2 [v2:192.168.123.102:6810/707108903,v1:192.168.123.102:6811/707108903]' entity='osd.2' 2026-03-10T08:37:17.041 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:16 vm02 bash[17473]: audit 2026-03-10T08:37:16.300891+0000 mon.vm02 (mon.0) 542 : audit [INF] from='osd.2 [v2:192.168.123.102:6810/707108903,v1:192.168.123.102:6811/707108903]' entity='osd.2' 2026-03-10T08:37:17.508 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:37:17.937 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-10T08:37:18.038 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":19,"num_osds":8,"num_up_osds":5,"osd_up_since":1773131837,"num_in_osds":8,"osd_in_since":1773131818,"num_remapped_pgs":0} 2026-03-10T08:37:18.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:17 vm02 bash[17473]: cluster 2026-03-10T08:37:15.589322+0000 osd.4 (osd.4) 1 : cluster [DBG] purged_snaps scrub starts 2026-03-10T08:37:18.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:17 vm02 bash[17473]: cluster 2026-03-10T08:37:15.589322+0000 osd.4 (osd.4) 1 : cluster [DBG] purged_snaps scrub starts 2026-03-10T08:37:18.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:17 vm02 bash[17473]: cluster 2026-03-10T08:37:15.589403+0000 osd.4 (osd.4) 2 : cluster [DBG] purged_snaps scrub ok 2026-03-10T08:37:18.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:17 vm02 bash[17473]: cluster 2026-03-10T08:37:15.589403+0000 osd.4 (osd.4) 2 : cluster [DBG] purged_snaps scrub ok 2026-03-10T08:37:18.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:17 vm02 bash[17473]: cluster 2026-03-10T08:37:16.199541+0000 mgr.vm02.ttibzz (mgr.14195) 89 : cluster [DBG] pgmap v41: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail 2026-03-10T08:37:18.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:17 vm02 bash[17473]: cluster 2026-03-10T08:37:16.199541+0000 mgr.vm02.ttibzz (mgr.14195) 89 : cluster [DBG] pgmap v41: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail 2026-03-10T08:37:18.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:17 vm02 bash[17473]: audit 2026-03-10T08:37:17.274123+0000 mon.vm02 (mon.0) 543 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 
2026-03-10T08:37:18.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:17 vm02 bash[17473]: audit 2026-03-10T08:37:17.274123+0000 mon.vm02 (mon.0) 543 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T08:37:18.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:17 vm02 bash[17473]: audit 2026-03-10T08:37:17.275051+0000 mon.vm02 (mon.0) 544 : audit [INF] from='osd.6 [v2:192.168.123.102:6826/468752798,v1:192.168.123.102:6827/468752798]' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm02", "root=default"]}]': finished
2026-03-10T08:37:18.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:17 vm02 bash[17473]: audit 2026-03-10T08:37:17.275192+0000 mon.vm02 (mon.0) 545 : audit [INF] from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm07", "root=default"]}]': finished
2026-03-10T08:37:18.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:17 vm02 bash[17473]: audit 2026-03-10T08:37:17.277820+0000 mon.vm02 (mon.0) 546 : audit [INF] from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm07", "root=default"]}]': finished
2026-03-10T08:37:18.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:17 vm02 bash[17473]: cluster 2026-03-10T08:37:17.282100+0000 mon.vm02 (mon.0) 547 : cluster [INF] osd.4 [v2:192.168.123.102:6818/3617463997,v1:192.168.123.102:6819/3617463997] boot
2026-03-10T08:37:18.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:17 vm02 bash[17473]: cluster 2026-03-10T08:37:17.282196+0000 mon.vm02 (mon.0) 548 : cluster [INF] osd.2 [v2:192.168.123.102:6810/707108903,v1:192.168.123.102:6811/707108903] boot
2026-03-10T08:37:18.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:17 vm02 bash[17473]: cluster 2026-03-10T08:37:17.282332+0000 mon.vm02 (mon.0) 549 : cluster [DBG] osdmap e19: 8 total, 5 up, 8 in
2026-03-10T08:37:18.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:17 vm02 bash[17473]: audit 2026-03-10T08:37:17.283405+0000 mon.vm02 (mon.0) 550 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-10T08:37:18.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:17 vm02 bash[17473]: audit 2026-03-10T08:37:17.283582+0000 mon.vm02 (mon.0) 551 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-10T08:37:18.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:17 vm02 bash[17473]: audit 2026-03-10T08:37:17.283644+0000 mon.vm02 (mon.0) 552 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T08:37:18.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:17 vm02 bash[17473]: audit 2026-03-10T08:37:17.283677+0000 mon.vm02 (mon.0) 553 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T08:37:18.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:17 vm02 bash[17473]: audit 2026-03-10T08:37:17.283705+0000 mon.vm02 (mon.0) 554 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T08:37:18.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:17 vm02 bash[17473]: audit 2026-03-10T08:37:17.287107+0000 mon.vm02 (mon.0) 555 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T08:37:18.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:17 vm02 bash[17473]: audit 2026-03-10T08:37:17.287207+0000 mon.vm02 (mon.0) 556 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T08:37:18.291 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:17 vm02 bash[17473]: audit 2026-03-10T08:37:17.288211+0000 mon.vm02 (mon.0) 557 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T08:37:18.896 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:18 vm02 bash[17473]: cluster 2026-03-10T08:37:16.501156+0000 osd.5 (osd.5) 1 : cluster [DBG] purged_snaps scrub starts
2026-03-10T08:37:18.896 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:18 vm02 bash[17473]: cluster 2026-03-10T08:37:16.501208+0000 osd.5 (osd.5) 2 : cluster [DBG] purged_snaps scrub ok
2026-03-10T08:37:18.896 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:18 vm02 bash[17473]: cluster 2026-03-10T08:37:16.762109+0000 osd.6 (osd.6) 1 : cluster [DBG] purged_snaps scrub starts
2026-03-10T08:37:18.896 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:18 vm02 bash[17473]: cluster 2026-03-10T08:37:16.762161+0000 osd.6 (osd.6) 2 : cluster [DBG] purged_snaps scrub ok
2026-03-10T08:37:18.896 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:18 vm02 bash[17473]: audit 2026-03-10T08:37:17.934184+0000 mon.vm02 (mon.0) 558 : audit [DBG] from='client.? 192.168.123.102:0/2205317790' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T08:37:18.896 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:18 vm02 bash[17473]: audit 2026-03-10T08:37:18.249587+0000 mon.vm02 (mon.0) 559 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]: dispatch
2026-03-10T08:37:18.896 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:18 vm02 bash[17473]: audit 2026-03-10T08:37:18.286000+0000 mon.vm02 (mon.0) 560 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T08:37:18.896 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:18 vm02 bash[17473]: audit 2026-03-10T08:37:18.286582+0000 mon.vm02 (mon.0) 561 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T08:37:18.896 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:18 vm02 bash[17473]: audit 2026-03-10T08:37:18.287024+0000 mon.vm02 (mon.0) 562 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T08:37:18.896 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:18 vm02 bash[17473]: audit 2026-03-10T08:37:18.291821+0000 mon.vm02 (mon.0) 563 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]': finished
2026-03-10T08:37:18.896 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:18 vm02 bash[17473]: cluster 2026-03-10T08:37:18.295855+0000 mon.vm02 (mon.0) 564 : cluster [INF] osd.7 [v2:192.168.123.107:6824/1605519816,v1:192.168.123.107:6825/1605519816] boot
2026-03-10T08:37:18.896 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:18 vm02 bash[17473]: cluster 2026-03-10T08:37:18.295961+0000 mon.vm02 (mon.0) 565 : cluster [DBG] osdmap e20: 8 total, 6 up, 8 in
2026-03-10T08:37:18.896 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:18 vm02 bash[17473]: audit 2026-03-10T08:37:18.297128+0000 mon.vm02 (mon.0) 566 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T08:37:18.896 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:18 vm02 bash[17473]: audit 2026-03-10T08:37:18.297488+0000 mon.vm02 (mon.0) 567 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T08:37:18.896 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:18 vm02 bash[17473]: audit 2026-03-10T08:37:18.297823+0000 mon.vm02 (mon.0) 568 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-10T08:37:18.897 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:18 vm02 bash[17473]: audit 2026-03-10T08:37:18.298301+0000 mon.vm02 (mon.0) 569 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch
2026-03-10T08:37:19.039 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph osd stat -f json
2026-03-10T08:37:20.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:19 vm02 bash[17473]: cluster 2026-03-10T08:37:17.090126+0000 osd.7 (osd.7) 1 : cluster [DBG] purged_snaps scrub starts
2026-03-10T08:37:20.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:19 vm02 bash[17473]: cluster 2026-03-10T08:37:17.090180+0000 osd.7 (osd.7) 2 : cluster [DBG] purged_snaps scrub ok
2026-03-10T08:37:20.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:19 vm02 bash[17473]: cluster 2026-03-10T08:37:18.199717+0000 mgr.vm02.ttibzz (mgr.14195) 90 : cluster [DBG] pgmap v44: 0 pgs: ; 0 B data, 479 MiB used, 60 GiB / 60 GiB avail
2026-03-10T08:37:20.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:19 vm02 bash[17473]: audit 2026-03-10T08:37:18.899345+0000 mon.vm02 (mon.0) 570 : audit [INF] from='osd.6 [v2:192.168.123.102:6826/468752798,v1:192.168.123.102:6827/468752798]' entity='osd.6'
2026-03-10T08:37:20.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:19 vm02 bash[17473]: audit 2026-03-10T08:37:18.911564+0000 mon.vm02 (mon.0) 571 : audit [INF] from='osd.5 ' entity='osd.5'
2026-03-10T08:37:20.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:19 vm02 bash[17473]: audit 2026-03-10T08:37:19.079903+0000 mon.vm02 (mon.0) 572 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:37:20.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:19 vm02 bash[17473]: audit 2026-03-10T08:37:19.084194+0000 mon.vm02 (mon.0) 573 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:37:20.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:19 vm02 bash[17473]: audit 2026-03-10T08:37:19.235308+0000 mon.vm02 (mon.0) 574 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:37:20.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:19 vm02 bash[17473]: audit 2026-03-10T08:37:19.286419+0000 mon.vm02 (mon.0) 575 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T08:37:20.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:19 vm02 bash[17473]: audit 2026-03-10T08:37:19.286495+0000 mon.vm02 (mon.0) 576 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T08:37:20.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:19 vm02 bash[17473]: audit 2026-03-10T08:37:19.370485+0000 mon.vm02 (mon.0) 577 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished
2026-03-10T08:37:20.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:19 vm02 bash[17473]: cluster 2026-03-10T08:37:19.372970+0000 mon.vm02 (mon.0) 578 : cluster [INF] osd.5 [v2:192.168.123.107:6816/2199389295,v1:192.168.123.107:6817/2199389295] boot
2026-03-10T08:37:20.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:19 vm02 bash[17473]: cluster 2026-03-10T08:37:19.373035+0000 mon.vm02 (mon.0) 579 : cluster [INF] osd.6 [v2:192.168.123.102:6826/468752798,v1:192.168.123.102:6827/468752798] boot
2026-03-10T08:37:20.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:19 vm02 bash[17473]: cluster 2026-03-10T08:37:19.373141+0000 mon.vm02 (mon.0) 580 : cluster [DBG] osdmap e21: 8 total, 8 up, 8 in
2026-03-10T08:37:20.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:19 vm02 bash[17473]: audit 2026-03-10T08:37:19.373726+0000 mon.vm02 (mon.0) 581 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-10T08:37:20.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:19 vm02 bash[17473]: audit 2026-03-10T08:37:19.373897+0000 mon.vm02 (mon.0) 582 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-10T08:37:20.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:19 vm02 bash[17473]: audit 2026-03-10T08:37:19.783535+0000 mon.vm02 (mon.0) 583 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:37:20.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:19 vm02 bash[17473]: audit 2026-03-10T08:37:19.789393+0000 mon.vm02 (mon.0) 584 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:37:20.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:19 vm02 bash[17473]: audit 2026-03-10T08:37:19.830434+0000 mon.vm02 (mon.0) 585 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:37:22.039 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:21 vm02 bash[17473]: cluster 2026-03-10T08:37:20.200063+0000 mgr.vm02.ttibzz (mgr.14195) 91 : cluster [DBG] pgmap v47: 1 pgs: 1 creating+peering; 0 B data, 558 MiB used, 119 GiB / 120 GiB avail
2026-03-10T08:37:22.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:21 vm02 bash[17473]: cluster 2026-03-10T08:37:20.687865+0000 mon.vm02 (mon.0) 586 : cluster [DBG] osdmap e22: 8 total, 8 up, 8 in
2026-03-10T08:37:22.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:21 vm02 bash[17473]: audit 2026-03-10T08:37:20.910822+0000 mon.vm02 (mon.0) 587 : audit [INF] from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch
2026-03-10T08:37:22.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:21 vm02 bash[17473]: audit 2026-03-10T08:37:20.928559+0000 mon.vm02 (mon.0) 588 : audit [INF] from='admin socket' entity='admin socket' cmd=smart args=[json]: finished
2026-03-10T08:37:22.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:21 vm02 bash[17473]: audit 2026-03-10T08:37:20.929003+0000 mon.vm02 (mon.0) 589 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata", "id": "vm02"}]: dispatch
2026-03-10T08:37:22.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:21 vm02 bash[17473]: audit 2026-03-10T08:37:20.929133+0000 mon.vm02 (mon.0) 590 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata", "id": "vm07"}]: dispatch
2026-03-10T08:37:22.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:21 vm02 bash[17473]: audit 2026-03-10T08:37:20.931055+0000 mon.vm02 (mon.0) 591 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata", "id": "vm02"}]: dispatch
2026-03-10T08:37:22.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:21 vm02 bash[17473]: audit 2026-03-10T08:37:20.931151+0000 mon.vm02 (mon.0) 592 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "mon metadata", "id": "vm07"}]: dispatch
2026-03-10T08:37:22.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:21 vm02 bash[17473]: audit 2026-03-10T08:37:20.934174+0000 mon.vm07 (mon.1) 17 : audit [INF] from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch
2026-03-10T08:37:22.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:21 vm02 bash[17473]: audit 2026-03-10T08:37:20.951752+0000 mon.vm07 (mon.1) 18 : audit [INF] from='admin socket' entity='admin socket' cmd=smart args=[json]: finished
2026-03-10T08:37:23.039 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:22 vm02 bash[17473]: cluster 2026-03-10T08:37:21.613334+0000 mon.vm02 (mon.0) 593 : cluster [DBG] osdmap e23: 8 total, 8 up, 8 in
2026-03-10T08:37:23.695 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config
2026-03-10T08:37:24.022 INFO:teuthology.orchestra.run.vm02.stdout:
2026-03-10T08:37:24.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:23 vm02 bash[17473]: cluster 2026-03-10T08:37:22.200318+0000 mgr.vm02.ttibzz (mgr.14195) 92 : cluster [DBG] pgmap v50: 1 pgs: 1 creating+peering; 0 B data, 1011 MiB used, 159 GiB / 160 GiB avail
2026-03-10T08:37:24.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:23 vm02 bash[17473]: cluster 2026-03-10T08:37:22.632042+0000 mon.vm02 (mon.0) 594 : cluster [DBG] mgrmap e18: vm02.ttibzz(active, since 78s), standbys: vm07.aunzpk
2026-03-10T08:37:24.087 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":23,"num_osds":8,"num_up_osds":8,"osd_up_since":1773131839,"num_in_osds":8,"osd_in_since":1773131818,"num_remapped_pgs":0}
2026-03-10T08:37:24.087 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph osd dump --format=json
2026-03-10T08:37:25.039 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:24 vm02 bash[17473]: audit 2026-03-10T08:37:24.021307+0000 mon.vm07 (mon.1) 19 : audit [DBG] from='client.? 192.168.123.102:0/2796153142' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-10T08:37:25.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:24 vm02 bash[17473]: audit 2026-03-10T08:37:24.380446+0000 mon.vm02 (mon.0) 595 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:37:25.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:24 vm02 bash[17473]: audit 2026-03-10T08:37:24.385468+0000 mon.vm02 (mon.0) 596 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:37:26.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:25 vm02 bash[17473]: cluster 2026-03-10T08:37:24.200552+0000 mgr.vm02.ttibzz (mgr.14195) 93 : cluster [DBG] pgmap v51: 1 pgs: 1 creating+peering; 0 B data, 1012 MiB used, 159 GiB / 160 GiB avail
2026-03-10T08:37:26.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:25 vm02 bash[17473]: audit 2026-03-10T08:37:24.902984+0000 mon.vm02 (mon.0) 597 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:37:26.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:25 vm02 bash[17473]: audit 2026-03-10T08:37:24.908483+0000 mon.vm02 (mon.0) 598 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:37:28.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:27 vm02 bash[17473]: cluster 2026-03-10T08:37:26.200871+0000 mgr.vm02.ttibzz (mgr.14195) 94 : cluster [DBG] pgmap v52: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail
2026-03-10T08:37:28.745 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:37:29.144 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-10T08:37:29.144 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":23,"fsid":"e750d050-1c5b-11f1-9e63-531fde0192f6","created":"2026-03-10T08:35:03.450392+0000","modified":"2026-03-10T08:37:21.603756+0000","last_up_change":"2026-03-10T08:37:19.291741+0000","last_in_change":"2026-03-10T08:36:58.181437+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":8,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"squid","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-10T08:37:18.252662+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"23","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair 
distribution","score_acting":7.8899998664855957,"score_stable":7.8899998664855957,"optimal_score":0.37999999523162842,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"1dafd277-7c1e-4421-9e79-1d111d605b64","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6802","nonce":1698099286},{"type":"v1","addr":"192.168.123.102:6803","nonce":1698099286}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6804","nonce":1698099286},{"type":"v1","addr":"192.168.123.102:6805","nonce":1698099286}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6808","nonce":1698099286},{"type":"v1","addr":"192.168.123.102:6809","nonce":1698099286}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6806","nonce":1698099286},{"type":"v1","addr":"192.168.123.102:6807","nonce":1698099286}]},"public_addr":"192.168.123.102:6803/1698099286","cluster_addr":"192.168.123.102:6805/1698099286","heartbeat_back_addr":"192.168.123.102:6809/1698099286","heartbeat_front_addr":"192.168.123.102:6807/1698099286","state":["exists","up"]},{"osd":1,"uuid":"1b1ad213-cbed-4d89-b55b-35bf90079e6d","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":17,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6800","nonce":847448647},{"type":"v1","addr":"192.168.123.107:6801","nonce":847448647}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6802","nonce":847448647},{"type":"v1","addr":"192.168.123.107:6803","nonce":847448647}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6806","nonce":847448647},{"type":"v1","addr":"192.168.123.107:6807","nonce":847448647}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6804","nonce":847448647},{"type":"v1","addr":"192.168.123.107:6805","nonce":847448647}]},"public_addr":"192.168.123.107:6801/847448647","cluster_addr":"192.168.123.107:6803/847448647","heartbeat_back_addr":"192.168.123.107:6807/847448647","heartbeat_front_addr":"192.168.123.107:6805/847448647","state":["exists","up"]},{"osd":2,"uuid":"0b4900c4-461a-401b-8901-8eb5567ddaa3","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":19,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6810","nonce":707108903},{"type":"v1","addr":"192.168.123.102:6811","nonce":707108903}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6812","nonce":707108903},{"type":"v1","addr":"192.168.123.102:6813","nonce":707108903}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6816","nonce":707108903},{"type":"v1","addr":"192.168.123.102:6817","nonce":707108903}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6814","nonce":707108903},{"type":"v1","addr":"192.168.123.102:6815","nonce":707108903}]},"public_addr":"192.168.123.102:6811/707108903","cluster_addr":"192.168.123.102:6813/707108903","heartbeat_back_addr":"192.168.123.102:6817/707108903","heartbeat_front_addr":"192.168.123.102:6815/707108903","state":["exists","up"]},{"osd":3,"uuid":"1f0eade6-7d8c-4351-968a-f172320dc16e","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,
"last_clean_end":0,"up_from":18,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6808","nonce":2259832732},{"type":"v1","addr":"192.168.123.107:6809","nonce":2259832732}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6810","nonce":2259832732},{"type":"v1","addr":"192.168.123.107:6811","nonce":2259832732}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6814","nonce":2259832732},{"type":"v1","addr":"192.168.123.107:6815","nonce":2259832732}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6812","nonce":2259832732},{"type":"v1","addr":"192.168.123.107:6813","nonce":2259832732}]},"public_addr":"192.168.123.107:6809/2259832732","cluster_addr":"192.168.123.107:6811/2259832732","heartbeat_back_addr":"192.168.123.107:6815/2259832732","heartbeat_front_addr":"192.168.123.107:6813/2259832732","state":["exists","up"]},{"osd":4,"uuid":"046bd431-09bd-47e8-9f2b-13ec28873f26","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":19,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6818","nonce":3617463997},{"type":"v1","addr":"192.168.123.102:6819","nonce":3617463997}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6820","nonce":3617463997},{"type":"v1","addr":"192.168.123.102:6821","nonce":3617463997}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6824","nonce":3617463997},{"type":"v1","addr":"192.168.123.102:6825","nonce":3617463997}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6822","nonce":3617463997},{"type":"v1","addr":"192.168.123.102:6823","nonce":3617463997}]},"public_addr":"192.168.123.102:6819/3617463997","cluster_addr":"192.168.123.102:6821/3617463997","heartbeat_back_addr":"192.168.123.102:6825/3617463997","heartbeat_front_addr":"192.168.123.102:6823/3617463997","state":["exists","up"]},{"osd":5,"uuid":"121b84e4-6673-42a6-a5d5-5b1356bedbf6","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6816","nonce":2199389295},{"type":"v1","addr":"192.168.123.107:6817","nonce":2199389295}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6818","nonce":2199389295},{"type":"v1","addr":"192.168.123.107:6819","nonce":2199389295}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6822","nonce":2199389295},{"type":"v1","addr":"192.168.123.107:6823","nonce":2199389295}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6820","nonce":2199389295},{"type":"v1","addr":"192.168.123.107:6821","nonce":2199389295}]},"public_addr":"192.168.123.107:6817/2199389295","cluster_addr":"192.168.123.107:6819/2199389295","heartbeat_back_addr":"192.168.123.107:6823/2199389295","heartbeat_front_addr":"192.168.123.107:6821/2199389295","state":["exists","up"]},{"osd":6,"uuid":"2846d411-a5e4-48a3-92a9-ede09394307e","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6826","nonce":468752798},{"type":"v1","addr":"192.168.123.102:6827","nonce":468752798}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6828","nonce":468752798},{"type":"v1","addr":"192.168.123.102:6829","nonce":4687
52798}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6832","nonce":468752798},{"type":"v1","addr":"192.168.123.102:6833","nonce":468752798}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6830","nonce":468752798},{"type":"v1","addr":"192.168.123.102:6831","nonce":468752798}]},"public_addr":"192.168.123.102:6827/468752798","cluster_addr":"192.168.123.102:6829/468752798","heartbeat_back_addr":"192.168.123.102:6833/468752798","heartbeat_front_addr":"192.168.123.102:6831/468752798","state":["exists","up"]},{"osd":7,"uuid":"94f52a63-655e-4086-bdc2-473506492269","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":20,"up_thru":20,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6824","nonce":1605519816},{"type":"v1","addr":"192.168.123.107:6825","nonce":1605519816}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6826","nonce":1605519816},{"type":"v1","addr":"192.168.123.107:6827","nonce":1605519816}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6830","nonce":1605519816},{"type":"v1","addr":"192.168.123.107:6831","nonce":1605519816}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6828","nonce":1605519816},{"type":"v1","addr":"192.168.123.107:6829","nonce":1605519816}]},"public_addr":"192.168.123.107:6825/1605519816","cluster_addr":"192.168.123.107:6827/1605519816","heartbeat_back_addr":"192.168.123.107:6831/1605519816","heartbeat_front_addr":"192.168.123.107:6829/1605519816","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T08:37:12.499739+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T08:37:13.184302+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T08:37:14.691074+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T08:37:14.280583+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T08:37:15.589406+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T08:37:16.501210+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T08:37:16.762163+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T08:37:17.090182+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.102:0/1060985559":"2026-03-11T08:36:04.177951+0000","192.168.123.102:0/3726942748":"2026-03-11T08:35:13.970779+0000","192.168.123.102:0/1101409822":"2026-03-11T08:35:13.970779+0000","192.168.123.102:6801/3140428636":"2026-03-11T08
:35:13.970779+0000","192.168.123.102:0/4158772845":"2026-03-11T08:35:13.970779+0000","192.168.123.102:0/112089711":"2026-03-11T08:36:04.177951+0000","192.168.123.102:6800/3140428636":"2026-03-11T08:35:13.970779+0000","192.168.123.102:0/403379110":"2026-03-11T08:35:25.890482+0000","192.168.123.102:0/801657235":"2026-03-11T08:35:25.890482+0000","192.168.123.102:6800/193015271":"2026-03-11T08:35:25.890482+0000","192.168.123.102:6801/193015271":"2026-03-11T08:35:25.890482+0000","192.168.123.102:0/1503313399":"2026-03-11T08:35:25.890482+0000","192.168.123.102:6800/3560899904":"2026-03-11T08:36:04.177951+0000","192.168.123.102:6801/3560899904":"2026-03-11T08:36:04.177951+0000","192.168.123.102:0/2516377943":"2026-03-11T08:36:04.177951+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-10T08:37:29.491 INFO:tasks.cephadm.ceph_manager.ceph:[{'pool': 1, 'pool_name': '.mgr', 'create_time': '2026-03-10T08:37:18.252662+0000', 'flags': 1, 'flags_names': 'hashpspool', 'type': 1, 'size': 3, 'min_size': 2, 'crush_rule': 0, 'peering_crush_bucket_count': 0, 'peering_crush_bucket_target': 0, 'peering_crush_bucket_barrier': 0, 'peering_crush_bucket_mandatory_member': 2147483647, 'is_stretch_pool': False, 'object_hash': 2, 'pg_autoscale_mode': 'off', 'pg_num': 1, 'pg_placement_num': 1, 'pg_placement_num_target': 1, 'pg_num_target': 1, 'pg_num_pending': 1, 'last_pg_merge_meta': {'source_pgid': '0.0', 'ready_epoch': 0, 'last_epoch_started': 0, 'last_epoch_clean': 0, 'source_version': "0'0", 'target_version': "0'0"}, 'last_change': '23', 'last_force_op_resend': '0', 'last_force_op_resend_prenautilus': '0', 'last_force_op_resend_preluminous': '0', 'auid': 0, 'snap_mode': 'selfmanaged', 'snap_seq': 0, 'snap_epoch': 0, 'pool_snaps': [], 'removed_snaps': '[]', 'quota_max_bytes': 0, 'quota_max_objects': 0, 'tiers': [], 'tier_of': -1, 'read_tier': -1, 'write_tier': -1, 'cache_mode': 'none', 'target_max_bytes': 0, 'target_max_objects': 0, 'cache_target_dirty_ratio_micro': 400000, 'cache_target_dirty_high_ratio_micro': 600000, 'cache_target_full_ratio_micro': 800000, 'cache_min_flush_age': 0, 'cache_min_evict_age': 0, 'erasure_code_profile': '', 'hit_set_params': {'type': 'none'}, 'hit_set_period': 0, 'hit_set_count': 0, 'use_gmt_hitset': True, 'min_read_recency_for_promote': 0, 'min_write_recency_for_promote': 0, 'hit_set_grade_decay_rate': 0, 'hit_set_search_last_n': 0, 'grade_table': [], 'stripe_width': 0, 'expected_num_objects': 0, 'fast_read': False, 'options': {'pg_num_max': 32, 'pg_num_min': 1}, 'application_metadata': {'mgr': {}}, 'read_balance': {'score_type': 'Fair distribution', 'score_acting': 7.889999866485596, 'score_stable': 7.889999866485596, 'optimal_score': 0.3799999952316284, 'raw_score_acting': 3, 'raw_score_stable': 3, 'primary_affinity_weighted': 1, 'average_primary_affinity': 1, 'average_primary_affinity_weighted': 1}}] 2026-03-10T08:37:29.491 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph osd pool get .mgr pg_num 2026-03-10T08:37:30.040 
INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:29 vm02 bash[17473]: cluster 2026-03-10T08:37:28.201211+0000 mgr.vm02.ttibzz (mgr.14195) 95 : cluster [DBG] pgmap v53: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail
2026-03-10T08:37:30.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:29 vm02 bash[17473]: audit 2026-03-10T08:37:29.140021+0000 mon.vm02 (mon.0) 599 : audit [DBG] from='client.? 192.168.123.102:0/332717713' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch
2026-03-10T08:37:31.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:30 vm02 bash[17473]: cephadm 2026-03-10T08:37:29.856441+0000 mgr.vm02.ttibzz (mgr.14195) 96 : cephadm [INF] Detected new or changed devices on vm02
2026-03-10T08:37:31.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:30 vm02 bash[17473]: audit 2026-03-10T08:37:29.866182+0000 mon.vm02 (mon.0) 600 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:37:31.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:30 vm02 bash[17473]: audit 2026-03-10T08:37:29.871735+0000 mon.vm02 (mon.0) 601 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:37:31.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:30 vm02 bash[17473]: audit 2026-03-10T08:37:29.872776+0000 mon.vm02 (mon.0) 602 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch
2026-03-10T08:37:31.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:30 vm02 bash[17473]: audit 2026-03-10T08:37:30.765584+0000 mon.vm02 (mon.0) 603 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:37:31.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:30 vm02 bash[17473]: audit 2026-03-10T08:37:30.770104+0000 mon.vm02 (mon.0) 604 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:37:31.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:30 vm02 bash[17473]: audit 2026-03-10T08:37:30.771066+0000 mon.vm02 (mon.0) 605 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config rm", "who": "osd/host:vm07", "name": "osd_memory_target"}]: dispatch
2026-03-10T08:37:31.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:30 vm02 bash[17473]: audit 2026-03-10T08:37:30.771842+0000 mon.vm02 (mon.0) 606 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:37:31.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:30 vm02 bash[17473]: audit 2026-03-10T08:37:30.772238+0000 mon.vm02 (mon.0) 607 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T08:37:31.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:30 vm02 bash[17473]: audit 2026-03-10T08:37:30.776397+0000 mon.vm02 (mon.0) 608 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:37:31.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:30 vm02 bash[17473]: audit 2026-03-10T08:37:30.777978+0000 mon.vm02 (mon.0) 609 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T08:37:32.289 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:31 vm02 bash[17473]: cluster 2026-03-10T08:37:30.201484+0000 mgr.vm02.ttibzz (mgr.14195) 97 : cluster [DBG] pgmap v54: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail
2026-03-10T08:37:32.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:31 vm02 bash[17473]: cephadm 2026-03-10T08:37:30.759689+0000 mgr.vm02.ttibzz (mgr.14195) 98 : cephadm [INF] Detected new or changed devices on vm07
2026-03-10T08:37:34.142 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config
2026-03-10T08:37:34.155 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:33 vm02 bash[17473]: cluster 2026-03-10T08:37:32.201766+0000 mgr.vm02.ttibzz (mgr.14195) 99 : cluster [DBG] pgmap v55: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail
2026-03-10T08:37:34.412 INFO:teuthology.orchestra.run.vm02.stdout:pg_num: 1
2026-03-10T08:37:34.462 INFO:tasks.cephadm:Setting up client nodes...
2026-03-10T08:37:34.462 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph auth get-or-create client.0 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *'
2026-03-10T08:37:35.154 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:34 vm02 bash[17473]: audit 2026-03-10T08:37:34.235513+0000 mon.vm02 (mon.0) 610 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:37:35.154 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:34 vm02 bash[17473]: audit 2026-03-10T08:37:34.409079+0000 mon.vm02 (mon.0) 611 : audit [DBG] from='client.? 192.168.123.102:0/3943226296' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch
2026-03-10T08:37:36.289 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:35 vm02 bash[17473]: cluster 2026-03-10T08:37:34.202017+0000 mgr.vm02.ttibzz (mgr.14195) 100 : cluster [DBG] pgmap v56: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail
2026-03-10T08:37:38.179 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config
2026-03-10T08:37:38.240 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:37 vm02 bash[17473]: cluster 2026-03-10T08:37:36.202299+0000 mgr.vm02.ttibzz (mgr.14195) 101 : cluster [DBG] pgmap v57: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail
2026-03-10T08:37:38.470 INFO:teuthology.orchestra.run.vm02.stdout:[client.0]
2026-03-10T08:37:38.471 INFO:teuthology.orchestra.run.vm02.stdout: key = AQBS2K9pLtavGxAAEomxBzk7ld8S5jTSUwmppQ==
2026-03-10T08:37:38.520 DEBUG:teuthology.orchestra.run.vm02:> set -ex
2026-03-10T08:37:38.520 DEBUG:teuthology.orchestra.run.vm02:> sudo dd of=/etc/ceph/ceph.client.0.keyring
2026-03-10T08:37:38.520 DEBUG:teuthology.orchestra.run.vm02:> sudo chmod 0644 /etc/ceph/ceph.client.0.keyring
2026-03-10T08:37:38.531 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph auth get-or-create client.1 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *'
2026-03-10T08:37:39.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:38 vm02 bash[17473]: cluster 2026-03-10T08:37:38.202589+0000 mgr.vm02.ttibzz (mgr.14195) 102 : cluster [DBG] pgmap v58: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail
2026-03-10T08:37:39.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:38 vm02 bash[17473]: audit 2026-03-10T08:37:38.464398+0000 mon.vm02 (mon.0) 612 : audit [INF] from='client.? 192.168.123.102:0/572102857' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch
192.168.123.102:0/572102857' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T08:37:39.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:38 vm02 bash[17473]: audit 2026-03-10T08:37:38.466915+0000 mon.vm02 (mon.0) 613 : audit [INF] from='client.? 192.168.123.102:0/572102857' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-10T08:37:39.194 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:38 vm02 bash[17473]: audit 2026-03-10T08:37:38.466915+0000 mon.vm02 (mon.0) 613 : audit [INF] from='client.? 192.168.123.102:0/572102857' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-10T08:37:41.789 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:41 vm02 bash[17473]: cluster 2026-03-10T08:37:40.202851+0000 mgr.vm02.ttibzz (mgr.14195) 103 : cluster [DBG] pgmap v59: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:37:41.838 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:41 vm02 bash[17473]: cluster 2026-03-10T08:37:40.202851+0000 mgr.vm02.ttibzz (mgr.14195) 103 : cluster [DBG] pgmap v59: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:37:43.152 INFO:teuthology.orchestra.run.vm07.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm07/config 2026-03-10T08:37:43.433 INFO:teuthology.orchestra.run.vm07.stdout:[client.1] 2026-03-10T08:37:43.433 INFO:teuthology.orchestra.run.vm07.stdout: key = AQBX2K9pr+JiGRAAepX2wPb9uokIi2X1czERfg== 2026-03-10T08:37:43.490 DEBUG:teuthology.orchestra.run.vm07:> set -ex 2026-03-10T08:37:43.490 DEBUG:teuthology.orchestra.run.vm07:> sudo dd of=/etc/ceph/ceph.client.1.keyring 2026-03-10T08:37:43.490 DEBUG:teuthology.orchestra.run.vm07:> sudo chmod 0644 /etc/ceph/ceph.client.1.keyring 2026-03-10T08:37:43.500 INFO:tasks.ceph:Waiting until ceph daemons up and pgs clean... 2026-03-10T08:37:43.500 INFO:tasks.cephadm.ceph_manager.ceph:waiting for mgr available 2026-03-10T08:37:43.500 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph mgr dump --format=json 2026-03-10T08:37:43.789 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:43 vm02 bash[17473]: cluster 2026-03-10T08:37:42.203109+0000 mgr.vm02.ttibzz (mgr.14195) 104 : cluster [DBG] pgmap v60: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:37:43.789 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:43 vm02 bash[17473]: cluster 2026-03-10T08:37:42.203109+0000 mgr.vm02.ttibzz (mgr.14195) 104 : cluster [DBG] pgmap v60: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:37:44.789 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:44 vm02 bash[17473]: audit 2026-03-10T08:37:43.425817+0000 mon.vm02 (mon.0) 614 : audit [INF] from='client.? 
' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T08:37:44.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:44 vm02 bash[17473]: audit 2026-03-10T08:37:43.425817+0000 mon.vm02 (mon.0) 614 : audit [INF] from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T08:37:44.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:44 vm02 bash[17473]: audit 2026-03-10T08:37:43.428052+0000 mon.vm02 (mon.0) 615 : audit [INF] from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-10T08:37:44.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:44 vm02 bash[17473]: audit 2026-03-10T08:37:43.428052+0000 mon.vm02 (mon.0) 615 : audit [INF] from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-10T08:37:44.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:44 vm02 bash[17473]: audit 2026-03-10T08:37:43.428727+0000 mon.vm07 (mon.1) 20 : audit [INF] from='client.? 192.168.123.107:0/4044509837' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T08:37:44.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:44 vm02 bash[17473]: audit 2026-03-10T08:37:43.428727+0000 mon.vm07 (mon.1) 20 : audit [INF] from='client.? 
192.168.123.107:0/4044509837' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-10T08:37:45.789 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:45 vm02 bash[17473]: cluster 2026-03-10T08:37:44.203375+0000 mgr.vm02.ttibzz (mgr.14195) 105 : cluster [DBG] pgmap v61: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:37:45.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:45 vm02 bash[17473]: cluster 2026-03-10T08:37:44.203375+0000 mgr.vm02.ttibzz (mgr.14195) 105 : cluster [DBG] pgmap v61: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:37:47.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:47 vm02 bash[17473]: cluster 2026-03-10T08:37:46.203669+0000 mgr.vm02.ttibzz (mgr.14195) 106 : cluster [DBG] pgmap v62: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:37:47.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:47 vm02 bash[17473]: cluster 2026-03-10T08:37:46.203669+0000 mgr.vm02.ttibzz (mgr.14195) 106 : cluster [DBG] pgmap v62: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:37:48.126 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:37:48.389 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-10T08:37:48.438 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":18,"flags":0,"active_gid":14195,"active_name":"vm02.ttibzz","active_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6800","nonce":3144752818},{"type":"v1","addr":"192.168.123.102:6801","nonce":3144752818}]},"active_addr":"192.168.123.102:6801/3144752818","active_change":"2026-03-10T08:36:04.178268+0000","active_mgr_features":4540701547738038271,"available":true,"standbys":[{"gid":14210,"name":"vm07.aunzpk","mgr_features":4540701547738038271,"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts 
to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less 
aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2359","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","read","upmap","upmap-read"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt optimization","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_pg_upmap_activity":{"name":"update_pg_upmap_activity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Updates pg_upmap activity stats to be used in `balancer status detail`","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per 
attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"cephadm_log_destination":{"name":"cephadm_log_destination","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":["file","file,syslog","syslog"],"desc":"Destination for cephadm command's persistent logging","long_desc":"","tags":[],"see_also":[]},"cgroups_split":{"name":"cgroups_split","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Pass --cgroups=split when cephadm creates containers (currently podman only)","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.25.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the 
tag","long_desc":"","tags":[],"see_also":[]},"container_image_elasticsearch":{"name":"container_image_elasticsearch","type":"str","level":"advanced","flags":0,"default_value":"quay.io/omrizeneva/elasticsearch:6.8.23","min":"","max":"","enum_allowed":[],"desc":"elasticsearch container image","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/grafana:10.4.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_agent":{"name":"container_image_jaeger_agent","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-agent:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger agent container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_collector":{"name":"container_image_jaeger_collector","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-collector:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger collector container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_query":{"name":"container_image_jaeger_query","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-query:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger query container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/keepalived:2.2.4","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_loki":{"name":"container_image_loki","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/loki:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Loki container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.7.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_nvmeof":{"name":"container_image_nvmeof","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nvmeof:1.2.5","min":"","max":"","enum_allowed":[],"desc":"Nvme-of container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.51.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_promtail":{"name":"container_image_promtail","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/promtail:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Promtail container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba":{"name":"container_image_samba","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-server:devbuilds-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba/SMB container 
image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with `--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_cephadm_command_timeout":{"name":"default_cephadm_command_timeout","type":"int","level":"advanced","flags":0,"default_value":"900","min":"","max":"","enum_allowed":[],"desc":"Default timeout applied to cephadm commands run directly on the host (in seconds)","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"quay.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"grafana_dashboards_path":{"name":"grafana_dashboards_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/grafana/dashboards/ceph-dashboard/","min":"","max":"","enum_allowed":[],"desc":"location of dashboards to include in grafana deployments","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"hw_monitoring":{"name":"hw_monitoring","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Deploy hw monitoring daemon on every host.","long_desc":"","tags":[],"see_also":[]},"inventory_list_all":{"name":"inventory_list_all","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Whether ceph-volume inventory should report more devices (mostly mappers (LVs / mpaths), 
partitions...)","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_refresh_metadata":{"name":"log_refresh_metadata","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Log all refresh metadata. Includes daemon, device, and host info collected regularly. Only has effect if logging at debug level","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"oob_default_addr":{"name":"oob_default_addr","type":"str","level":"advanced","flags":0,"default_value":"169.254.1.1","min":"","max":"","enum_allowed":[],"desc":"Default address for RedFish API (oob management).","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is 
to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"service_discovery_port":{"name":"service_discovery_port","type":"int","level":"advanced","flags":0,"default_value":"8765","min":"","max":"","enum_allowed":[],"desc":"cephadm service discovery port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_count_max":{"name":"ssh_keepalive_count_max","type":"int","level":"advanced","flags":0,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"How many times ssh connections can fail liveness checks before the host is marked offline","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_interval":{"name":"ssh_keepalive_interval","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"How often ssh connections are checked for liveness","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_DASHBOARD":{"name":"FEATURE_TOGGLE_DASHBOARD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0
,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"def
ault_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_POLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"cross_origin_url":{"name":"cross_origin_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"redirect_resolve_ip_addr":{"name":"redirect_resolve_ip_addr","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this 
long","long_desc":"","tags":[],"see_also":[]}}},{"name":"diskprediction_local","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predict_interval":{"name":"predict_interval","type":"str","level":"advanced","flags":0,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predictor_model":{"name":"predictor_model","type":"str","level":"advanced","flags":0,"default_value":"prophetstor","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"str","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. 
Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local 
pool","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":
"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"fail_fs":{"name":"fail_fs","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Fail filesystem for rapid multi-rank mds upgrade","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[
]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. 
Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to sleep","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"exclude_perf_counters":{"name":"exclude_perf_counters","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Do not include perf-counters in the metrics output","long_desc":"Gathering perf-counters from a single Prometheus exporter can degrade ceph-mgr performance, especially in large clusters. Instead, Ceph-exporter daemons are now used by default for perf-counter gathering. 
This should only be disabled when no ceph-exporters are deployed.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":1,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"st
r","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_requests":{"name":"max_requests","type":"int","level":"advanced","flags":0,"default_value":"500","min":"","max":"","enum_allowed":[],"desc":"Maximum number of requests to keep in memory. When new request comes in, the oldest request will be removed if the number of requests exceeds the max request number. if un-finished request is removed, error message will be logged in the ceph-mgr log.","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rgw","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"secondary_zone_period_retry_limit":{"name":"secondary_zone_period_retry_limit","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"RGW module period update retry limit for secondary 
site","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":""
,"long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_a
llowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard_description":{"name":"leaderboard_description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"adv
anced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"periodic_async_work":{"name":"periodic_async_work","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Periodically check for async work","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_no_wait":{"name":"snapshot_clone_no_wait","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Reject subvolume clone request when cloner threads are 
busy","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}]}],"modules":["cephadm","dashboard","iostat","nfs","prometheus","restful"],"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health 
status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 = Sunday, 1 = 
Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2359","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","read","upmap","upmap-read"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt 
optimization","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_pg_upmap_activity":{"name":"update_pg_upmap_activity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Updates pg_upmap activity stats to be used in `balancer status detail`","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. 
Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"cephadm_log_destination":{"name":"cephadm_log_destination","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":["file","file,syslog","syslog"],"desc":"Destination for cephadm command's persistent logging","long_desc":"","tags":[],"see_also":[]},"cgroups_split":{"name":"cgroups_split","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Pass --cgroups=split when cephadm creates containers (currently podman only)","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.25.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_elasticsearch":{"name":"container_image_elasticsearch","type":"str","level":"advanced","flags":0,"default_value":"quay.io/omrizeneva/elasticsearch:6.8.23","min":"","max":"","enum_allowed":[],"desc":"elasticsearch container image","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/grafana:10.4.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_agent":{"name":"container_image_jaeger_agent","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-agent:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger agent container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_collector":{"name":"container_image_jaeger_collector","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-collector:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger 
collector container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_query":{"name":"container_image_jaeger_query","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-query:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger query container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/keepalived:2.2.4","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_loki":{"name":"container_image_loki","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/loki:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Loki container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.7.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_nvmeof":{"name":"container_image_nvmeof","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nvmeof:1.2.5","min":"","max":"","enum_allowed":[],"desc":"Nvme-of container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.51.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_promtail":{"name":"container_image_promtail","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/promtail:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Promtail container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba":{"name":"container_image_samba","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-server:devbuilds-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba/SMB container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with `--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_cephadm_command_timeout":{"name":"default_cephadm_command_timeout","type":"int","level":"advanced","flags":0,"default_value":"900","min":"","max":"","enum_allowed":[],"desc":"Default timeout applied to cephadm commands run directly on the host (in seconds)","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"quay.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. 
This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"grafana_dashboards_path":{"name":"grafana_dashboards_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/grafana/dashboards/ceph-dashboard/","min":"","max":"","enum_allowed":[],"desc":"location of dashboards to include in grafana deployments","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"hw_monitoring":{"name":"hw_monitoring","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Deploy hw monitoring daemon on every host.","long_desc":"","tags":[],"see_also":[]},"inventory_list_all":{"name":"inventory_list_all","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Whether ceph-volume inventory should report more devices (mostly mappers (LVs / mpaths), partitions...)","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_refresh_metadata":{"name":"log_refresh_metadata","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Log all refresh metadata. Includes daemon, device, and host info collected regularly. 
Only has effect if logging at debug level","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"oob_default_addr":{"name":"oob_default_addr","type":"str","level":"advanced","flags":0,"default_value":"169.254.1.1","min":"","max":"","enum_allowed":[],"desc":"Default address for RedFish API (oob management).","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. 
Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"service_discovery_port":{"name":"service_discovery_port","type":"int","level":"advanced","flags":0,"default_value":"8765","min":"","max":"","enum_allowed":[],"desc":"cephadm service discovery port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_count_max":{"name":"ssh_keepalive_count_max","type":"int","level":"advanced","flags":0,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"How many times ssh connections can fail liveness checks before the host is marked offline","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_interval":{"name":"ssh_keepalive_interval","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"How often ssh connections are checked for liveness","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_DASHBOARD":{"name":"FEATURE_TOGGLE_DASHBOARD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0
,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"def
ault_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_POLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"cross_origin_url":{"name":"cross_origin_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"redirect_resolve_ip_addr":{"name":"redirect_resolve_ip_addr","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this 
long","long_desc":"","tags":[],"see_also":[]}}},{"name":"diskprediction_local","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predict_interval":{"name":"predict_interval","type":"str","level":"advanced","flags":0,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predictor_model":{"name":"predictor_model","type":"str","level":"advanced","flags":0,"default_value":"prophetstor","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"str","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. 
Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local 
pool","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":
"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"fail_fs":{"name":"fail_fs","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Fail filesystem for rapid multi-rank mds upgrade","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[
]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. 
Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to sleep","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"exclude_perf_counters":{"name":"exclude_perf_counters","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Do not include perf-counters in the metrics output","long_desc":"Gathering perf-counters from a single Prometheus exporter can degrade ceph-mgr performance, especially in large clusters. Instead, Ceph-exporter daemons are now used by default for perf-counter gathering. 
This should only be disabled when no ceph-exporters are deployed.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":1,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"st
r","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_requests":{"name":"max_requests","type":"int","level":"advanced","flags":0,"default_value":"500","min":"","max":"","enum_allowed":[],"desc":"Maximum number of requests to keep in memory. When new request comes in, the oldest request will be removed if the number of requests exceeds the max request number. if un-finished request is removed, error message will be logged in the ceph-mgr log.","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rgw","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"secondary_zone_period_retry_limit":{"name":"secondary_zone_period_retry_limit","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"RGW module period update retry limit for secondary 
site","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":""
,"long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_a
llowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard_description":{"name":"leaderboard_description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"adv
anced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"periodic_async_work":{"name":"periodic_async_work","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Periodically check for async work","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_no_wait":{"name":"snapshot_clone_no_wait","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Reject subvolume clone request when cloner threads are 
busy","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}],"services":{"dashboard":"https://192.168.123.102:8443/","prometheus":"http://192.168.123.102:9283/"},"always_on_modules":{"octopus":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"pacific":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"quincy":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"reef":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"squid":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"]},"force_disabled_modules":{},"last_failure_osd_epoch":5,"active_clients":[{"name":"devicehealth","addrvec":[{"type":"v2","addr":"192.168.123.102:0","nonce":2302761165}]},{"name":"libcephsqlite","addrvec":[{"type":"v2","addr":"192.168.123.102:0","nonce":2035516868}]},{"na
me":"rbd_support","addrvec":[{"type":"v2","addr":"192.168.123.102:0","nonce":3193007949}]},{"name":"volumes","addrvec":[{"type":"v2","addr":"192.168.123.102:0","nonce":3855349576}]}]} 2026-03-10T08:37:48.439 INFO:tasks.cephadm.ceph_manager.ceph:mgr available! 2026-03-10T08:37:48.440 INFO:tasks.cephadm.ceph_manager.ceph:waiting for all up 2026-03-10T08:37:48.440 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph osd dump --format=json 2026-03-10T08:37:48.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:48 vm02 bash[17473]: audit 2026-03-10T08:37:48.384099+0000 mon.vm02 (mon.0) 616 : audit [DBG] from='client.? 192.168.123.102:0/81985059' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-10T08:37:49.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:49 vm02 bash[17473]: cluster 2026-03-10T08:37:48.203940+0000 mgr.vm02.ttibzz (mgr.14195) 107 : cluster [DBG] pgmap v63: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:37:49.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:49 vm02 bash[17473]: audit 2026-03-10T08:37:49.235751+0000 mon.vm02 (mon.0) 617 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:37:51.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:51 vm02 bash[17473]: cluster 2026-03-10T08:37:50.204183+0000 mgr.vm02.ttibzz (mgr.14195) 108 : cluster [DBG] pgmap v64: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:37:52.154 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:37:52.581 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-10T08:37:52.582 
INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":23,"fsid":"e750d050-1c5b-11f1-9e63-531fde0192f6","created":"2026-03-10T08:35:03.450392+0000","modified":"2026-03-10T08:37:21.603756+0000","last_up_change":"2026-03-10T08:37:19.291741+0000","last_in_change":"2026-03-10T08:36:58.181437+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":8,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"squid","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-10T08:37:18.252662+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"23","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair 
distribution","score_acting":7.8899998664855957,"score_stable":7.8899998664855957,"optimal_score":0.37999999523162842,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"1dafd277-7c1e-4421-9e79-1d111d605b64","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6802","nonce":1698099286},{"type":"v1","addr":"192.168.123.102:6803","nonce":1698099286}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6804","nonce":1698099286},{"type":"v1","addr":"192.168.123.102:6805","nonce":1698099286}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6808","nonce":1698099286},{"type":"v1","addr":"192.168.123.102:6809","nonce":1698099286}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6806","nonce":1698099286},{"type":"v1","addr":"192.168.123.102:6807","nonce":1698099286}]},"public_addr":"192.168.123.102:6803/1698099286","cluster_addr":"192.168.123.102:6805/1698099286","heartbeat_back_addr":"192.168.123.102:6809/1698099286","heartbeat_front_addr":"192.168.123.102:6807/1698099286","state":["exists","up"]},{"osd":1,"uuid":"1b1ad213-cbed-4d89-b55b-35bf90079e6d","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":17,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6800","nonce":847448647},{"type":"v1","addr":"192.168.123.107:6801","nonce":847448647}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6802","nonce":847448647},{"type":"v1","addr":"192.168.123.107:6803","nonce":847448647}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6806","nonce":847448647},{"type":"v1","addr":"192.168.123.107:6807","nonce":847448647}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6804","nonce":847448647},{"type":"v1","addr":"192.168.123.107:6805","nonce":847448647}]},"public_addr":"192.168.123.107:6801/847448647","cluster_addr":"192.168.123.107:6803/847448647","heartbeat_back_addr":"192.168.123.107:6807/847448647","heartbeat_front_addr":"192.168.123.107:6805/847448647","state":["exists","up"]},{"osd":2,"uuid":"0b4900c4-461a-401b-8901-8eb5567ddaa3","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":19,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6810","nonce":707108903},{"type":"v1","addr":"192.168.123.102:6811","nonce":707108903}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6812","nonce":707108903},{"type":"v1","addr":"192.168.123.102:6813","nonce":707108903}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6816","nonce":707108903},{"type":"v1","addr":"192.168.123.102:6817","nonce":707108903}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6814","nonce":707108903},{"type":"v1","addr":"192.168.123.102:6815","nonce":707108903}]},"public_addr":"192.168.123.102:6811/707108903","cluster_addr":"192.168.123.102:6813/707108903","heartbeat_back_addr":"192.168.123.102:6817/707108903","heartbeat_front_addr":"192.168.123.102:6815/707108903","state":["exists","up"]},{"osd":3,"uuid":"1f0eade6-7d8c-4351-968a-f172320dc16e","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,
"last_clean_end":0,"up_from":18,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6808","nonce":2259832732},{"type":"v1","addr":"192.168.123.107:6809","nonce":2259832732}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6810","nonce":2259832732},{"type":"v1","addr":"192.168.123.107:6811","nonce":2259832732}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6814","nonce":2259832732},{"type":"v1","addr":"192.168.123.107:6815","nonce":2259832732}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6812","nonce":2259832732},{"type":"v1","addr":"192.168.123.107:6813","nonce":2259832732}]},"public_addr":"192.168.123.107:6809/2259832732","cluster_addr":"192.168.123.107:6811/2259832732","heartbeat_back_addr":"192.168.123.107:6815/2259832732","heartbeat_front_addr":"192.168.123.107:6813/2259832732","state":["exists","up"]},{"osd":4,"uuid":"046bd431-09bd-47e8-9f2b-13ec28873f26","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":19,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6818","nonce":3617463997},{"type":"v1","addr":"192.168.123.102:6819","nonce":3617463997}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6820","nonce":3617463997},{"type":"v1","addr":"192.168.123.102:6821","nonce":3617463997}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6824","nonce":3617463997},{"type":"v1","addr":"192.168.123.102:6825","nonce":3617463997}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6822","nonce":3617463997},{"type":"v1","addr":"192.168.123.102:6823","nonce":3617463997}]},"public_addr":"192.168.123.102:6819/3617463997","cluster_addr":"192.168.123.102:6821/3617463997","heartbeat_back_addr":"192.168.123.102:6825/3617463997","heartbeat_front_addr":"192.168.123.102:6823/3617463997","state":["exists","up"]},{"osd":5,"uuid":"121b84e4-6673-42a6-a5d5-5b1356bedbf6","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6816","nonce":2199389295},{"type":"v1","addr":"192.168.123.107:6817","nonce":2199389295}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6818","nonce":2199389295},{"type":"v1","addr":"192.168.123.107:6819","nonce":2199389295}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6822","nonce":2199389295},{"type":"v1","addr":"192.168.123.107:6823","nonce":2199389295}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6820","nonce":2199389295},{"type":"v1","addr":"192.168.123.107:6821","nonce":2199389295}]},"public_addr":"192.168.123.107:6817/2199389295","cluster_addr":"192.168.123.107:6819/2199389295","heartbeat_back_addr":"192.168.123.107:6823/2199389295","heartbeat_front_addr":"192.168.123.107:6821/2199389295","state":["exists","up"]},{"osd":6,"uuid":"2846d411-a5e4-48a3-92a9-ede09394307e","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6826","nonce":468752798},{"type":"v1","addr":"192.168.123.102:6827","nonce":468752798}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6828","nonce":468752798},{"type":"v1","addr":"192.168.123.102:6829","nonce":4687
52798}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6832","nonce":468752798},{"type":"v1","addr":"192.168.123.102:6833","nonce":468752798}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6830","nonce":468752798},{"type":"v1","addr":"192.168.123.102:6831","nonce":468752798}]},"public_addr":"192.168.123.102:6827/468752798","cluster_addr":"192.168.123.102:6829/468752798","heartbeat_back_addr":"192.168.123.102:6833/468752798","heartbeat_front_addr":"192.168.123.102:6831/468752798","state":["exists","up"]},{"osd":7,"uuid":"94f52a63-655e-4086-bdc2-473506492269","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":20,"up_thru":20,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6824","nonce":1605519816},{"type":"v1","addr":"192.168.123.107:6825","nonce":1605519816}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6826","nonce":1605519816},{"type":"v1","addr":"192.168.123.107:6827","nonce":1605519816}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6830","nonce":1605519816},{"type":"v1","addr":"192.168.123.107:6831","nonce":1605519816}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6828","nonce":1605519816},{"type":"v1","addr":"192.168.123.107:6829","nonce":1605519816}]},"public_addr":"192.168.123.107:6825/1605519816","cluster_addr":"192.168.123.107:6827/1605519816","heartbeat_back_addr":"192.168.123.107:6831/1605519816","heartbeat_front_addr":"192.168.123.107:6829/1605519816","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T08:37:12.499739+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T08:37:13.184302+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T08:37:14.691074+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T08:37:14.280583+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T08:37:15.589406+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T08:37:16.501210+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T08:37:16.762163+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T08:37:17.090182+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.102:0/1060985559":"2026-03-11T08:36:04.177951+0000","192.168.123.102:0/3726942748":"2026-03-11T08:35:13.970779+0000","192.168.123.102:0/1101409822":"2026-03-11T08:35:13.970779+0000","192.168.123.102:6801/3140428636":"2026-03-11T08
:35:13.970779+0000","192.168.123.102:0/4158772845":"2026-03-11T08:35:13.970779+0000","192.168.123.102:0/112089711":"2026-03-11T08:36:04.177951+0000","192.168.123.102:6800/3140428636":"2026-03-11T08:35:13.970779+0000","192.168.123.102:0/403379110":"2026-03-11T08:35:25.890482+0000","192.168.123.102:0/801657235":"2026-03-11T08:35:25.890482+0000","192.168.123.102:6800/193015271":"2026-03-11T08:35:25.890482+0000","192.168.123.102:6801/193015271":"2026-03-11T08:35:25.890482+0000","192.168.123.102:0/1503313399":"2026-03-11T08:35:25.890482+0000","192.168.123.102:6800/3560899904":"2026-03-11T08:36:04.177951+0000","192.168.123.102:6801/3560899904":"2026-03-11T08:36:04.177951+0000","192.168.123.102:0/2516377943":"2026-03-11T08:36:04.177951+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-10T08:37:52.630 INFO:tasks.cephadm.ceph_manager.ceph:all up! 2026-03-10T08:37:52.630 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph osd dump --format=json 2026-03-10T08:37:53.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:53 vm02 bash[17473]: cluster 2026-03-10T08:37:52.204416+0000 mgr.vm02.ttibzz (mgr.14195) 109 : cluster [DBG] pgmap v65: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:37:53.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:53 vm02 bash[17473]: audit 2026-03-10T08:37:52.577867+0000 mon.vm02 (mon.0) 618 : audit [DBG] from='client.? 192.168.123.102:0/2402644845' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch
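The "waiting for all up" wait above is a poll loop: the same `ceph osd dump --format=json` command is re-run until every entry in the dump's "osds" array reports both "up":1 and "in":1, after which "all up!" is logged. A minimal sketch of that check in Python, reusing the fsid, image, and cephadm invocation visible in this log; the helper names and the poll interval are illustrative, not teuthology's actual code:

    import json
    import subprocess
    import time

    # fsid and image exactly as they appear in the cephadm commands above
    FSID = 'e750d050-1c5b-11f1-9e63-531fde0192f6'
    IMAGE = 'quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df'

    def osd_dump():
        # Same command the log shows being run via `cephadm shell`.
        out = subprocess.check_output([
            'sudo', '/home/ubuntu/cephtest/cephadm', '--image', IMAGE,
            'shell', '--fsid', FSID, '--',
            'ceph', 'osd', 'dump', '--format=json'])
        return json.loads(out)

    def all_osds_up_and_in(dump):
        # "up" and "in" are 0/1 flags on each element of the "osds" array,
        # as seen in the epoch-23 dump captured above.
        return all(o['up'] == 1 and o['in'] == 1 for o in dump['osds'])

    while not all_osds_up_and_in(osd_dump()):
        time.sleep(5)  # illustrative poll interval

    print('all up!')

Note that the dump is fetched once more immediately after the wait succeeds, which is why the same epoch-23 map is printed a second time below.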
2026-03-10T08:37:55.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:55 vm02 bash[17473]: cluster 2026-03-10T08:37:54.204643+0000 mgr.vm02.ttibzz (mgr.14195) 110 : cluster [DBG] pgmap v66: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:37:57.274 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:37:57.514 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-10T08:37:57.514 INFO:teuthology.orchestra.run.vm02.stdout:{"epoch":23,"fsid":"e750d050-1c5b-11f1-9e63-531fde0192f6","created":"2026-03-10T08:35:03.450392+0000","modified":"2026-03-10T08:37:21.603756+0000","last_up_change":"2026-03-10T08:37:19.291741+0000","last_in_change":"2026-03-10T08:36:58.181437+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":8,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"squid","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-10T08:37:18.252662+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"23","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair 
distribution","score_acting":7.8899998664855957,"score_stable":7.8899998664855957,"optimal_score":0.37999999523162842,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"1dafd277-7c1e-4421-9e79-1d111d605b64","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6802","nonce":1698099286},{"type":"v1","addr":"192.168.123.102:6803","nonce":1698099286}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6804","nonce":1698099286},{"type":"v1","addr":"192.168.123.102:6805","nonce":1698099286}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6808","nonce":1698099286},{"type":"v1","addr":"192.168.123.102:6809","nonce":1698099286}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6806","nonce":1698099286},{"type":"v1","addr":"192.168.123.102:6807","nonce":1698099286}]},"public_addr":"192.168.123.102:6803/1698099286","cluster_addr":"192.168.123.102:6805/1698099286","heartbeat_back_addr":"192.168.123.102:6809/1698099286","heartbeat_front_addr":"192.168.123.102:6807/1698099286","state":["exists","up"]},{"osd":1,"uuid":"1b1ad213-cbed-4d89-b55b-35bf90079e6d","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":17,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6800","nonce":847448647},{"type":"v1","addr":"192.168.123.107:6801","nonce":847448647}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6802","nonce":847448647},{"type":"v1","addr":"192.168.123.107:6803","nonce":847448647}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6806","nonce":847448647},{"type":"v1","addr":"192.168.123.107:6807","nonce":847448647}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6804","nonce":847448647},{"type":"v1","addr":"192.168.123.107:6805","nonce":847448647}]},"public_addr":"192.168.123.107:6801/847448647","cluster_addr":"192.168.123.107:6803/847448647","heartbeat_back_addr":"192.168.123.107:6807/847448647","heartbeat_front_addr":"192.168.123.107:6805/847448647","state":["exists","up"]},{"osd":2,"uuid":"0b4900c4-461a-401b-8901-8eb5567ddaa3","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":19,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6810","nonce":707108903},{"type":"v1","addr":"192.168.123.102:6811","nonce":707108903}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6812","nonce":707108903},{"type":"v1","addr":"192.168.123.102:6813","nonce":707108903}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6816","nonce":707108903},{"type":"v1","addr":"192.168.123.102:6817","nonce":707108903}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6814","nonce":707108903},{"type":"v1","addr":"192.168.123.102:6815","nonce":707108903}]},"public_addr":"192.168.123.102:6811/707108903","cluster_addr":"192.168.123.102:6813/707108903","heartbeat_back_addr":"192.168.123.102:6817/707108903","heartbeat_front_addr":"192.168.123.102:6815/707108903","state":["exists","up"]},{"osd":3,"uuid":"1f0eade6-7d8c-4351-968a-f172320dc16e","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,
"last_clean_end":0,"up_from":18,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6808","nonce":2259832732},{"type":"v1","addr":"192.168.123.107:6809","nonce":2259832732}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6810","nonce":2259832732},{"type":"v1","addr":"192.168.123.107:6811","nonce":2259832732}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6814","nonce":2259832732},{"type":"v1","addr":"192.168.123.107:6815","nonce":2259832732}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6812","nonce":2259832732},{"type":"v1","addr":"192.168.123.107:6813","nonce":2259832732}]},"public_addr":"192.168.123.107:6809/2259832732","cluster_addr":"192.168.123.107:6811/2259832732","heartbeat_back_addr":"192.168.123.107:6815/2259832732","heartbeat_front_addr":"192.168.123.107:6813/2259832732","state":["exists","up"]},{"osd":4,"uuid":"046bd431-09bd-47e8-9f2b-13ec28873f26","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":19,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6818","nonce":3617463997},{"type":"v1","addr":"192.168.123.102:6819","nonce":3617463997}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6820","nonce":3617463997},{"type":"v1","addr":"192.168.123.102:6821","nonce":3617463997}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6824","nonce":3617463997},{"type":"v1","addr":"192.168.123.102:6825","nonce":3617463997}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6822","nonce":3617463997},{"type":"v1","addr":"192.168.123.102:6823","nonce":3617463997}]},"public_addr":"192.168.123.102:6819/3617463997","cluster_addr":"192.168.123.102:6821/3617463997","heartbeat_back_addr":"192.168.123.102:6825/3617463997","heartbeat_front_addr":"192.168.123.102:6823/3617463997","state":["exists","up"]},{"osd":5,"uuid":"121b84e4-6673-42a6-a5d5-5b1356bedbf6","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6816","nonce":2199389295},{"type":"v1","addr":"192.168.123.107:6817","nonce":2199389295}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6818","nonce":2199389295},{"type":"v1","addr":"192.168.123.107:6819","nonce":2199389295}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6822","nonce":2199389295},{"type":"v1","addr":"192.168.123.107:6823","nonce":2199389295}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6820","nonce":2199389295},{"type":"v1","addr":"192.168.123.107:6821","nonce":2199389295}]},"public_addr":"192.168.123.107:6817/2199389295","cluster_addr":"192.168.123.107:6819/2199389295","heartbeat_back_addr":"192.168.123.107:6823/2199389295","heartbeat_front_addr":"192.168.123.107:6821/2199389295","state":["exists","up"]},{"osd":6,"uuid":"2846d411-a5e4-48a3-92a9-ede09394307e","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6826","nonce":468752798},{"type":"v1","addr":"192.168.123.102:6827","nonce":468752798}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6828","nonce":468752798},{"type":"v1","addr":"192.168.123.102:6829","nonce":4687
52798}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6832","nonce":468752798},{"type":"v1","addr":"192.168.123.102:6833","nonce":468752798}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.102:6830","nonce":468752798},{"type":"v1","addr":"192.168.123.102:6831","nonce":468752798}]},"public_addr":"192.168.123.102:6827/468752798","cluster_addr":"192.168.123.102:6829/468752798","heartbeat_back_addr":"192.168.123.102:6833/468752798","heartbeat_front_addr":"192.168.123.102:6831/468752798","state":["exists","up"]},{"osd":7,"uuid":"94f52a63-655e-4086-bdc2-473506492269","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":20,"up_thru":20,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6824","nonce":1605519816},{"type":"v1","addr":"192.168.123.107:6825","nonce":1605519816}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6826","nonce":1605519816},{"type":"v1","addr":"192.168.123.107:6827","nonce":1605519816}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6830","nonce":1605519816},{"type":"v1","addr":"192.168.123.107:6831","nonce":1605519816}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.107:6828","nonce":1605519816},{"type":"v1","addr":"192.168.123.107:6829","nonce":1605519816}]},"public_addr":"192.168.123.107:6825/1605519816","cluster_addr":"192.168.123.107:6827/1605519816","heartbeat_back_addr":"192.168.123.107:6831/1605519816","heartbeat_front_addr":"192.168.123.107:6829/1605519816","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T08:37:12.499739+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T08:37:13.184302+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T08:37:14.691074+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T08:37:14.280583+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T08:37:15.589406+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T08:37:16.501210+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T08:37:16.762163+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-10T08:37:17.090182+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.102:0/1060985559":"2026-03-11T08:36:04.177951+0000","192.168.123.102:0/3726942748":"2026-03-11T08:35:13.970779+0000","192.168.123.102:0/1101409822":"2026-03-11T08:35:13.970779+0000","192.168.123.102:6801/3140428636":"2026-03-11T08
:35:13.970779+0000","192.168.123.102:0/4158772845":"2026-03-11T08:35:13.970779+0000","192.168.123.102:0/112089711":"2026-03-11T08:36:04.177951+0000","192.168.123.102:6800/3140428636":"2026-03-11T08:35:13.970779+0000","192.168.123.102:0/403379110":"2026-03-11T08:35:25.890482+0000","192.168.123.102:0/801657235":"2026-03-11T08:35:25.890482+0000","192.168.123.102:6800/193015271":"2026-03-11T08:35:25.890482+0000","192.168.123.102:6801/193015271":"2026-03-11T08:35:25.890482+0000","192.168.123.102:0/1503313399":"2026-03-11T08:35:25.890482+0000","192.168.123.102:6800/3560899904":"2026-03-11T08:36:04.177951+0000","192.168.123.102:6801/3560899904":"2026-03-11T08:36:04.177951+0000","192.168.123.102:0/2516377943":"2026-03-11T08:36:04.177951+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-10T08:37:57.565 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:57 vm02 bash[17473]: cluster 2026-03-10T08:37:56.204898+0000 mgr.vm02.ttibzz (mgr.14195) 111 : cluster [DBG] pgmap v67: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:37:57.565 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:57 vm02 bash[17473]: cluster 2026-03-10T08:37:56.204898+0000 mgr.vm02.ttibzz (mgr.14195) 111 : cluster [DBG] pgmap v67: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:37:57.566 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph tell osd.0 flush_pg_stats 2026-03-10T08:37:57.566 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph tell osd.1 flush_pg_stats 2026-03-10T08:37:57.567 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph tell osd.2 flush_pg_stats 2026-03-10T08:37:57.567 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph tell osd.3 flush_pg_stats 2026-03-10T08:37:57.567 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph tell osd.4 flush_pg_stats 2026-03-10T08:37:57.567 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph tell osd.5 flush_pg_stats 2026-03-10T08:37:57.567 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph tell osd.6 flush_pg_stats 2026-03-10T08:37:57.567 
DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph tell osd.7 flush_pg_stats 2026-03-10T08:37:58.789 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:58 vm02 bash[17473]: audit 2026-03-10T08:37:57.510679+0000 mon.vm02 (mon.0) 619 : audit [DBG] from='client.? 192.168.123.102:0/2619546125' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T08:37:58.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:58 vm02 bash[17473]: audit 2026-03-10T08:37:57.510679+0000 mon.vm02 (mon.0) 619 : audit [DBG] from='client.? 192.168.123.102:0/2619546125' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-10T08:37:59.789 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:59 vm02 bash[17473]: cluster 2026-03-10T08:37:58.205157+0000 mgr.vm02.ttibzz (mgr.14195) 112 : cluster [DBG] pgmap v68: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:37:59.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:37:59 vm02 bash[17473]: cluster 2026-03-10T08:37:58.205157+0000 mgr.vm02.ttibzz (mgr.14195) 112 : cluster [DBG] pgmap v68: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:01.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:01 vm02 bash[17473]: cluster 2026-03-10T08:38:00.205374+0000 mgr.vm02.ttibzz (mgr.14195) 113 : cluster [DBG] pgmap v69: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:01.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:01 vm02 bash[17473]: cluster 2026-03-10T08:38:00.205374+0000 mgr.vm02.ttibzz (mgr.14195) 113 : cluster [DBG] pgmap v69: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:02.513 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:38:02.514 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:38:02.516 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:38:02.516 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:38:02.516 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:38:02.518 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:38:02.520 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:38:02.522 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:38:02.985 INFO:teuthology.orchestra.run.vm02.stdout:81604378634 2026-03-10T08:38:02.986 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph osd last-stat-seq osd.4 2026-03-10T08:38:03.152 INFO:teuthology.orchestra.run.vm02.stdout:73014444044 
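The flush/poll handshake above is the usual teuthology pattern for making OSD stats current before a clean-check: `ceph tell osd.N flush_pg_stats` prints the stat sequence number the OSD just published, and the run then polls `ceph osd last-stat-seq osd.N` until the monitor reports at least that sequence (the "need seq X got Y for osd.N" lines further down). A minimal Python sketch of that loop, assuming a hypothetical cephadm_shell() helper that wraps the exact `sudo .../cephadm --image ... shell --fsid ... --` invocation logged above:

    import subprocess
    import time

    FSID = "e750d050-1c5b-11f1-9e63-531fde0192f6"
    IMAGE = "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df"

    def cephadm_shell(*args):
        # Hypothetical helper mirroring the command teuthology runs on vm02:
        # sudo /home/ubuntu/cephtest/cephadm --image IMAGE shell --fsid FSID -- <cmd>
        cmd = ["sudo", "/home/ubuntu/cephtest/cephadm", "--image", IMAGE,
               "shell", "--fsid", FSID, "--", *args]
        return subprocess.check_output(cmd, text=True).strip()

    def flush_pg_stats(osd_id, timeout=90):
        # `ceph tell osd.N flush_pg_stats` prints the stat seq the OSD published.
        need = int(cephadm_shell("ceph", "tell", f"osd.{osd_id}", "flush_pg_stats"))
        deadline = time.time() + timeout
        while time.time() < deadline:
            got = int(cephadm_shell("ceph", "osd", "last-stat-seq", f"osd.{osd_id}"))
            # Corresponds to the 'need seq X got Y for osd.N' lines in this log.
            if got >= need:
                return
            time.sleep(1)
        raise TimeoutError(f"osd.{osd_id} never reported stat seq >= {need}")

Each OSD is checked independently (the interleaved "result is None" lines below show teuthology driving these checks in parallel), so one slow OSD only delays its own confirmation.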
2026-03-10T08:38:03.152 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph osd last-stat-seq osd.1 2026-03-10T08:38:03.281 INFO:teuthology.orchestra.run.vm02.stdout:77309411339 2026-03-10T08:38:03.281 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph osd last-stat-seq osd.3 2026-03-10T08:38:03.291 INFO:teuthology.orchestra.run.vm02.stdout:90194313226 2026-03-10T08:38:03.292 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph osd last-stat-seq osd.5 2026-03-10T08:38:03.371 INFO:teuthology.orchestra.run.vm02.stdout:68719476747 2026-03-10T08:38:03.371 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph osd last-stat-seq osd.0 2026-03-10T08:38:03.378 INFO:teuthology.orchestra.run.vm02.stdout:85899345930 2026-03-10T08:38:03.378 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph osd last-stat-seq osd.7 2026-03-10T08:38:03.388 INFO:teuthology.orchestra.run.vm02.stdout:90194313226 2026-03-10T08:38:03.388 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph osd last-stat-seq osd.6 2026-03-10T08:38:03.405 INFO:teuthology.orchestra.run.vm02.stdout:81604378634 2026-03-10T08:38:03.405 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph osd last-stat-seq osd.2 2026-03-10T08:38:03.539 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:03 vm02 bash[17473]: cluster 2026-03-10T08:38:02.205597+0000 mgr.vm02.ttibzz (mgr.14195) 114 : cluster [DBG] pgmap v70: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:03.539 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:03 vm02 bash[17473]: cluster 2026-03-10T08:38:02.205597+0000 mgr.vm02.ttibzz (mgr.14195) 114 : cluster [DBG] pgmap v70: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:04.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:04 vm02 bash[17473]: audit 2026-03-10T08:38:04.235860+0000 mon.vm02 (mon.0) 620 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:38:04.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:04 vm02 bash[17473]: audit 2026-03-10T08:38:04.235860+0000 mon.vm02 (mon.0) 620 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:38:05.789 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:05 vm02 bash[17473]: cluster 2026-03-10T08:38:04.205810+0000 
mgr.vm02.ttibzz (mgr.14195) 115 : cluster [DBG] pgmap v71: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:05.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:05 vm02 bash[17473]: cluster 2026-03-10T08:38:04.205810+0000 mgr.vm02.ttibzz (mgr.14195) 115 : cluster [DBG] pgmap v71: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:07.763 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:38:07.763 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:38:07.764 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:38:07.765 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:38:07.767 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:38:07.769 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:38:07.771 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:38:07.772 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:38:07.789 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:07 vm02 bash[17473]: cluster 2026-03-10T08:38:06.206095+0000 mgr.vm02.ttibzz (mgr.14195) 116 : cluster [DBG] pgmap v72: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:07.789 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:07 vm02 bash[17473]: cluster 2026-03-10T08:38:06.206095+0000 mgr.vm02.ttibzz (mgr.14195) 116 : cluster [DBG] pgmap v72: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:08.448 INFO:teuthology.orchestra.run.vm02.stdout:77309411339 2026-03-10T08:38:08.459 INFO:teuthology.orchestra.run.vm02.stdout:90194313227 2026-03-10T08:38:08.557 INFO:tasks.cephadm.ceph_manager.ceph:need seq 77309411339 got 77309411339 for osd.3 2026-03-10T08:38:08.557 DEBUG:teuthology.parallel:result is None 2026-03-10T08:38:08.559 INFO:teuthology.orchestra.run.vm02.stdout:73014444045 2026-03-10T08:38:08.629 INFO:tasks.cephadm.ceph_manager.ceph:need seq 90194313226 got 90194313227 for osd.6 2026-03-10T08:38:08.629 DEBUG:teuthology.parallel:result is None 2026-03-10T08:38:08.679 INFO:tasks.cephadm.ceph_manager.ceph:need seq 73014444044 got 73014444045 for osd.1 2026-03-10T08:38:08.679 DEBUG:teuthology.parallel:result is None 2026-03-10T08:38:08.688 INFO:teuthology.orchestra.run.vm02.stdout:85899345931 2026-03-10T08:38:08.689 INFO:teuthology.orchestra.run.vm02.stdout:81604378635 2026-03-10T08:38:08.693 INFO:teuthology.orchestra.run.vm02.stdout:81604378635 2026-03-10T08:38:08.738 INFO:teuthology.orchestra.run.vm02.stdout:90194313227 2026-03-10T08:38:08.744 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:08 vm02 bash[17473]: audit 2026-03-10T08:38:08.432369+0000 mon.vm07 (mon.1) 21 : audit [DBG] from='client.? 
192.168.123.102:0/3267617650' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-10T08:38:08.744 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:08 vm02 bash[17473]: audit 2026-03-10T08:38:08.432369+0000 mon.vm07 (mon.1) 21 : audit [DBG] from='client.? 192.168.123.102:0/3267617650' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-10T08:38:08.744 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:08 vm02 bash[17473]: audit 2026-03-10T08:38:08.459113+0000 mon.vm07 (mon.1) 22 : audit [DBG] from='client.? 192.168.123.102:0/2767062023' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch 2026-03-10T08:38:08.744 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:08 vm02 bash[17473]: audit 2026-03-10T08:38:08.459113+0000 mon.vm07 (mon.1) 22 : audit [DBG] from='client.? 192.168.123.102:0/2767062023' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch 2026-03-10T08:38:08.788 INFO:teuthology.orchestra.run.vm02.stdout:68719476748 2026-03-10T08:38:08.789 INFO:tasks.cephadm.ceph_manager.ceph:need seq 81604378634 got 81604378635 for osd.2 2026-03-10T08:38:08.789 DEBUG:teuthology.parallel:result is None 2026-03-10T08:38:08.823 INFO:tasks.cephadm.ceph_manager.ceph:need seq 81604378634 got 81604378635 for osd.4 2026-03-10T08:38:08.823 DEBUG:teuthology.parallel:result is None 2026-03-10T08:38:08.860 INFO:tasks.cephadm.ceph_manager.ceph:need seq 85899345930 got 85899345931 for osd.7 2026-03-10T08:38:08.860 DEBUG:teuthology.parallel:result is None 2026-03-10T08:38:08.891 INFO:tasks.cephadm.ceph_manager.ceph:need seq 90194313226 got 90194313227 for osd.5 2026-03-10T08:38:08.891 DEBUG:teuthology.parallel:result is None 2026-03-10T08:38:08.896 INFO:tasks.cephadm.ceph_manager.ceph:need seq 68719476747 got 68719476748 for osd.0 2026-03-10T08:38:08.896 DEBUG:teuthology.parallel:result is None 2026-03-10T08:38:08.896 INFO:tasks.cephadm.ceph_manager.ceph:waiting for clean 2026-03-10T08:38:08.896 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph pg dump --format=json 2026-03-10T08:38:10.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:09 vm02 bash[17473]: cluster 2026-03-10T08:38:08.206409+0000 mgr.vm02.ttibzz (mgr.14195) 117 : cluster [DBG] pgmap v73: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:10.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:09 vm02 bash[17473]: cluster 2026-03-10T08:38:08.206409+0000 mgr.vm02.ttibzz (mgr.14195) 117 : cluster [DBG] pgmap v73: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:10.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:09 vm02 bash[17473]: audit 2026-03-10T08:38:08.556596+0000 mon.vm07 (mon.1) 23 : audit [DBG] from='client.? 192.168.123.102:0/4027066554' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-10T08:38:10.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:09 vm02 bash[17473]: audit 2026-03-10T08:38:08.556596+0000 mon.vm07 (mon.1) 23 : audit [DBG] from='client.? 
192.168.123.102:0/4027066554' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-10T08:38:10.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:09 vm02 bash[17473]: audit 2026-03-10T08:38:08.685425+0000 mon.vm02 (mon.0) 621 : audit [DBG] from='client.? 192.168.123.102:0/3477899066' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch 2026-03-10T08:38:10.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:09 vm02 bash[17473]: audit 2026-03-10T08:38:08.685425+0000 mon.vm02 (mon.0) 621 : audit [DBG] from='client.? 192.168.123.102:0/3477899066' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch 2026-03-10T08:38:10.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:09 vm02 bash[17473]: audit 2026-03-10T08:38:08.688669+0000 mon.vm07 (mon.1) 24 : audit [DBG] from='client.? 192.168.123.102:0/604767299' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-10T08:38:10.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:09 vm02 bash[17473]: audit 2026-03-10T08:38:08.688669+0000 mon.vm07 (mon.1) 24 : audit [DBG] from='client.? 192.168.123.102:0/604767299' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-10T08:38:10.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:09 vm02 bash[17473]: audit 2026-03-10T08:38:08.693086+0000 mon.vm07 (mon.1) 25 : audit [DBG] from='client.? 192.168.123.102:0/2985368956' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-10T08:38:10.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:09 vm02 bash[17473]: audit 2026-03-10T08:38:08.693086+0000 mon.vm07 (mon.1) 25 : audit [DBG] from='client.? 192.168.123.102:0/2985368956' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-10T08:38:10.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:09 vm02 bash[17473]: audit 2026-03-10T08:38:08.727756+0000 mon.vm02 (mon.0) 622 : audit [DBG] from='client.? 192.168.123.102:0/2684747800' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-10T08:38:10.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:09 vm02 bash[17473]: audit 2026-03-10T08:38:08.727756+0000 mon.vm02 (mon.0) 622 : audit [DBG] from='client.? 192.168.123.102:0/2684747800' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-10T08:38:10.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:09 vm02 bash[17473]: audit 2026-03-10T08:38:08.765041+0000 mon.vm02 (mon.0) 623 : audit [DBG] from='client.? 192.168.123.102:0/3595715508' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-10T08:38:10.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:09 vm02 bash[17473]: audit 2026-03-10T08:38:08.765041+0000 mon.vm02 (mon.0) 623 : audit [DBG] from='client.? 
192.168.123.102:0/3595715508' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-10T08:38:12.039 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:11 vm02 bash[17473]: cluster 2026-03-10T08:38:10.206630+0000 mgr.vm02.ttibzz (mgr.14195) 118 : cluster [DBG] pgmap v74: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:12.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:11 vm02 bash[17473]: cluster 2026-03-10T08:38:10.206630+0000 mgr.vm02.ttibzz (mgr.14195) 118 : cluster [DBG] pgmap v74: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:13.573 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:38:13.830 INFO:teuthology.orchestra.run.vm02.stderr:dumped all 2026-03-10T08:38:13.830 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-10T08:38:13.841 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:13 vm02 bash[17473]: cluster 2026-03-10T08:38:12.206884+0000 mgr.vm02.ttibzz (mgr.14195) 119 : cluster [DBG] pgmap v75: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:13.841 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:13 vm02 bash[17473]: cluster 2026-03-10T08:38:12.206884+0000 mgr.vm02.ttibzz (mgr.14195) 119 : cluster [DBG] pgmap v75: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:13.979 INFO:teuthology.orchestra.run.vm02.stdout:{"pg_ready":true,"pg_map":{"version":75,"stamp":"2026-03-10T08:38:12.206762+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":32,"ondisk_log_size":32,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":3,"num_osds":8,"num_per_pool_osds":8,"num_per_pool_omap_osds":3,"kb":167739392,"kb_used":627776,"kb_used_data":3052,"kb_used_omap":12,"kb_used_meta":214515,"kb_avail":167111616,"statfs":{"total":171765137408,"available":171122294784,"internally_reserved":0,"allocated":3125248,"data_stored":1995800,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":12712,"internal_metadata":219663960},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms
":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"12.001490"},"pg_stats":[{"pgid":"1.0","version":"22'32","reported_seq":59,"reported_epoch":23,"state":"active+clean","last_fresh":"2026-03-10T08:37:22.367299+0000","last_change":"2026-03-10T08:37:20.464233+0000","last_active":"2026-03-10T08:37:22.367299+0000","last_peered":"2026-03-10T08:37:22.367299+0000","last_clean":"2026-03-10T08:37:22.367299+0000","last_became_active":"2026-03-10T08:37:20.464056+0000","last_became_peered":"2026-03-10T08:37:20.464056+0000","last_unstale":"2026-03-10T08:37:22.367299+0000","last_undegraded":"2026-03-10T08:37:22.367299+0000","last_fullsized":"2026-03-10T08:37:22.367299+0000","mapping_epoch":20,"log_start":"0'0","ondisk_log_start":"0'0","created":20,"last_epoch_clean":21,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-10T08:37:18.274979+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-10T08:37:18.274979+0000","last_clean_scrub_stamp":"2026-03-10T08:37:18.274979+0000","objects_scrubbed":0,"log_size":32,"log_dups_size":0,"ondisk_log_size":32,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 
2026-03-11T17:39:51.145378+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[7,0,3],"acting":[7,0,3],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":7,"acting_primary":7,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1388544,"data_stored":1377840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":32,"ondisk_log_size":32,"up":3,"acting":3,"num_store_stats":3}],"osd_stats":[{"osd":6,"up_from":21,"seq":90194313228,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27052,"kb_used_data":212,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940372,"statfs":{"total":21470642176,"available":21442940928,"internally_reserved":0,"allocated":217088,"data_stored":77245,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1589,"internal_metadata":27457995},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":5,"up_from":21,"seq":90194313228,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":436656,"kb_used_data":212,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20530768,"statfs":{"total":21470642176,"available":21023506432,"internally_reserved":0,"allocated":217088,"data_stored":77245,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated"
:1590,"internal_metadata":27457994},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":7,"up_from":20,"seq":85899345932,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27636,"kb_used_data":664,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939788,"statfs":{"total":21470642176,"available":21442342912,"internally_reserved":0,"allocated":679936,"data_stored":536525,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1585,"internal_metadata":27457999},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":4,"up_from":19,"seq":81604378636,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27052,"kb_used_data":212,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940372,"statfs":{"total":21470642176,"available":21442940928,"internally_reserved":0,"allocated":217088,"data_stored":77245,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":2,"up_from":19,"seq":81604378636,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27056,"kb_used_data":212,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940368,"statfs":{"total":21470642176,"available":21442936832,"internally_reserved":0,"allocated":217088,"data_stored":77245,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":3,"up_from":18,"seq":77309411340,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27636,"kb_used_data":664,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939788,"statfs":{"total":21470642176,"available":21442342912,"internally_reserved":0,"allocated":679936,"data_stored":536525,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1588,"internal_metadata":27457996},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":1,"up_from":17,"seq":73014444046,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27056,"kb_used_data":212,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940368,"statfs":{"total":21470642176,"available":21442936832,"internally_reserved":0,"allocated":217088,"data_st
ored":77245,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":0,"up_from":16,"seq":68719476749,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27632,"kb_used_data":664,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939792,"statfs":{"total":21470642176,"available":21442347008,"internally_reserved":0,"allocated":679936,"data_stored":536525,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":3,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":7,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-10T08:38:13.979 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph pg dump --format=json 2026-03-10T08:38:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:14 vm02 bash[17473]: audit 2026-03-10T08:38:13.826020+0000 mgr.vm02.ttibzz (mgr.14195) 120 : audit [DBG] from='client.14430 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T08:38:15.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:14 vm02 bash[17473]: audit 2026-03-10T08:38:13.826020+0000 mgr.vm02.ttibzz (mgr.14195) 120 : audit [DBG] from='client.14430 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T08:38:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:15 vm02 bash[17473]: cluster 2026-03-10T08:38:14.207117+0000 mgr.vm02.ttibzz (mgr.14195) 121 : cluster [DBG] pgmap v76: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:15 vm02 bash[17473]: cluster 2026-03-10T08:38:14.207117+0000 mgr.vm02.ttibzz (mgr.14195) 121 : cluster [DBG] pgmap v76: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:18.039 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:17 vm02 bash[17473]: cluster 2026-03-10T08:38:16.207429+0000 mgr.vm02.ttibzz (mgr.14195) 122 : cluster [DBG] pgmap v77: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 
2026-03-10T08:38:18.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:17 vm02 bash[17473]: cluster 2026-03-10T08:38:16.207429+0000 mgr.vm02.ttibzz (mgr.14195) 122 : cluster [DBG] pgmap v77: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:18.614 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:38:19.470 INFO:teuthology.orchestra.run.vm02.stderr:dumped all 2026-03-10T08:38:19.470 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-10T08:38:19.820 INFO:teuthology.orchestra.run.vm02.stdout:{"pg_ready":true,"pg_map":{"version":78,"stamp":"2026-03-10T08:38:18.207616+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":32,"ondisk_log_size":32,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":3,"num_osds":8,"num_per_pool_osds":8,"num_per_pool_omap_osds":3,"kb":167739392,"kb_used":627776,"kb_used_data":3052,"kb_used_omap":12,"kb_used_meta":214515,"kb_avail":167111616,"statfs":{"total":171765137408,"available":171122294784,"internally_reserved":0,"allocated":3125248,"data_stored":1995800,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":12712,"internal_metadata":219663960},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num
_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"12.001661"},"pg_stats":[{"pgid":"1.0","version":"22'32","reported_seq":59,"reported_epoch":23,"state":"active+clean","last_fresh":"2026-03-10T08:37:22.367299+0000","last_change":"2026-03-10T08:37:20.464233+0000","last_active":"2026-03-10T08:37:22.367299+0000","last_peered":"2026-03-10T08:37:22.367299+0000","last_clean":"2026-03-10T08:37:22.367299+0000","last_became_active":"2026-03-10T08:37:20.464056+0000","last_became_peered":"2026-03-10T08:37:20.464056+0000","last_unstale":"2026-03-10T08:37:22.367299+0000","last_undegraded":"2026-03-10T08:37:22.367299+0000","last_fullsized":"2026-03-10T08:37:22.367299+0000","mapping_epoch":20,"log_start":"0'0","ondisk_log_start":"0'0","created":20,"last_epoch_clean":21,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-10T08:37:18.274979+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-10T08:37:18.274979+0000","last_clean_scrub_stamp":"2026-03-10T08:37:18.274979+0000","objects_scrubbed":0,"log_size":32,"log_dups_size":0,"ondisk_log_size":32,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 2026-03-11T17:39:51.145378+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[7,0,3],"acting":[7,0,3],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":7,"acting_primary":7,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evi
ct_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1388544,"data_stored":1377840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":32,"ondisk_log_size":32,"up":3,"acting":3,"num_store_stats":3}],"osd_stats":[{"osd":6,"up_from":21,"seq":90194313229,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27052,"kb_used_data":212,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940372,"statfs":{"total":21470642176,"available":21442940928,"internally_reserved":0,"allocated":217088,"data_stored":77245,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1589,"internal_metadata":27457995},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":5,"up_from":21,"seq":90194313229,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":436656,"kb_used_data":212,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20530768,"statfs":{"total":21470642176,"available":21023506432,"internally_reserved":0,"allocated":217088,"data_stored":77245,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":7,"up_from":20,"seq":85899345933,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27636,"kb_used_data":664,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939788,"statfs":{"total":21470642176,"available":21442342912,"internally_reserved":0,"allocated":679936,"data_stored":536525,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1585,"internal_metadata":27457999},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":4,"up_from":19,"seq":81604378637,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27052,"kb_used_data":212,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940372,"statfs":{"total":21470642176,"available":21442940928,"internally_reserved":0,"allocated":217088,"data_stored":77245,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":2,"up_from":19,"seq":81604378637,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osd
s":0,"kb":20967424,"kb_used":27056,"kb_used_data":212,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940368,"statfs":{"total":21470642176,"available":21442936832,"internally_reserved":0,"allocated":217088,"data_stored":77245,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":3,"up_from":18,"seq":77309411342,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27636,"kb_used_data":664,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939788,"statfs":{"total":21470642176,"available":21442342912,"internally_reserved":0,"allocated":679936,"data_stored":536525,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1588,"internal_metadata":27457996},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":1,"up_from":17,"seq":73014444047,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27056,"kb_used_data":212,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940368,"statfs":{"total":21470642176,"available":21442936832,"internally_reserved":0,"allocated":217088,"data_stored":77245,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":0,"up_from":16,"seq":68719476750,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27632,"kb_used_data":664,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939792,"statfs":{"total":21470642176,"available":21442347008,"internally_reserved":0,"allocated":679936,"data_stored":536525,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":3,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":7,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-10T08:38:19.820 
INFO:tasks.cephadm.ceph_manager.ceph:clean! 2026-03-10T08:38:19.820 INFO:tasks.ceph:Waiting until ceph cluster ceph is healthy... 2026-03-10T08:38:19.821 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy 2026-03-10T08:38:19.821 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph health --format=json 2026-03-10T08:38:20.066 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:19 vm02 bash[17473]: cluster 2026-03-10T08:38:18.207775+0000 mgr.vm02.ttibzz (mgr.14195) 123 : cluster [DBG] pgmap v78: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:20.066 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:19 vm02 bash[17473]: cluster 2026-03-10T08:38:18.207775+0000 mgr.vm02.ttibzz (mgr.14195) 123 : cluster [DBG] pgmap v78: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:20.066 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:19 vm02 bash[17473]: audit 2026-03-10T08:38:19.239562+0000 mon.vm02 (mon.0) 624 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:38:20.066 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:19 vm02 bash[17473]: audit 2026-03-10T08:38:19.239562+0000 mon.vm02 (mon.0) 624 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:38:21.289 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:20 vm02 bash[17473]: audit 2026-03-10T08:38:19.466371+0000 mgr.vm02.ttibzz (mgr.14195) 124 : audit [DBG] from='client.14434 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T08:38:21.289 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:20 vm02 bash[17473]: audit 2026-03-10T08:38:19.466371+0000 mgr.vm02.ttibzz (mgr.14195) 124 : audit [DBG] from='client.14434 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T08:38:22.289 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:21 vm02 bash[17473]: cluster 2026-03-10T08:38:20.208052+0000 mgr.vm02.ttibzz (mgr.14195) 125 : cluster [DBG] pgmap v79: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:22.289 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:21 vm02 bash[17473]: cluster 2026-03-10T08:38:20.208052+0000 mgr.vm02.ttibzz (mgr.14195) 125 : cluster [DBG] pgmap v79: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:24.289 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:23 vm02 bash[17473]: cluster 2026-03-10T08:38:22.208319+0000 mgr.vm02.ttibzz (mgr.14195) 126 : cluster [DBG] pgmap v80: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:24.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:23 vm02 bash[17473]: cluster 2026-03-10T08:38:22.208319+0000 mgr.vm02.ttibzz (mgr.14195) 126 : cluster [DBG] pgmap v80: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:24.458 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:38:24.721 
INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-10T08:38:24.722 INFO:teuthology.orchestra.run.vm02.stdout:{"status":"HEALTH_OK","checks":{},"mutes":[]} 2026-03-10T08:38:24.770 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy done 2026-03-10T08:38:24.770 INFO:tasks.cephadm:Setup complete, yielding 2026-03-10T08:38:24.770 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-10T08:38:24.772 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm02.local 2026-03-10T08:38:24.772 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- bash -c 'ceph orch status' 2026-03-10T08:38:25.039 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:24 vm02 bash[17473]: audit 2026-03-10T08:38:24.718416+0000 mon.vm02 (mon.0) 625 : audit [DBG] from='client.? 192.168.123.102:0/1847694650' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-10T08:38:25.039 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:24 vm02 bash[17473]: audit 2026-03-10T08:38:24.718416+0000 mon.vm02 (mon.0) 625 : audit [DBG] from='client.? 192.168.123.102:0/1847694650' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-10T08:38:26.289 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:25 vm02 bash[17473]: cluster 2026-03-10T08:38:24.208568+0000 mgr.vm02.ttibzz (mgr.14195) 127 : cluster [DBG] pgmap v81: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:26.289 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:25 vm02 bash[17473]: cluster 2026-03-10T08:38:24.208568+0000 mgr.vm02.ttibzz (mgr.14195) 127 : cluster [DBG] pgmap v81: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:28.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:27 vm02 bash[17473]: cluster 2026-03-10T08:38:26.208811+0000 mgr.vm02.ttibzz (mgr.14195) 128 : cluster [DBG] pgmap v82: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:28.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:27 vm02 bash[17473]: cluster 2026-03-10T08:38:26.208811+0000 mgr.vm02.ttibzz (mgr.14195) 128 : cluster [DBG] pgmap v82: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:28.488 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:38:28.749 INFO:teuthology.orchestra.run.vm02.stdout:Backend: cephadm 2026-03-10T08:38:28.749 INFO:teuthology.orchestra.run.vm02.stdout:Available: Yes 2026-03-10T08:38:28.749 INFO:teuthology.orchestra.run.vm02.stdout:Paused: No 2026-03-10T08:38:28.796 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- bash -c 'ceph orch ps' 2026-03-10T08:38:30.039 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:29 vm02 bash[17473]: cluster 2026-03-10T08:38:28.209039+0000 mgr.vm02.ttibzz (mgr.14195) 129 : cluster [DBG] pgmap v83: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:30.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:29 vm02 
bash[17473]: cluster 2026-03-10T08:38:28.209039+0000 mgr.vm02.ttibzz (mgr.14195) 129 : cluster [DBG] pgmap v83: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:30.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:29 vm02 bash[17473]: audit 2026-03-10T08:38:28.745913+0000 mgr.vm02.ttibzz (mgr.14195) 130 : audit [DBG] from='client.14442 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:38:30.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:29 vm02 bash[17473]: audit 2026-03-10T08:38:28.745913+0000 mgr.vm02.ttibzz (mgr.14195) 130 : audit [DBG] from='client.14442 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:38:31.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:30 vm02 bash[17473]: cluster 2026-03-10T08:38:30.209308+0000 mgr.vm02.ttibzz (mgr.14195) 131 : cluster [DBG] pgmap v84: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:31.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:30 vm02 bash[17473]: cluster 2026-03-10T08:38:30.209308+0000 mgr.vm02.ttibzz (mgr.14195) 131 : cluster [DBG] pgmap v84: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:31.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:30 vm02 bash[17473]: audit 2026-03-10T08:38:30.817985+0000 mon.vm02 (mon.0) 626 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:38:31.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:30 vm02 bash[17473]: audit 2026-03-10T08:38:30.817985+0000 mon.vm02 (mon.0) 626 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:38:32.516 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:38:33.130 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:38:33.130 INFO:teuthology.orchestra.run.vm02.stdout:alertmanager.vm02 vm02 *:9093,9094 running (2m) 68s ago 2m 16.5M - 0.25.0 c8568f914cd2 16c87a6d1f4a 2026-03-10T08:38:33.130 INFO:teuthology.orchestra.run.vm02.stdout:ceph-exporter.vm02 vm02 *:9926 running (2m) 68s ago 2m 8504k - 19.2.3-678-ge911bdeb 654f31e6858e 12e8503af357 2026-03-10T08:38:33.131 INFO:teuthology.orchestra.run.vm02.stdout:ceph-exporter.vm07 vm07 *:9926 running (2m) 68s ago 2m 6072k - 19.2.3-678-ge911bdeb 654f31e6858e a8d32ada6006 2026-03-10T08:38:33.131 INFO:teuthology.orchestra.run.vm02.stdout:crash.vm02 vm02 running (2m) 68s ago 2m 7296k - 19.2.3-678-ge911bdeb 654f31e6858e 4106c3e4838e 2026-03-10T08:38:33.131 INFO:teuthology.orchestra.run.vm02.stdout:crash.vm07 vm07 running (2m) 68s ago 2m 7300k - 19.2.3-678-ge911bdeb 654f31e6858e 3100cb6ea492 2026-03-10T08:38:33.131 INFO:teuthology.orchestra.run.vm02.stdout:grafana.vm02 vm02 *:3000 running (2m) 68s ago 2m 74.6M - 10.4.0 c8b91775d855 05dae469dd45 2026-03-10T08:38:33.131 INFO:teuthology.orchestra.run.vm02.stdout:mgr.vm02.ttibzz vm02 *:9283,8765,8443 running (3m) 68s ago 3m 522M - 19.2.3-678-ge911bdeb 654f31e6858e 8d9c2a7da34e 2026-03-10T08:38:33.131 INFO:teuthology.orchestra.run.vm02.stdout:mgr.vm07.aunzpk vm07 *:8443,9283,8765 running (2m) 68s ago 2m 465M - 
19.2.3-678-ge911bdeb 654f31e6858e 1af99d1fee53 2026-03-10T08:38:33.131 INFO:teuthology.orchestra.run.vm02.stdout:mon.vm02 vm02 running (3m) 68s ago 3m 44.8M 2048M 19.2.3-678-ge911bdeb 654f31e6858e ba23f51f501b 2026-03-10T08:38:33.131 INFO:teuthology.orchestra.run.vm02.stdout:mon.vm07 vm07 running (2m) 68s ago 2m 38.4M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 71f3c37e9d30 2026-03-10T08:38:33.131 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.vm02 vm02 *:9100 running (2m) 68s ago 2m 7472k - 1.7.0 72c9c2088986 0351d8835d2b 2026-03-10T08:38:33.131 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.vm07 vm07 *:9100 running (2m) 68s ago 2m 7352k - 1.7.0 72c9c2088986 0cfd18951c12 2026-03-10T08:38:33.131 INFO:teuthology.orchestra.run.vm02.stdout:osd.0 vm02 running (83s) 68s ago 85s 31.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e bb43d29b6166 2026-03-10T08:38:33.131 INFO:teuthology.orchestra.run.vm02.stdout:osd.1 vm07 running (82s) 68s ago 84s 49.8M 4096M 19.2.3-678-ge911bdeb 654f31e6858e a07a6e9c27d7 2026-03-10T08:38:33.131 INFO:teuthology.orchestra.run.vm02.stdout:osd.2 vm02 running (82s) 68s ago 84s 49.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e a95fdb8431e2 2026-03-10T08:38:33.131 INFO:teuthology.orchestra.run.vm02.stdout:osd.3 vm07 running (81s) 68s ago 83s 30.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e f420a4d87e7d 2026-03-10T08:38:33.131 INFO:teuthology.orchestra.run.vm02.stdout:osd.4 vm02 running (80s) 68s ago 82s 27.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 4e32ee3ddf6b 2026-03-10T08:38:33.131 INFO:teuthology.orchestra.run.vm02.stdout:osd.5 vm07 running (79s) 68s ago 82s 48.9M 4096M 19.2.3-678-ge911bdeb 654f31e6858e c45cd22aa88c 2026-03-10T08:38:33.131 INFO:teuthology.orchestra.run.vm02.stdout:osd.6 vm02 running (79s) 68s ago 81s 50.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 42c11c146777 2026-03-10T08:38:33.131 INFO:teuthology.orchestra.run.vm02.stdout:osd.7 vm07 running (78s) 68s ago 80s 35.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 3ce871016ee2 2026-03-10T08:38:33.131 INFO:teuthology.orchestra.run.vm02.stdout:prometheus.vm02 vm02 *:9095 running (2m) 68s ago 2m 28.5M - 2.51.0 1d3b7f56885b a19901308fdd 2026-03-10T08:38:33.178 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- bash -c 'ceph orch ls' 2026-03-10T08:38:33.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:33 vm02 bash[17473]: cluster 2026-03-10T08:38:32.209591+0000 mgr.vm02.ttibzz (mgr.14195) 132 : cluster [DBG] pgmap v85: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:33.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:33 vm02 bash[17473]: cluster 2026-03-10T08:38:32.209591+0000 mgr.vm02.ttibzz (mgr.14195) 132 : cluster [DBG] pgmap v85: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:34.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:34 vm02 bash[17473]: audit 2026-03-10T08:38:33.122592+0000 mgr.vm02.ttibzz (mgr.14195) 133 : audit [DBG] from='client.14446 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:38:34.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:34 vm02 bash[17473]: audit 2026-03-10T08:38:33.122592+0000 mgr.vm02.ttibzz (mgr.14195) 133 : audit [DBG] from='client.14446 -' entity='client.admin' cmd=[{"prefix": "orch 
ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:38:34.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:34 vm02 bash[17473]: audit 2026-03-10T08:38:34.237536+0000 mon.vm02 (mon.0) 627 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:38:34.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:34 vm02 bash[17473]: audit 2026-03-10T08:38:34.237536+0000 mon.vm02 (mon.0) 627 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:38:35.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:35 vm02 bash[17473]: cluster 2026-03-10T08:38:34.209884+0000 mgr.vm02.ttibzz (mgr.14195) 134 : cluster [DBG] pgmap v86: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:35.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:35 vm02 bash[17473]: cluster 2026-03-10T08:38:34.209884+0000 mgr.vm02.ttibzz (mgr.14195) 134 : cluster [DBG] pgmap v86: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:37.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:36 vm02 bash[17473]: audit 2026-03-10T08:38:35.875728+0000 mon.vm02 (mon.0) 628 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:38:37.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:36 vm02 bash[17473]: audit 2026-03-10T08:38:35.875728+0000 mon.vm02 (mon.0) 628 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:38:37.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:36 vm02 bash[17473]: audit 2026-03-10T08:38:35.880429+0000 mon.vm02 (mon.0) 629 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:38:37.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:36 vm02 bash[17473]: audit 2026-03-10T08:38:35.880429+0000 mon.vm02 (mon.0) 629 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:38:37.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:36 vm02 bash[17473]: audit 2026-03-10T08:38:36.129716+0000 mon.vm02 (mon.0) 630 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:38:37.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:36 vm02 bash[17473]: audit 2026-03-10T08:38:36.129716+0000 mon.vm02 (mon.0) 630 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:38:37.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:36 vm02 bash[17473]: audit 2026-03-10T08:38:36.134688+0000 mon.vm02 (mon.0) 631 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:38:37.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:36 vm02 bash[17473]: audit 2026-03-10T08:38:36.134688+0000 mon.vm02 (mon.0) 631 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:38:37.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:36 vm02 bash[17473]: audit 2026-03-10T08:38:36.421448+0000 mon.vm02 (mon.0) 632 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:38:37.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:36 vm02 
bash[17473]: audit 2026-03-10T08:38:36.421448+0000 mon.vm02 (mon.0) 632 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:38:37.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:36 vm02 bash[17473]: audit 2026-03-10T08:38:36.421890+0000 mon.vm02 (mon.0) 633 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T08:38:37.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:36 vm02 bash[17473]: audit 2026-03-10T08:38:36.421890+0000 mon.vm02 (mon.0) 633 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T08:38:37.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:36 vm02 bash[17473]: audit 2026-03-10T08:38:36.613731+0000 mon.vm02 (mon.0) 634 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:38:37.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:36 vm02 bash[17473]: audit 2026-03-10T08:38:36.613731+0000 mon.vm02 (mon.0) 634 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:38:37.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:36 vm02 bash[17473]: audit 2026-03-10T08:38:36.615369+0000 mon.vm02 (mon.0) 635 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T08:38:37.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:36 vm02 bash[17473]: audit 2026-03-10T08:38:36.615369+0000 mon.vm02 (mon.0) 635 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T08:38:37.564 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:38:37.812 INFO:teuthology.orchestra.run.vm02.stdout:NAME PORTS RUNNING REFRESHED AGE PLACEMENT 2026-03-10T08:38:37.812 INFO:teuthology.orchestra.run.vm02.stdout:alertmanager ?:9093,9094 1/1 1s ago 3m count:1 2026-03-10T08:38:37.812 INFO:teuthology.orchestra.run.vm02.stdout:ceph-exporter ?:9926 2/2 1s ago 3m * 2026-03-10T08:38:37.812 INFO:teuthology.orchestra.run.vm02.stdout:crash 2/2 1s ago 3m * 2026-03-10T08:38:37.812 INFO:teuthology.orchestra.run.vm02.stdout:grafana ?:3000 1/1 1s ago 3m count:1 2026-03-10T08:38:37.812 INFO:teuthology.orchestra.run.vm02.stdout:mgr 2/2 1s ago 3m count:2 2026-03-10T08:38:37.812 INFO:teuthology.orchestra.run.vm02.stdout:mon 2/2 1s ago 2m vm02:192.168.123.102=vm02;vm07:192.168.123.107=vm07;count:2 2026-03-10T08:38:37.813 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter ?:9100 2/2 1s ago 3m * 2026-03-10T08:38:37.813 INFO:teuthology.orchestra.run.vm02.stdout:osd.all-available-devices 8 1s ago 2m * 2026-03-10T08:38:37.813 INFO:teuthology.orchestra.run.vm02.stdout:prometheus ?:9095 1/1 1s ago 3m count:1 2026-03-10T08:38:37.868 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- bash -c 'ceph orch host ls' 2026-03-10T08:38:38.289 
INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:37 vm02 bash[17473]: cluster 2026-03-10T08:38:36.210197+0000 mgr.vm02.ttibzz (mgr.14195) 135 : cluster [DBG] pgmap v87: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:38.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:37 vm02 bash[17473]: cluster 2026-03-10T08:38:36.210197+0000 mgr.vm02.ttibzz (mgr.14195) 135 : cluster [DBG] pgmap v87: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:39.467 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:39 vm02 bash[17473]: audit 2026-03-10T08:38:37.806533+0000 mgr.vm02.ttibzz (mgr.14195) 136 : audit [DBG] from='client.24303 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:38:39.467 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:39 vm02 bash[17473]: audit 2026-03-10T08:38:37.806533+0000 mgr.vm02.ttibzz (mgr.14195) 136 : audit [DBG] from='client.24303 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:38:40.539 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:40 vm02 bash[17473]: cluster 2026-03-10T08:38:38.210463+0000 mgr.vm02.ttibzz (mgr.14195) 137 : cluster [DBG] pgmap v88: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:40.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:40 vm02 bash[17473]: cluster 2026-03-10T08:38:38.210463+0000 mgr.vm02.ttibzz (mgr.14195) 137 : cluster [DBG] pgmap v88: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:41.539 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:41 vm02 bash[17473]: cluster 2026-03-10T08:38:40.210688+0000 mgr.vm02.ttibzz (mgr.14195) 138 : cluster [DBG] pgmap v89: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:41.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:41 vm02 bash[17473]: cluster 2026-03-10T08:38:40.210688+0000 mgr.vm02.ttibzz (mgr.14195) 138 : cluster [DBG] pgmap v89: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:41.594 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:38:41.835 INFO:teuthology.orchestra.run.vm02.stdout:HOST ADDR LABELS STATUS 2026-03-10T08:38:41.835 INFO:teuthology.orchestra.run.vm02.stdout:vm02 192.168.123.102 2026-03-10T08:38:41.835 INFO:teuthology.orchestra.run.vm02.stdout:vm07 192.168.123.107 2026-03-10T08:38:41.835 INFO:teuthology.orchestra.run.vm02.stdout:2 hosts in cluster 2026-03-10T08:38:41.914 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- bash -c 'ceph orch device ls' 2026-03-10T08:38:42.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:42 vm02 bash[17473]: audit 2026-03-10T08:38:41.830893+0000 mgr.vm02.ttibzz (mgr.14195) 139 : audit [DBG] from='client.24307 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:38:42.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:42 vm02 bash[17473]: audit 2026-03-10T08:38:41.830893+0000 mgr.vm02.ttibzz (mgr.14195) 139 : audit [DBG] from='client.24307 -' entity='client.admin' 
cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:38:43.539 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:43 vm02 bash[17473]: cluster 2026-03-10T08:38:42.210934+0000 mgr.vm02.ttibzz (mgr.14195) 140 : cluster [DBG] pgmap v90: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:43.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:43 vm02 bash[17473]: cluster 2026-03-10T08:38:42.210934+0000 mgr.vm02.ttibzz (mgr.14195) 140 : cluster [DBG] pgmap v90: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:45.539 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:45 vm02 bash[17473]: cluster 2026-03-10T08:38:44.211199+0000 mgr.vm02.ttibzz (mgr.14195) 141 : cluster [DBG] pgmap v91: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:45.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:45 vm02 bash[17473]: cluster 2026-03-10T08:38:44.211199+0000 mgr.vm02.ttibzz (mgr.14195) 141 : cluster [DBG] pgmap v91: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:45.628 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:38:45.883 INFO:teuthology.orchestra.run.vm02.stdout:HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS 2026-03-10T08:38:45.883 INFO:teuthology.orchestra.run.vm02.stdout:vm02 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 76s ago Has a FileSystem, Insufficient space (<5GB) 2026-03-10T08:38:45.883 INFO:teuthology.orchestra.run.vm02.stdout:vm02 /dev/vdb hdd DWNBRSTVMM02001 20.0G No 76s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T08:38:45.883 INFO:teuthology.orchestra.run.vm02.stdout:vm02 /dev/vdc hdd DWNBRSTVMM02002 20.0G No 76s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T08:38:45.883 INFO:teuthology.orchestra.run.vm02.stdout:vm02 /dev/vdd hdd DWNBRSTVMM02003 20.0G No 76s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T08:38:45.883 INFO:teuthology.orchestra.run.vm02.stdout:vm02 /dev/vde hdd DWNBRSTVMM02004 20.0G No 76s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T08:38:45.883 INFO:teuthology.orchestra.run.vm02.stdout:vm07 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 75s ago Has a FileSystem, Insufficient space (<5GB) 2026-03-10T08:38:45.883 INFO:teuthology.orchestra.run.vm02.stdout:vm07 /dev/vdb hdd DWNBRSTVMM07001 20.0G No 75s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T08:38:45.883 INFO:teuthology.orchestra.run.vm02.stdout:vm07 /dev/vdc hdd DWNBRSTVMM07002 20.0G No 75s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T08:38:45.883 INFO:teuthology.orchestra.run.vm02.stdout:vm07 /dev/vdd hdd DWNBRSTVMM07003 20.0G No 75s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T08:38:45.883 INFO:teuthology.orchestra.run.vm02.stdout:vm07 /dev/vde hdd DWNBRSTVMM07004 20.0G No 75s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T08:38:46.091 INFO:teuthology.run_tasks:Running task vip... 2026-03-10T08:38:46.140 INFO:tasks.vip:Allocating static IPs for each host... 
2026-03-10T08:38:46.140 INFO:tasks.vip:peername 192.168.123.102 2026-03-10T08:38:46.140 INFO:tasks.vip:192.168.123.102 in 192.168.123.0/24, pos 101 2026-03-10T08:38:46.140 INFO:tasks.vip:vm02.local static 12.12.0.102, vnet 12.12.0.0/22 2026-03-10T08:38:46.140 INFO:tasks.vip:VIPs are [IPv4Address('12.12.1.102')] 2026-03-10T08:38:46.140 DEBUG:teuthology.orchestra.run.vm02:> sudo ip route ls 2026-03-10T08:38:46.148 INFO:teuthology.orchestra.run.vm02.stdout:default via 192.168.123.1 dev ens3 proto dhcp src 192.168.123.102 metric 100 2026-03-10T08:38:46.148 INFO:teuthology.orchestra.run.vm02.stdout:172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 linkdown 2026-03-10T08:38:46.148 INFO:teuthology.orchestra.run.vm02.stdout:192.168.123.0/24 dev ens3 proto kernel scope link src 192.168.123.102 metric 100 2026-03-10T08:38:46.148 INFO:teuthology.orchestra.run.vm02.stdout:192.168.123.1 dev ens3 proto dhcp scope link src 192.168.123.102 metric 100 2026-03-10T08:38:46.148 INFO:tasks.vip:Configuring 12.12.0.102 on vm02.local iface ens3... 2026-03-10T08:38:46.149 DEBUG:teuthology.orchestra.run.vm02:> sudo ip addr add 12.12.0.102/22 dev ens3 2026-03-10T08:38:46.198 INFO:tasks.vip:peername 192.168.123.107 2026-03-10T08:38:46.198 INFO:tasks.vip:192.168.123.107 in 192.168.123.0/24, pos 106 2026-03-10T08:38:46.198 INFO:tasks.vip:vm07.local static 12.12.0.107, vnet 12.12.0.0/22 2026-03-10T08:38:46.198 DEBUG:teuthology.orchestra.run.vm07:> sudo ip route ls 2026-03-10T08:38:46.204 INFO:teuthology.orchestra.run.vm07.stdout:default via 192.168.123.1 dev ens3 proto dhcp src 192.168.123.107 metric 100 2026-03-10T08:38:46.205 INFO:teuthology.orchestra.run.vm07.stdout:172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 linkdown 2026-03-10T08:38:46.205 INFO:teuthology.orchestra.run.vm07.stdout:192.168.123.0/24 dev ens3 proto kernel scope link src 192.168.123.107 metric 100 2026-03-10T08:38:46.205 INFO:teuthology.orchestra.run.vm07.stdout:192.168.123.1 dev ens3 proto dhcp scope link src 192.168.123.107 metric 100 2026-03-10T08:38:46.205 INFO:tasks.vip:Configuring 12.12.0.107 on vm07.local iface ens3... 2026-03-10T08:38:46.205 DEBUG:teuthology.orchestra.run.vm07:> sudo ip addr add 12.12.0.107/22 dev ens3 2026-03-10T08:38:46.253 INFO:teuthology.run_tasks:Running task cephadm.shell... 
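The vip task's arithmetic is visible in the lines above: each host's offset inside its real subnet (192.168.123.102 is host position 101 in 192.168.123.0/24) is reused inside the test-only virtual network 12.12.0.0/22, giving the static address 12.12.0.102, while VIPs are carved from the next /24 (12.12.1.102). A minimal sketch reproducing those numbers, assuming this offset-mapping scheme; it is illustrative, not the actual tasks.vip source:

    # Sketch (assumption): map a host's position in its real subnet into the
    # virtual network, as the logged values suggest; VIPs land in the next /24.
    import ipaddress

    VNET = ipaddress.ip_network("12.12.0.0/22")

    def static_and_vip(peer_ip, peer_net):
        pos = list(ipaddress.ip_network(peer_net).hosts()).index(
            ipaddress.ip_address(peer_ip))             # 192.168.123.102 -> 101
        base = int(VNET.network_address)
        static = ipaddress.ip_address(base + pos + 1)        # 12.12.0.102
        vip = ipaddress.ip_address(base + 256 + pos + 1)     # 12.12.1.102
        return static, vip

    print(static_and_vip("192.168.123.102", "192.168.123.0/24"))
    # (IPv4Address('12.12.0.102'), IPv4Address('12.12.1.102'))

The static address is then attached with `ip addr add 12.12.0.102/22 dev ens3`, exactly as the task runs it above, so both hosts share a /22 in which the ingress VIP announced later can float between them.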
2026-03-10T08:38:46.263 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm02.local 2026-03-10T08:38:46.285 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- bash -c 'ceph orch device ls --refresh' 2026-03-10T08:38:46.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:46 vm02 bash[17473]: audit 2026-03-10T08:38:45.878566+0000 mgr.vm02.ttibzz (mgr.14195) 142 : audit [DBG] from='client.14458 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:38:46.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:46 vm02 bash[17473]: audit 2026-03-10T08:38:45.878566+0000 mgr.vm02.ttibzz (mgr.14195) 142 : audit [DBG] from='client.14458 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:38:47.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:47 vm02 bash[17473]: cluster 2026-03-10T08:38:46.211525+0000 mgr.vm02.ttibzz (mgr.14195) 143 : cluster [DBG] pgmap v92: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:47.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:47 vm02 bash[17473]: cluster 2026-03-10T08:38:46.211525+0000 mgr.vm02.ttibzz (mgr.14195) 143 : cluster [DBG] pgmap v92: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:49.566 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:49 vm02 bash[17473]: cluster 2026-03-10T08:38:48.211805+0000 mgr.vm02.ttibzz (mgr.14195) 144 : cluster [DBG] pgmap v93: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:49.566 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:49 vm02 bash[17473]: cluster 2026-03-10T08:38:48.211805+0000 mgr.vm02.ttibzz (mgr.14195) 144 : cluster [DBG] pgmap v93: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:49.566 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:49 vm02 bash[17473]: audit 2026-03-10T08:38:49.237565+0000 mon.vm02 (mon.0) 636 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:38:49.566 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:49 vm02 bash[17473]: audit 2026-03-10T08:38:49.237565+0000 mon.vm02 (mon.0) 636 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:38:50.914 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:38:51.148 INFO:teuthology.orchestra.run.vm02.stdout:HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS 2026-03-10T08:38:51.148 INFO:teuthology.orchestra.run.vm02.stdout:vm02 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 81s ago Has a FileSystem, Insufficient space (<5GB) 2026-03-10T08:38:51.148 INFO:teuthology.orchestra.run.vm02.stdout:vm02 /dev/vdb hdd DWNBRSTVMM02001 20.0G No 81s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T08:38:51.148 INFO:teuthology.orchestra.run.vm02.stdout:vm02 /dev/vdc hdd DWNBRSTVMM02002 20.0G No 81s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM 
detected 2026-03-10T08:38:51.148 INFO:teuthology.orchestra.run.vm02.stdout:vm02 /dev/vdd hdd DWNBRSTVMM02003 20.0G No 81s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T08:38:51.148 INFO:teuthology.orchestra.run.vm02.stdout:vm02 /dev/vde hdd DWNBRSTVMM02004 20.0G No 81s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T08:38:51.148 INFO:teuthology.orchestra.run.vm02.stdout:vm07 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 80s ago Has a FileSystem, Insufficient space (<5GB) 2026-03-10T08:38:51.148 INFO:teuthology.orchestra.run.vm02.stdout:vm07 /dev/vdb hdd DWNBRSTVMM07001 20.0G No 80s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T08:38:51.149 INFO:teuthology.orchestra.run.vm02.stdout:vm07 /dev/vdc hdd DWNBRSTVMM07002 20.0G No 80s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T08:38:51.149 INFO:teuthology.orchestra.run.vm02.stdout:vm07 /dev/vdd hdd DWNBRSTVMM07003 20.0G No 80s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T08:38:51.149 INFO:teuthology.orchestra.run.vm02.stdout:vm07 /dev/vde hdd DWNBRSTVMM07004 20.0G No 80s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-10T08:38:51.228 INFO:teuthology.run_tasks:Running task cephadm.apply... 2026-03-10T08:38:51.232 INFO:tasks.cephadm:Applying spec(s): placement: count: 4 host_pattern: '*' service_id: foo service_type: rgw spec: rgw_frontend_port: 8000 --- placement: count: 2 service_id: rgw.foo service_type: ingress spec: backend_service: rgw.foo frontend_port: 9000 monitor_port: 9001 virtual_ip: 12.12.1.102/22 2026-03-10T08:38:51.232 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph orch apply -i - 2026-03-10T08:38:51.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:51 vm02 bash[17473]: cluster 2026-03-10T08:38:50.212034+0000 mgr.vm02.ttibzz (mgr.14195) 145 : cluster [DBG] pgmap v94: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:51.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:51 vm02 bash[17473]: cluster 2026-03-10T08:38:50.212034+0000 mgr.vm02.ttibzz (mgr.14195) 145 : cluster [DBG] pgmap v94: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:51.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:51 vm02 bash[17473]: audit 2026-03-10T08:38:51.142503+0000 mon.vm02 (mon.0) 637 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:38:51.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:51 vm02 bash[17473]: audit 2026-03-10T08:38:51.142503+0000 mon.vm02 (mon.0) 637 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:38:52.789 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:52 vm02 bash[17473]: audit 2026-03-10T08:38:51.141081+0000 mgr.vm02.ttibzz (mgr.14195) 146 : audit [DBG] from='client.24315 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:38:52.790 
INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:52 vm02 bash[17473]: audit 2026-03-10T08:38:51.141081+0000 mgr.vm02.ttibzz (mgr.14195) 146 : audit [DBG] from='client.24315 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:38:53.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:53 vm02 bash[17473]: cluster 2026-03-10T08:38:52.212269+0000 mgr.vm02.ttibzz (mgr.14195) 147 : cluster [DBG] pgmap v95: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:53.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:53 vm02 bash[17473]: cluster 2026-03-10T08:38:52.212269+0000 mgr.vm02.ttibzz (mgr.14195) 147 : cluster [DBG] pgmap v95: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:54.947 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:38:55.212 INFO:teuthology.orchestra.run.vm02.stdout:Scheduled rgw.foo update... 2026-03-10T08:38:55.212 INFO:teuthology.orchestra.run.vm02.stdout:Scheduled ingress.rgw.foo update... 2026-03-10T08:38:55.303 INFO:teuthology.run_tasks:Running task cephadm.wait_for_service... 2026-03-10T08:38:55.306 INFO:tasks.cephadm:Waiting for ceph service rgw.foo to start (timeout 300)... 2026-03-10T08:38:55.306 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph orch ls -f json 2026-03-10T08:38:55.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:55 vm02 bash[17473]: cluster 2026-03-10T08:38:54.212505+0000 mgr.vm02.ttibzz (mgr.14195) 148 : cluster [DBG] pgmap v96: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:55.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:55 vm02 bash[17473]: cluster 2026-03-10T08:38:54.212505+0000 mgr.vm02.ttibzz (mgr.14195) 148 : cluster [DBG] pgmap v96: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:55.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:55 vm02 bash[17473]: audit 2026-03-10T08:38:55.204772+0000 mon.vm02 (mon.0) 638 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:38:55.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:55 vm02 bash[17473]: audit 2026-03-10T08:38:55.204772+0000 mon.vm02 (mon.0) 638 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:38:55.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:55 vm02 bash[17473]: audit 2026-03-10T08:38:55.208094+0000 mon.vm02 (mon.0) 639 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:38:55.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:55 vm02 bash[17473]: audit 2026-03-10T08:38:55.208094+0000 mon.vm02 (mon.0) 639 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:38:57.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:56 vm02 bash[17473]: audit 2026-03-10T08:38:55.198504+0000 mgr.vm02.ttibzz (mgr.14195) 149 : audit [DBG] from='client.24319 -' entity='client.admin' cmd=[{"prefix": "orch apply", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:38:57.040 
INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:56 vm02 bash[17473]: audit 2026-03-10T08:38:55.198504+0000 mgr.vm02.ttibzz (mgr.14195) 149 : audit [DBG] from='client.24319 -' entity='client.admin' cmd=[{"prefix": "orch apply", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:38:57.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:56 vm02 bash[17473]: cephadm 2026-03-10T08:38:55.201061+0000 mgr.vm02.ttibzz (mgr.14195) 150 : cephadm [INF] Saving service rgw.foo spec with placement count:4;* 2026-03-10T08:38:57.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:56 vm02 bash[17473]: cephadm 2026-03-10T08:38:55.201061+0000 mgr.vm02.ttibzz (mgr.14195) 150 : cephadm [INF] Saving service rgw.foo spec with placement count:4;* 2026-03-10T08:38:57.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:56 vm02 bash[17473]: cephadm 2026-03-10T08:38:55.205431+0000 mgr.vm02.ttibzz (mgr.14195) 151 : cephadm [INF] Saving service ingress.rgw.foo spec with placement count:2 2026-03-10T08:38:57.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:56 vm02 bash[17473]: cephadm 2026-03-10T08:38:55.205431+0000 mgr.vm02.ttibzz (mgr.14195) 151 : cephadm [INF] Saving service ingress.rgw.foo spec with placement count:2 2026-03-10T08:38:57.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:56 vm02 bash[17473]: audit 2026-03-10T08:38:55.600010+0000 mon.vm02 (mon.0) 640 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:38:57.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:56 vm02 bash[17473]: audit 2026-03-10T08:38:55.600010+0000 mon.vm02 (mon.0) 640 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:38:57.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:56 vm02 bash[17473]: audit 2026-03-10T08:38:55.603998+0000 mon.vm02 (mon.0) 641 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:38:57.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:56 vm02 bash[17473]: audit 2026-03-10T08:38:55.603998+0000 mon.vm02 (mon.0) 641 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:38:57.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:56 vm02 bash[17473]: audit 2026-03-10T08:38:55.852747+0000 mon.vm02 (mon.0) 642 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:38:57.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:56 vm02 bash[17473]: audit 2026-03-10T08:38:55.852747+0000 mon.vm02 (mon.0) 642 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:38:57.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:56 vm02 bash[17473]: audit 2026-03-10T08:38:55.858274+0000 mon.vm02 (mon.0) 643 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:38:57.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:56 vm02 bash[17473]: audit 2026-03-10T08:38:55.858274+0000 mon.vm02 (mon.0) 643 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:38:57.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:56 vm02 bash[17473]: audit 2026-03-10T08:38:56.175187+0000 mon.vm02 (mon.0) 644 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:38:57.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:56 vm02 bash[17473]: audit 
2026-03-10T08:38:56.175187+0000 mon.vm02 (mon.0) 644 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:38:57.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:56 vm02 bash[17473]: audit 2026-03-10T08:38:56.179010+0000 mon.vm02 (mon.0) 645 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:38:57.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:56 vm02 bash[17473]: audit 2026-03-10T08:38:56.179010+0000 mon.vm02 (mon.0) 645 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:38:57.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:56 vm02 bash[17473]: audit 2026-03-10T08:38:56.459323+0000 mon.vm02 (mon.0) 646 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:38:57.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:56 vm02 bash[17473]: audit 2026-03-10T08:38:56.459323+0000 mon.vm02 (mon.0) 646 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:38:57.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:56 vm02 bash[17473]: audit 2026-03-10T08:38:56.463506+0000 mon.vm02 (mon.0) 647 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:38:57.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:56 vm02 bash[17473]: audit 2026-03-10T08:38:56.463506+0000 mon.vm02 (mon.0) 647 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:38:58.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:57 vm02 bash[17473]: cluster 2026-03-10T08:38:56.212767+0000 mgr.vm02.ttibzz (mgr.14195) 152 : cluster [DBG] pgmap v97: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:58.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:57 vm02 bash[17473]: cluster 2026-03-10T08:38:56.212767+0000 mgr.vm02.ttibzz (mgr.14195) 152 : cluster [DBG] pgmap v97: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:38:58.979 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:38:59.233 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-10T08:38:59.233 INFO:teuthology.orchestra.run.vm02.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T08:35:20.842677Z", "last_refresh": "2026-03-10T08:38:55.594452Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T08:36:12.013728Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T08:35:19.468871Z", "last_refresh": "2026-03-10T08:38:55.594700Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:12.805296Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T08:35:19.169069Z", "last_refresh": "2026-03-10T08:38:55.594561Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": 
"2026-03-10T08:35:20.141255Z", "last_refresh": "2026-03-10T08:38:55.594420Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T08:38:55.208393Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.102/22"}, "status": {"created": "2026-03-10T08:38:55.205436Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.102/22"}}, {"events": ["2026-03-10T08:36:14.365711Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T08:35:18.882993Z", "last_refresh": "2026-03-10T08:38:55.594673Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:15.520477Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm02:192.168.123.102=vm02", "vm07:192.168.123.107=vm07"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T08:36:00.723693Z", "last_refresh": "2026-03-10T08:38:55.594507Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:13.564481Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T08:35:20.546162Z", "last_refresh": "2026-03-10T08:38:55.594646Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:34.804220Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T08:36:34.800913Z", "last_refresh": "2026-03-10T08:38:55.594372Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T08:36:15.524081Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T08:35:19.796756Z", "last_refresh": "2026-03-10T08:38:55.594591Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-03-10T08:38:55.205139Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_frontend_port": 8000}, "status": {"created": "2026-03-10T08:38:55.201072Z", "ports": [8000], "running": 0, "size": 4}}] 2026-03-10T08:38:59.287 INFO:tasks.cephadm:rgw.foo has 0/4 2026-03-10T08:39:00.006 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:59 vm02 bash[17473]: cluster 2026-03-10T08:38:58.213039+0000 mgr.vm02.ttibzz (mgr.14195) 153 : cluster [DBG] pgmap v98: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:39:00.006 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:38:59 vm02 bash[17473]: cluster 2026-03-10T08:38:58.213039+0000 mgr.vm02.ttibzz (mgr.14195) 153 : cluster [DBG] pgmap v98: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:39:00.289 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k 
/etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph orch ls -f json 2026-03-10T08:39:01.002 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:00 vm02 bash[17473]: audit 2026-03-10T08:38:59.227849+0000 mgr.vm02.ttibzz (mgr.14195) 154 : audit [DBG] from='client.14468 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T08:39:01.002 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:00 vm02 bash[17473]: audit 2026-03-10T08:38:59.227849+0000 mgr.vm02.ttibzz (mgr.14195) 154 : audit [DBG] from='client.14468 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T08:39:02.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:01 vm02 bash[17473]: cluster 2026-03-10T08:39:00.213315+0000 mgr.vm02.ttibzz (mgr.14195) 155 : cluster [DBG] pgmap v99: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:39:02.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:01 vm02 bash[17473]: cluster 2026-03-10T08:39:00.213315+0000 mgr.vm02.ttibzz (mgr.14195) 155 : cluster [DBG] pgmap v99: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-10T08:39:02.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:01 vm02 bash[17473]: audit 2026-03-10T08:39:00.732269+0000 mon.vm02 (mon.0) 648 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:02.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:01 vm02 bash[17473]: audit 2026-03-10T08:39:00.732269+0000 mon.vm02 (mon.0) 648 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:02.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:01 vm02 bash[17473]: audit 2026-03-10T08:39:00.736839+0000 mon.vm02 (mon.0) 649 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:02.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:01 vm02 bash[17473]: audit 2026-03-10T08:39:00.736839+0000 mon.vm02 (mon.0) 649 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:02.492 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:39:02 vm07 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-03-10T08:39:02.783 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:39:02 vm07 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 
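cephadm.wait_for_service, started above with a 300 s timeout, polls `ceph orch ls -f json` and compares each service's status.running against status.size; the "rgw.foo has 0/4" line above is that comparison printed. A minimal sketch of the loop under those assumptions (the real task issues the command through the cephadm shell wrapper seen throughout this log):

    # Sketch (assumption): poll the orchestrator until the named service has
    # all of its daemons running, mirroring the "rgw.foo has 0/4" progress
    # lines in this log. Field names match the `ceph orch ls -f json` output.
    import json
    import subprocess
    import time

    def wait_for_service(name, timeout=300, interval=5):
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            for svc in json.loads(subprocess.check_output(
                    ["ceph", "orch", "ls", "-f", "json"])):
                if svc.get("service_name") == name:
                    status = svc.get("status", {})
                    running = status.get("running", 0)
                    size = status.get("size")
                    print(f"{name} has {running}/{size}")
                    if size is not None and running == size:
                        return
            time.sleep(interval)
        raise TimeoutError(f"service {name} did not start within {timeout}s")

Note that ingress.rgw.foo reports size 4 for a placement count of 2: each ingress placement deploys both an haproxy and a keepalived daemon, so the later wait on ingress.rgw.foo completes only once all four are up.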
2026-03-10T08:39:03.262 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:02 vm02 bash[17473]: audit 2026-03-10T08:39:01.966089+0000 mon.vm02 (mon.0) 650 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:03.263 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:02 vm02 bash[17473]: audit 2026-03-10T08:39:01.970423+0000 mon.vm02 (mon.0) 651 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:03.263 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:02 vm02 bash[17473]: audit 2026-03-10T08:39:01.971054+0000 mon.vm02 (mon.0) 652 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:39:03.263 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:02 vm02 bash[17473]: audit 2026-03-10T08:39:01.971563+0000 mon.vm02 (mon.0) 653 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T08:39:03.263 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:02 vm02 bash[17473]: audit 2026-03-10T08:39:01.974708+0000 mon.vm02 (mon.0) 654 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:03.263 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:02 vm02 bash[17473]: audit 2026-03-10T08:39:01.975944+0000 mon.vm02 (mon.0) 655 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T08:39:03.263 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:02 vm02 bash[17473]: audit 2026-03-10T08:39:01.978762+0000 mon.vm02 (mon.0) 656 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm07.wecerd", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch
2026-03-10T08:39:03.263 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:02 vm02 bash[17473]: audit 2026-03-10T08:39:01.980420+0000 mon.vm02 (mon.0) 657 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm07.wecerd", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished
2026-03-10T08:39:03.263 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:02 vm02 bash[17473]: audit 2026-03-10T08:39:01.984351+0000 mon.vm02 (mon.0) 658 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:03.263 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:02 vm02 bash[17473]: audit 2026-03-10T08:39:01.985320+0000 mon.vm02 (mon.0) 659 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:39:03.263 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:02 vm02 bash[17473]: cephadm 2026-03-10T08:39:01.985855+0000 mgr.vm02.ttibzz (mgr.14195) 156 : cephadm [INF] Deploying daemon rgw.foo.vm07.wecerd on vm07
2026-03-10T08:39:03.263 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:02 vm02 bash[17473]: cluster 2026-03-10T08:39:02.213582+0000 mgr.vm02.ttibzz (mgr.14195) 157 : cluster [DBG] pgmap v100: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail
2026-03-10T08:39:03.263 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:02 vm02 bash[17473]: audit 2026-03-10T08:39:02.760176+0000 mon.vm02 (mon.0) 660 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:03.263 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:02 vm02 bash[17473]: audit 2026-03-10T08:39:02.765236+0000 mon.vm02 (mon.0) 661 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:03.263 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:02 vm02 bash[17473]: audit 2026-03-10T08:39:02.768748+0000 mon.vm02 (mon.0) 662 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:03.263 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:02 vm02 bash[17473]: audit 2026-03-10T08:39:02.769305+0000 mon.vm02 (mon.0) 663 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm02.rugqqv", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch
2026-03-10T08:39:03.263 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:02 vm02 bash[17473]: audit 2026-03-10T08:39:02.770793+0000 mon.vm02 (mon.0) 664 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm02.rugqqv", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished
2026-03-10T08:39:03.263 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:02 vm02 bash[17473]: audit 2026-03-10T08:39:02.774101+0000 mon.vm02 (mon.0) 665 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:03.263 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:02 vm02 bash[17473]: audit 2026-03-10T08:39:02.775013+0000 mon.vm02 (mon.0) 666 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:39:03.263 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:02 vm02 bash[17473]: cephadm 2026-03-10T08:39:02.775594+0000 mgr.vm02.ttibzz (mgr.14195) 158 : cephadm [INF] Deploying daemon rgw.foo.vm02.rugqqv on vm02
2026-03-10T08:39:03.539 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:03 vm02 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-03-10T08:39:04.017 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config
2026-03-10T08:39:04.158 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:39:04 vm07 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
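Each "Deploying daemon rgw.foo.*" event above is preceded by the mgr minting a dedicated cephx key for that daemon (the auth get-or-create audit records). Reconstructed from the cmd payload in those records, the equivalent hand-run command would be roughly the following; running it manually is normally unnecessary, since cephadm issues it itself:

    # Sketch reconstructed from the audit record for rgw.foo.vm07.wecerd.
    ceph auth get-or-create client.rgw.foo.vm07.wecerd \
        mon 'allow *' mgr 'allow rw' osd 'allow rwx tag rgw *=*'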
2026-03-10T08:39:04.300 INFO:teuthology.orchestra.run.vm02.stdout:
2026-03-10T08:39:04.300 INFO:teuthology.orchestra.run.vm02.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T08:35:20.842677Z", "last_refresh": "2026-03-10T08:38:55.594452Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T08:36:12.013728Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T08:35:19.468871Z", "last_refresh": "2026-03-10T08:38:55.594700Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:12.805296Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T08:35:19.169069Z", "last_refresh": "2026-03-10T08:38:55.594561Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-10T08:35:20.141255Z", "last_refresh": "2026-03-10T08:38:55.594420Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T08:38:55.208393Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.102/22"}, "status": {"created": "2026-03-10T08:38:55.205436Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.102/22"}}, {"events": ["2026-03-10T08:36:14.365711Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T08:35:18.882993Z", "last_refresh": "2026-03-10T08:38:55.594673Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:15.520477Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm02:192.168.123.102=vm02", "vm07:192.168.123.107=vm07"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T08:36:00.723693Z", "last_refresh": "2026-03-10T08:38:55.594507Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:13.564481Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T08:35:20.546162Z", "last_refresh": "2026-03-10T08:38:55.594646Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:34.804220Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T08:36:34.800913Z", "last_refresh": "2026-03-10T08:38:55.594372Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T08:36:15.524081Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T08:35:19.796756Z", "last_refresh": "2026-03-10T08:38:55.594591Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-03-10T08:39:03.584254Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_frontend_port": 8000}, "status": {"created": "2026-03-10T08:38:55.201072Z", "ports": [8000], "running": 0, "size": 4}}]
2026-03-10T08:39:04.350 INFO:tasks.cephadm:rgw.foo has 0/4
2026-03-10T08:39:04.440 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:39:04 vm07 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-03-10T08:39:04.587 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:04 vm02 bash[17473]: audit 2026-03-10T08:39:03.576575+0000 mon.vm02 (mon.0) 667 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:04.588 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:04 vm02 bash[17473]: audit 2026-03-10T08:39:03.581138+0000 mon.vm02 (mon.0) 668 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:04.588 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:04 vm02 bash[17473]: audit 2026-03-10T08:39:03.584035+0000 mon.vm02 (mon.0) 669 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:04.588 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:04 vm02 bash[17473]: audit 2026-03-10T08:39:03.584507+0000 mon.vm02 (mon.0) 670 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm07.zylyez", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch
2026-03-10T08:39:04.588 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:04 vm02 bash[17473]: audit 2026-03-10T08:39:03.586864+0000 mon.vm02 (mon.0) 671 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm07.zylyez", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished
2026-03-10T08:39:04.588 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:04 vm02 bash[17473]: audit 2026-03-10T08:39:03.600031+0000 mon.vm02 (mon.0) 672 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:04.588 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:04 vm02 bash[17473]: audit 2026-03-10T08:39:03.600996+0000 mon.vm02 (mon.0) 673 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:39:04.588 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:04 vm02 bash[17473]: cephadm 2026-03-10T08:39:03.601561+0000 mgr.vm02.ttibzz (mgr.14195) 159 : cephadm [INF] Deploying daemon rgw.foo.vm07.zylyez on vm07
2026-03-10T08:39:04.588 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:04 vm02 bash[17473]: cluster 2026-03-10T08:39:03.780327+0000 mon.vm02 (mon.0) 674 : cluster [DBG] osdmap e24: 8 total, 8 up, 8 in
2026-03-10T08:39:04.588 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:04 vm02 bash[17473]: audit 2026-03-10T08:39:03.791139+0000 mon.vm02 (mon.0) 675 : audit [INF] from='client.? 192.168.123.102:0/118404890' entity='client.rgw.foo.vm02.rugqqv' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch
2026-03-10T08:39:04.588 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:04 vm02 bash[17473]: audit 2026-03-10T08:39:03.796351+0000 mon.vm02 (mon.0) 676 : audit [INF] from='client.? ' entity='client.rgw.foo.vm07.wecerd' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch
2026-03-10T08:39:04.588 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:04 vm02 bash[17473]: audit 2026-03-10T08:39:03.799425+0000 mon.vm07 (mon.1) 26 : audit [INF] from='client.? 192.168.123.107:0/2957677464' entity='client.rgw.foo.vm07.wecerd' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch
2026-03-10T08:39:04.588 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:04 vm02 bash[17473]: audit 2026-03-10T08:39:04.237928+0000 mon.vm02 (mon.0) 677 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:39:04.588 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:04 vm02 bash[17473]: audit 2026-03-10T08:39:04.409092+0000 mon.vm02 (mon.0) 678 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:04.588 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:04 vm02 bash[17473]: audit 2026-03-10T08:39:04.413096+0000 mon.vm02 (mon.0) 679 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:04.588 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:04 vm02 bash[17473]: audit 2026-03-10T08:39:04.417021+0000 mon.vm02 (mon.0) 680 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:04.588 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:04 vm02 bash[17473]: audit 2026-03-10T08:39:04.417652+0000 mon.vm02 (mon.0) 681 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm02.bmgnwf", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch
2026-03-10T08:39:04.588 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:04 vm02 bash[17473]: audit 2026-03-10T08:39:04.419666+0000 mon.vm02 (mon.0) 682 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm02.bmgnwf", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished
2026-03-10T08:39:04.588 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:04 vm02 bash[17473]: audit 2026-03-10T08:39:04.423870+0000 mon.vm02 (mon.0) 683 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:04.588 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:04 vm02 bash[17473]: audit 2026-03-10T08:39:04.425713+0000 mon.vm02 (mon.0) 684 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:39:05.256 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:04 vm02 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-03-10T08:39:05.256 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:05 vm02 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-03-10T08:39:05.350 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph orch ls -f json
2026-03-10T08:39:05.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:05 vm02 bash[17473]: cluster 2026-03-10T08:39:04.213868+0000 mgr.vm02.ttibzz (mgr.14195) 160 : cluster [DBG] pgmap v102: 33 pgs: 32 unknown, 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail
2026-03-10T08:39:05.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:05 vm02 bash[17473]: audit 2026-03-10T08:39:04.294293+0000 mgr.vm02.ttibzz (mgr.14195) 161 : audit [DBG] from='client.14484 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-10T08:39:05.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:05 vm02 bash[17473]: cephadm 2026-03-10T08:39:04.426350+0000 mgr.vm02.ttibzz (mgr.14195) 162 : cephadm [INF] Deploying daemon rgw.foo.vm02.bmgnwf on vm02
2026-03-10T08:39:05.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:05 vm02 bash[17473]: cluster 2026-03-10T08:39:04.777769+0000 mon.vm02 (mon.0) 685 : cluster [WRN] Health check failed: 1 pool(s) do not have an application enabled (POOL_APP_NOT_ENABLED)
2026-03-10T08:39:05.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:05 vm02 bash[17473]: audit 2026-03-10T08:39:04.779795+0000 mon.vm02 (mon.0) 686 : audit [INF] from='client.? 192.168.123.102:0/118404890' entity='client.rgw.foo.vm02.rugqqv' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished
2026-03-10T08:39:05.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:05 vm02 bash[17473]: audit 2026-03-10T08:39:04.779871+0000 mon.vm02 (mon.0) 687 : audit [INF] from='client.? ' entity='client.rgw.foo.vm07.wecerd' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished
2026-03-10T08:39:05.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:05 vm02 bash[17473]: cluster 2026-03-10T08:39:04.782410+0000 mon.vm02 (mon.0) 688 : cluster [DBG] osdmap e25: 8 total, 8 up, 8 in
2026-03-10T08:39:05.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:05 vm02 bash[17473]: audit 2026-03-10T08:39:05.278341+0000 mon.vm02 (mon.0) 689 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:05.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:05 vm02 bash[17473]: audit 2026-03-10T08:39:05.282918+0000 mon.vm02 (mon.0) 690 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:05.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:05 vm02 bash[17473]: audit 2026-03-10T08:39:05.292956+0000 mon.vm02 (mon.0) 691 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:05.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:05 vm02 bash[17473]: audit 2026-03-10T08:39:05.305787+0000 mon.vm02 (mon.0) 692 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:05.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:05 vm02 bash[17473]: audit 2026-03-10T08:39:05.315562+0000 mon.vm02 (mon.0) 693 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:05.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:05 vm02 bash[17473]: audit 2026-03-10T08:39:05.321245+0000 mon.vm02 (mon.0) 694 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
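The DEBUG line at the top of this block shows how teuthology reaches the cluster: every ceph command is wrapped in cephadm shell with an explicit image, config, keyring, and fsid. The same wrapper works for ad-hoc inspection on the host; here ceph health detail is substituted as an example trailing command:

    # Same wrapper as the logged command, with an illustrative ceph subcommand.
    sudo /home/ubuntu/cephtest/cephadm \
        --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df \
        shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph health detail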
2026-03-10T08:39:07.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:07 vm02 bash[17473]: cephadm 2026-03-10T08:39:05.293646+0000 mgr.vm02.ttibzz (mgr.14195) 163 : cephadm [INF] Saving service rgw.foo spec with placement count:4;*
2026-03-10T08:39:07.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:07 vm02 bash[17473]: cephadm 2026-03-10T08:39:05.326703+0000 mgr.vm02.ttibzz (mgr.14195) 164 : cephadm [INF] Deploying daemon haproxy.rgw.foo.vm07.plwsjk on vm07
2026-03-10T08:39:07.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:07 vm02 bash[17473]: cluster 2026-03-10T08:39:05.795948+0000 mon.vm02 (mon.0) 695 : cluster [DBG] osdmap e26: 8 total, 8 up, 8 in
2026-03-10T08:39:07.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:07 vm02 bash[17473]: audit 2026-03-10T08:39:05.806896+0000 mon.vm02 (mon.0) 696 : audit [INF] from='client.? 192.168.123.102:0/690772262' entity='client.rgw.foo.vm02.rugqqv' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]: dispatch
2026-03-10T08:39:07.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:07 vm02 bash[17473]: audit 2026-03-10T08:39:05.807015+0000 mon.vm02 (mon.0) 697 : audit [INF] from='client.? ' entity='client.rgw.foo.vm07.zylyez' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]: dispatch
2026-03-10T08:39:07.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:07 vm02 bash[17473]: audit 2026-03-10T08:39:05.807188+0000 mon.vm02 (mon.0) 698 : audit [INF] from='client.? ' entity='client.rgw.foo.vm07.wecerd' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]: dispatch
2026-03-10T08:39:07.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:07 vm02 bash[17473]: audit 2026-03-10T08:39:05.807698+0000 mon.vm02 (mon.0) 699 : audit [INF] from='client.? 192.168.123.102:0/2784732644' entity='client.rgw.foo.vm02.bmgnwf' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]: dispatch
2026-03-10T08:39:07.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:07 vm02 bash[17473]: audit 2026-03-10T08:39:05.808980+0000 mon.vm07 (mon.1) 27 : audit [INF] from='client.? 192.168.123.107:0/2357050314' entity='client.rgw.foo.vm07.zylyez' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]: dispatch
2026-03-10T08:39:07.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:07 vm02 bash[17473]: audit 2026-03-10T08:39:05.809370+0000 mon.vm07 (mon.1) 28 : audit [INF] from='client.? 192.168.123.107:0/2499823566' entity='client.rgw.foo.vm07.wecerd' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]: dispatch
2026-03-10T08:39:07.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:07 vm02 bash[17473]: audit 2026-03-10T08:39:05.954142+0000 mon.vm02 (mon.0) 700 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:08.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:08 vm02 bash[17473]: cluster 2026-03-10T08:39:06.214154+0000 mgr.vm02.ttibzz (mgr.14195) 165 : cluster [DBG] pgmap v105: 65 pgs: 4 creating+peering, 14 active+clean, 47 unknown; 450 KiB data, 213 MiB used, 160 GiB / 160 GiB avail; 1.8 KiB/s rd, 341 B/s wr, 2 op/s
2026-03-10T08:39:08.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:08 vm02 bash[17473]: audit 2026-03-10T08:39:06.865095+0000 mon.vm02 (mon.0) 701 : audit [INF] from='client.? 192.168.123.102:0/690772262' entity='client.rgw.foo.vm02.rugqqv' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished
2026-03-10T08:39:08.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:08 vm02 bash[17473]: audit 2026-03-10T08:39:06.865136+0000 mon.vm02 (mon.0) 702 : audit [INF] from='client.? ' entity='client.rgw.foo.vm07.zylyez' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished
2026-03-10T08:39:08.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:08 vm02 bash[17473]: audit 2026-03-10T08:39:06.865225+0000 mon.vm02 (mon.0) 703 : audit [INF] from='client.? ' entity='client.rgw.foo.vm07.wecerd' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished
2026-03-10T08:39:08.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:08 vm02 bash[17473]: audit 2026-03-10T08:39:06.865293+0000 mon.vm02 (mon.0) 704 : audit [INF] from='client.? 192.168.123.102:0/2784732644' entity='client.rgw.foo.vm02.bmgnwf' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished
2026-03-10T08:39:08.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:08 vm02 bash[17473]: cluster 2026-03-10T08:39:06.879487+0000 mon.vm02 (mon.0) 705 : cluster [DBG] osdmap e27: 8 total, 8 up, 8 in
2026-03-10T08:39:09.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:09 vm02 bash[17473]: cluster 2026-03-10T08:39:08.027684+0000 mon.vm02 (mon.0) 706 : cluster [DBG] osdmap e28: 8 total, 8 up, 8 in
2026-03-10T08:39:09.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:09 vm02 bash[17473]: audit 2026-03-10T08:39:08.032064+0000 mon.vm07 (mon.1) 29 : audit [INF] from='client.? 192.168.123.107:0/2357050314' entity='client.rgw.foo.vm07.zylyez' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]: dispatch
2026-03-10T08:39:09.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:09 vm02 bash[17473]: audit 2026-03-10T08:39:08.040284+0000 mon.vm02 (mon.0) 707 : audit [INF] from='client.? ' entity='client.rgw.foo.vm07.zylyez' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]: dispatch
2026-03-10T08:39:09.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:09 vm02 bash[17473]: audit 2026-03-10T08:39:08.040357+0000 mon.vm02 (mon.0) 708 : audit [INF] from='client.? ' entity='client.rgw.foo.vm07.wecerd' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]: dispatch
2026-03-10T08:39:09.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:09 vm02 bash[17473]: audit 2026-03-10T08:39:08.040560+0000 mon.vm02 (mon.0) 709 : audit [INF] from='client.? 192.168.123.102:0/690772262' entity='client.rgw.foo.vm02.rugqqv' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]: dispatch
2026-03-10T08:39:09.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:09 vm02 bash[17473]: audit 2026-03-10T08:39:08.040703+0000 mon.vm02 (mon.0) 710 : audit [INF] from='client.? 192.168.123.102:0/2784732644' entity='client.rgw.foo.vm02.bmgnwf' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]: dispatch
2026-03-10T08:39:09.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:09 vm02 bash[17473]: audit 2026-03-10T08:39:08.041255+0000 mon.vm07 (mon.1) 30 : audit [INF] from='client.? 192.168.123.107:0/2499823566' entity='client.rgw.foo.vm07.wecerd' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]: dispatch
2026-03-10T08:39:09.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:09 vm02 bash[17473]: cluster 2026-03-10T08:39:08.214463+0000 mgr.vm02.ttibzz (mgr.14195) 166 : cluster [DBG] pgmap v108: 97 pgs: 6 creating+peering, 27 active+clean, 64 unknown; 450 KiB data, 213 MiB used, 160 GiB / 160 GiB avail; 7.2 KiB/s rd, 1.5 KiB/s wr, 9 op/s
2026-03-10T08:39:10.015 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config
2026-03-10T08:39:10.103 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:39:10 vm07 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-03-10T08:39:10.937 INFO:teuthology.orchestra.run.vm02.stdout:
2026-03-10T08:39:10.937 INFO:teuthology.orchestra.run.vm02.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T08:35:20.842677Z", "last_refresh": "2026-03-10T08:38:55.594452Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T08:36:12.013728Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T08:35:19.468871Z", "last_refresh": "2026-03-10T08:38:55.594700Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:12.805296Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T08:35:19.169069Z", "last_refresh": "2026-03-10T08:38:55.594561Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-10T08:35:20.141255Z", "last_refresh": "2026-03-10T08:38:55.594420Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T08:39:10.753716Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.102/22"}, "status": {"created": "2026-03-10T08:38:55.205436Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.102/22"}}, {"events": ["2026-03-10T08:36:14.365711Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T08:35:18.882993Z", "last_refresh": "2026-03-10T08:38:55.594673Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:15.520477Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm02:192.168.123.102=vm02", "vm07:192.168.123.107=vm07"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T08:36:00.723693Z", "last_refresh": "2026-03-10T08:38:55.594507Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:13.564481Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T08:35:20.546162Z", "last_refresh": "2026-03-10T08:38:55.594646Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:34.804220Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T08:36:34.800913Z", "last_refresh": "2026-03-10T08:38:55.594372Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T08:36:15.524081Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T08:35:19.796756Z", "last_refresh": "2026-03-10T08:38:55.594591Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-03-10T08:39:05.315869Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_frontend_port": 8000}, "status": {"created": "2026-03-10T08:39:05.293652Z", "ports": [8000], "running": 0, "size": 4}}]
2026-03-10T08:39:10.946 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:10 vm02 bash[17473]: audit 2026-03-10T08:39:09.034465+0000 mon.vm02 (mon.0) 711 : audit [INF] from='client.? ' entity='client.rgw.foo.vm07.zylyez' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished
2026-03-10T08:39:10.946 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:10 vm02 bash[17473]: audit 2026-03-10T08:39:09.034544+0000 mon.vm02 (mon.0) 712 : audit [INF] from='client.? ' entity='client.rgw.foo.vm07.wecerd' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished
2026-03-10T08:39:10.946 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:10 vm02 bash[17473]: audit 2026-03-10T08:39:09.034598+0000 mon.vm02 (mon.0) 713 : audit [INF] from='client.? 192.168.123.102:0/690772262' entity='client.rgw.foo.vm02.rugqqv' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished
2026-03-10T08:39:10.946 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:10 vm02 bash[17473]: audit 2026-03-10T08:39:09.034775+0000 mon.vm02 (mon.0) 714 : audit [INF] from='client.? 192.168.123.102:0/2784732644' entity='client.rgw.foo.vm02.bmgnwf' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished
2026-03-10T08:39:10.946 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:10 vm02 bash[17473]: cluster 2026-03-10T08:39:09.042249+0000 mon.vm02 (mon.0) 715 : cluster [DBG] osdmap e29: 8 total, 8 up, 8 in
2026-03-10T08:39:10.997 INFO:tasks.cephadm:rgw.foo has 0/4
2026-03-10T08:39:11.997 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph orch ls -f json
2026-03-10T08:39:12.003 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: cluster 2026-03-10T08:39:10.214775+0000 mgr.vm02.ttibzz (mgr.14195) 167 : cluster [DBG] pgmap v110: 97 pgs: 9 creating+peering, 50 active+clean, 38 unknown; 450 KiB data, 213 MiB used, 160 GiB / 160 GiB avail; 6.8 KiB/s rd, 1.8 KiB/s wr, 10 op/s
2026-03-10T08:39:12.003 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: cluster 2026-03-10T08:39:10.494003+0000 mon.vm02 (mon.0) 716 : cluster [DBG] osdmap e30: 8 total, 8 up, 8 in
2026-03-10T08:39:12.003 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:10.561380+0000 mon.vm02 (mon.0) 717 : audit [INF] from='client.? 192.168.123.102:0/690772262' entity='client.rgw.foo.vm02.rugqqv' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]: dispatch
2026-03-10T08:39:12.003 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:10.561441+0000 mon.vm02 (mon.0) 718 : audit [INF] from='client.? 192.168.123.102:0/2784732644' entity='client.rgw.foo.vm02.bmgnwf' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]: dispatch
2026-03-10T08:39:12.003 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: cluster 2026-03-10T08:39:10.561836+0000 mon.vm02 (mon.0) 719 : cluster [INF] Health check cleared: POOL_APP_NOT_ENABLED (was: 1 pool(s) do not have an application enabled)
2026-03-10T08:39:12.003 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: cluster 2026-03-10T08:39:10.561844+0000 mon.vm02 (mon.0) 720 : cluster [INF] Cluster is now healthy
2026-03-10T08:39:12.003 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:10.591111+0000 mon.vm07 (mon.1) 31 : audit [INF] from='client.? 192.168.123.107:0/2357050314' entity='client.rgw.foo.vm07.zylyez' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]: dispatch
2026-03-10T08:39:12.003 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:10.591155+0000 mon.vm07 (mon.1) 32 : audit [INF] from='client.? 192.168.123.107:0/2499823566' entity='client.rgw.foo.vm07.wecerd' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]: dispatch
2026-03-10T08:39:12.003 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:10.609075+0000 mon.vm02 (mon.0) 721 : audit [INF] from='client.? ' entity='client.rgw.foo.vm07.zylyez' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]: dispatch
2026-03-10T08:39:12.003 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:10.609141+0000 mon.vm02 (mon.0) 722 : audit [INF] from='client.?
' entity='client.rgw.foo.vm07.wecerd' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]: dispatch 2026-03-10T08:39:12.003 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:10.609141+0000 mon.vm02 (mon.0) 722 : audit [INF] from='client.? ' entity='client.rgw.foo.vm07.wecerd' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]: dispatch 2026-03-10T08:39:12.003 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:10.613402+0000 mon.vm02 (mon.0) 723 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:12.003 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:10.613402+0000 mon.vm02 (mon.0) 723 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:12.003 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:10.672348+0000 mon.vm02 (mon.0) 724 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:12.003 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:10.672348+0000 mon.vm02 (mon.0) 724 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:12.003 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:10.753497+0000 mon.vm02 (mon.0) 725 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:12.003 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:10.753497+0000 mon.vm02 (mon.0) 725 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:12.003 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: cephadm 2026-03-10T08:39:10.754838+0000 mgr.vm02.ttibzz (mgr.14195) 168 : cephadm [INF] Deploying daemon haproxy.rgw.foo.vm02.rwnyxr on vm02 2026-03-10T08:39:12.004 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: cephadm 2026-03-10T08:39:10.754838+0000 mgr.vm02.ttibzz (mgr.14195) 168 : cephadm [INF] Deploying daemon haproxy.rgw.foo.vm02.rwnyxr on vm02 2026-03-10T08:39:12.004 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:11.396063+0000 mon.vm02 (mon.0) 726 : audit [INF] from='client.? 192.168.123.102:0/690772262' entity='client.rgw.foo.vm02.rugqqv' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished 2026-03-10T08:39:12.004 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:11.396063+0000 mon.vm02 (mon.0) 726 : audit [INF] from='client.? 192.168.123.102:0/690772262' entity='client.rgw.foo.vm02.rugqqv' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished 2026-03-10T08:39:12.004 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:11.396151+0000 mon.vm02 (mon.0) 727 : audit [INF] from='client.? 
192.168.123.102:0/2784732644' entity='client.rgw.foo.vm02.bmgnwf' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished 2026-03-10T08:39:12.004 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:11.396151+0000 mon.vm02 (mon.0) 727 : audit [INF] from='client.? 192.168.123.102:0/2784732644' entity='client.rgw.foo.vm02.bmgnwf' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished 2026-03-10T08:39:12.004 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:11.396198+0000 mon.vm02 (mon.0) 728 : audit [INF] from='client.? ' entity='client.rgw.foo.vm07.zylyez' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished 2026-03-10T08:39:12.004 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:11.396198+0000 mon.vm02 (mon.0) 728 : audit [INF] from='client.? ' entity='client.rgw.foo.vm07.zylyez' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished 2026-03-10T08:39:12.004 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:11.396245+0000 mon.vm02 (mon.0) 729 : audit [INF] from='client.? ' entity='client.rgw.foo.vm07.wecerd' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished 2026-03-10T08:39:12.004 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:11.396245+0000 mon.vm02 (mon.0) 729 : audit [INF] from='client.? ' entity='client.rgw.foo.vm07.wecerd' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished 2026-03-10T08:39:12.004 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: cluster 2026-03-10T08:39:11.399759+0000 mon.vm02 (mon.0) 730 : cluster [DBG] osdmap e31: 8 total, 8 up, 8 in 2026-03-10T08:39:12.004 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: cluster 2026-03-10T08:39:11.399759+0000 mon.vm02 (mon.0) 730 : cluster [DBG] osdmap e31: 8 total, 8 up, 8 in 2026-03-10T08:39:12.004 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:11.409004+0000 mon.vm07 (mon.1) 33 : audit [INF] from='client.? 192.168.123.107:0/2499823566' entity='client.rgw.foo.vm07.wecerd' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T08:39:12.004 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:11.409004+0000 mon.vm07 (mon.1) 33 : audit [INF] from='client.? 192.168.123.107:0/2499823566' entity='client.rgw.foo.vm07.wecerd' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T08:39:12.004 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:11.409056+0000 mon.vm07 (mon.1) 34 : audit [INF] from='client.? 192.168.123.107:0/2357050314' entity='client.rgw.foo.vm07.zylyez' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T08:39:12.004 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:11.409056+0000 mon.vm07 (mon.1) 34 : audit [INF] from='client.? 
192.168.123.107:0/2357050314' entity='client.rgw.foo.vm07.zylyez' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T08:39:12.004 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:11.410660+0000 mon.vm02 (mon.0) 731 : audit [INF] from='client.? 192.168.123.102:0/690772262' entity='client.rgw.foo.vm02.rugqqv' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T08:39:12.004 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:11.410660+0000 mon.vm02 (mon.0) 731 : audit [INF] from='client.? 192.168.123.102:0/690772262' entity='client.rgw.foo.vm02.rugqqv' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T08:39:12.004 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:11.410733+0000 mon.vm02 (mon.0) 732 : audit [INF] from='client.? ' entity='client.rgw.foo.vm07.wecerd' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T08:39:12.004 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:11.410733+0000 mon.vm02 (mon.0) 732 : audit [INF] from='client.? ' entity='client.rgw.foo.vm07.wecerd' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T08:39:12.004 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:11.410776+0000 mon.vm02 (mon.0) 733 : audit [INF] from='client.? ' entity='client.rgw.foo.vm07.zylyez' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T08:39:12.004 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:11.410776+0000 mon.vm02 (mon.0) 733 : audit [INF] from='client.? ' entity='client.rgw.foo.vm07.zylyez' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T08:39:12.004 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:11.411596+0000 mon.vm02 (mon.0) 734 : audit [INF] from='client.? 192.168.123.102:0/2784732644' entity='client.rgw.foo.vm02.bmgnwf' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T08:39:12.004 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:11 vm02 bash[17473]: audit 2026-03-10T08:39:11.411596+0000 mon.vm02 (mon.0) 734 : audit [INF] from='client.? 
192.168.123.102:0/2784732644' entity='client.rgw.foo.vm02.bmgnwf' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-10T08:39:12.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:12 vm02 bash[17473]: audit 2026-03-10T08:39:10.929816+0000 mgr.vm02.ttibzz (mgr.14195) 169 : audit [DBG] from='client.24361 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T08:39:12.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:12 vm02 bash[17473]: audit 2026-03-10T08:39:10.929816+0000 mgr.vm02.ttibzz (mgr.14195) 169 : audit [DBG] from='client.24361 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T08:39:12.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:12 vm02 bash[17473]: audit 2026-03-10T08:39:12.398878+0000 mon.vm02 (mon.0) 735 : audit [INF] from='client.? 192.168.123.102:0/690772262' entity='client.rgw.foo.vm02.rugqqv' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-10T08:39:12.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:12 vm02 bash[17473]: audit 2026-03-10T08:39:12.398878+0000 mon.vm02 (mon.0) 735 : audit [INF] from='client.? 192.168.123.102:0/690772262' entity='client.rgw.foo.vm02.rugqqv' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-10T08:39:12.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:12 vm02 bash[17473]: audit 2026-03-10T08:39:12.399040+0000 mon.vm02 (mon.0) 736 : audit [INF] from='client.? ' entity='client.rgw.foo.vm07.wecerd' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-10T08:39:12.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:12 vm02 bash[17473]: audit 2026-03-10T08:39:12.399040+0000 mon.vm02 (mon.0) 736 : audit [INF] from='client.? ' entity='client.rgw.foo.vm07.wecerd' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-10T08:39:12.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:12 vm02 bash[17473]: audit 2026-03-10T08:39:12.399100+0000 mon.vm02 (mon.0) 737 : audit [INF] from='client.? ' entity='client.rgw.foo.vm07.zylyez' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-10T08:39:12.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:12 vm02 bash[17473]: audit 2026-03-10T08:39:12.399100+0000 mon.vm02 (mon.0) 737 : audit [INF] from='client.? ' entity='client.rgw.foo.vm07.zylyez' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-10T08:39:12.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:12 vm02 bash[17473]: audit 2026-03-10T08:39:12.399206+0000 mon.vm02 (mon.0) 738 : audit [INF] from='client.? 192.168.123.102:0/2784732644' entity='client.rgw.foo.vm02.bmgnwf' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-10T08:39:12.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:12 vm02 bash[17473]: audit 2026-03-10T08:39:12.399206+0000 mon.vm02 (mon.0) 738 : audit [INF] from='client.? 
192.168.123.102:0/2784732644' entity='client.rgw.foo.vm02.bmgnwf' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-10T08:39:12.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:12 vm02 bash[17473]: cluster 2026-03-10T08:39:12.401403+0000 mon.vm02 (mon.0) 739 : cluster [DBG] osdmap e32: 8 total, 8 up, 8 in 2026-03-10T08:39:12.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:12 vm02 bash[17473]: cluster 2026-03-10T08:39:12.401403+0000 mon.vm02 (mon.0) 739 : cluster [DBG] osdmap e32: 8 total, 8 up, 8 in 2026-03-10T08:39:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:13 vm02 bash[17473]: cluster 2026-03-10T08:39:12.215071+0000 mgr.vm02.ttibzz (mgr.14195) 170 : cluster [DBG] pgmap v113: 129 pgs: 11 creating+peering, 83 active+clean, 35 unknown; 450 KiB data, 213 MiB used, 160 GiB / 160 GiB avail; 7.4 KiB/s rd, 1.4 KiB/s wr, 11 op/s 2026-03-10T08:39:14.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:13 vm02 bash[17473]: cluster 2026-03-10T08:39:12.215071+0000 mgr.vm02.ttibzz (mgr.14195) 170 : cluster [DBG] pgmap v113: 129 pgs: 11 creating+peering, 83 active+clean, 35 unknown; 450 KiB data, 213 MiB used, 160 GiB / 160 GiB avail; 7.4 KiB/s rd, 1.4 KiB/s wr, 11 op/s 2026-03-10T08:39:15.120 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:15 vm02 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-03-10T08:39:15.447 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:15 vm02 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 
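The repeated "INFO:tasks.cephadm:rgw.foo has 0/4" lines come from the cephadm.wait_for_service task polling "ceph orch ls -f json" until the service reports as many running daemons as its placement size. A minimal sketch of that readiness check in Python (illustrative only; it assumes the JSON shape visible in the dumps above and is not teuthology's actual implementation):

    import json

    def service_counts(orch_ls_output: str, service_name: str):
        """Return (running, size) for one service from 'ceph orch ls -f json'
        output, mirroring the 'rgw.foo has 0/4' progress lines."""
        for svc in json.loads(orch_ls_output):
            if svc.get("service_name") == service_name:
                status = svc.get("status", {})
                return status.get("running", 0), status.get("size", 0)
        return 0, 0

    # Tiny sample in the same shape as the dumps above (hypothetical input).
    sample = '[{"service_name": "rgw.foo", "status": {"running": 0, "size": 4}}]'
    running, size = service_counts(sample, "rgw.foo")
    print(f"rgw.foo has {running}/{size}")   # -> rgw.foo has 0/4
    ready = size > 0 and running == size     # the task re-polls until this holds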
2026-03-10T08:39:16.039 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:15 vm02 bash[17473]: cluster 2026-03-10T08:39:14.215546+0000 mgr.vm02.ttibzz (mgr.14195) 171 : cluster [DBG] pgmap v115: 129 pgs: 4 creating+peering, 112 active+clean, 13 unknown; 451 KiB data, 213 MiB used, 160 GiB / 160 GiB avail; 80 KiB/s rd, 2.1 KiB/s wr, 137 op/s 2026-03-10T08:39:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:15 vm02 bash[17473]: cluster 2026-03-10T08:39:14.215546+0000 mgr.vm02.ttibzz (mgr.14195) 171 : cluster [DBG] pgmap v115: 129 pgs: 4 creating+peering, 112 active+clean, 13 unknown; 451 KiB data, 213 MiB used, 160 GiB / 160 GiB avail; 80 KiB/s rd, 2.1 KiB/s wr, 137 op/s 2026-03-10T08:39:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:15 vm02 bash[17473]: audit 2026-03-10T08:39:15.390278+0000 mon.vm02 (mon.0) 740 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:15 vm02 bash[17473]: audit 2026-03-10T08:39:15.390278+0000 mon.vm02 (mon.0) 740 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:15 vm02 bash[17473]: audit 2026-03-10T08:39:15.399933+0000 mon.vm02 (mon.0) 741 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:15 vm02 bash[17473]: audit 2026-03-10T08:39:15.399933+0000 mon.vm02 (mon.0) 741 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:15 vm02 bash[17473]: audit 2026-03-10T08:39:15.406058+0000 mon.vm02 (mon.0) 742 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:15 vm02 bash[17473]: audit 2026-03-10T08:39:15.406058+0000 mon.vm02 (mon.0) 742 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:15 vm02 bash[17473]: audit 2026-03-10T08:39:15.416667+0000 mon.vm02 (mon.0) 743 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:16.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:15 vm02 bash[17473]: audit 2026-03-10T08:39:15.416667+0000 mon.vm02 (mon.0) 743 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:17.039 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:16 vm02 bash[17473]: cephadm 2026-03-10T08:39:15.417267+0000 mgr.vm02.ttibzz (mgr.14195) 172 : cephadm [INF] 12.12.1.102 is in 12.12.0.0/22 on vm07 interface ens3 2026-03-10T08:39:17.039 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:16 vm02 bash[17473]: cephadm 2026-03-10T08:39:15.417267+0000 mgr.vm02.ttibzz (mgr.14195) 172 : cephadm [INF] 12.12.1.102 is in 12.12.0.0/22 on vm07 interface ens3 2026-03-10T08:39:17.039 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:16 vm02 bash[17473]: cephadm 2026-03-10T08:39:15.417312+0000 mgr.vm02.ttibzz (mgr.14195) 173 : cephadm [INF] 12.12.1.102 is in 12.12.0.0/22 on vm02 interface ens3 2026-03-10T08:39:17.039 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:16 vm02 bash[17473]: cephadm 2026-03-10T08:39:15.417312+0000 mgr.vm02.ttibzz (mgr.14195) 173 : 
cephadm [INF] 12.12.1.102 is in 12.12.0.0/22 on vm02 interface ens3 2026-03-10T08:39:17.039 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:16 vm02 bash[17473]: cephadm 2026-03-10T08:39:15.419816+0000 mgr.vm02.ttibzz (mgr.14195) 174 : cephadm [INF] Deploying daemon keepalived.rgw.foo.vm07.fctrof on vm07 2026-03-10T08:39:17.039 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:16 vm02 bash[17473]: cephadm 2026-03-10T08:39:15.419816+0000 mgr.vm02.ttibzz (mgr.14195) 174 : cephadm [INF] Deploying daemon keepalived.rgw.foo.vm07.fctrof on vm07 2026-03-10T08:39:17.640 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:39:17.902 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-10T08:39:17.902 INFO:teuthology.orchestra.run.vm02.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T08:35:20.842677Z", "last_refresh": "2026-03-10T08:38:55.594452Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T08:36:12.013728Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T08:35:19.468871Z", "last_refresh": "2026-03-10T08:38:55.594700Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:12.805296Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T08:35:19.169069Z", "last_refresh": "2026-03-10T08:38:55.594561Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-10T08:35:20.141255Z", "last_refresh": "2026-03-10T08:38:55.594420Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T08:39:15.406265Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.102/22"}, "status": {"created": "2026-03-10T08:38:55.205436Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.102/22"}}, {"events": ["2026-03-10T08:36:14.365711Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T08:35:18.882993Z", "last_refresh": "2026-03-10T08:38:55.594673Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:15.520477Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm02:192.168.123.102=vm02", "vm07:192.168.123.107=vm07"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T08:36:00.723693Z", "last_refresh": "2026-03-10T08:38:55.594507Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:13.564481Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T08:35:20.546162Z", "last_refresh": "2026-03-10T08:38:55.594646Z", "ports": [9100], "running": 2, "size": 2}}, {"events": 
["2026-03-10T08:36:34.804220Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T08:36:34.800913Z", "last_refresh": "2026-03-10T08:38:55.594372Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T08:36:15.524081Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T08:35:19.796756Z", "last_refresh": "2026-03-10T08:38:55.594591Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-03-10T08:39:05.315869Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_frontend_port": 8000}, "status": {"created": "2026-03-10T08:39:05.293652Z", "ports": [8000], "running": 0, "size": 4}}] 2026-03-10T08:39:17.914 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:17 vm02 bash[17473]: cluster 2026-03-10T08:39:16.215974+0000 mgr.vm02.ttibzz (mgr.14195) 175 : cluster [DBG] pgmap v116: 129 pgs: 4 creating+peering, 125 active+clean; 453 KiB data, 218 MiB used, 160 GiB / 160 GiB avail; 256 KiB/s rd, 4.7 KiB/s wr, 447 op/s 2026-03-10T08:39:17.915 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:17 vm02 bash[17473]: cluster 2026-03-10T08:39:16.215974+0000 mgr.vm02.ttibzz (mgr.14195) 175 : cluster [DBG] pgmap v116: 129 pgs: 4 creating+peering, 125 active+clean; 453 KiB data, 218 MiB used, 160 GiB / 160 GiB avail; 256 KiB/s rd, 4.7 KiB/s wr, 447 op/s 2026-03-10T08:39:17.964 INFO:tasks.cephadm:rgw.foo has 0/4 2026-03-10T08:39:18.964 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph orch ls -f json 2026-03-10T08:39:18.970 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:18 vm02 bash[17473]: audit 2026-03-10T08:39:17.895683+0000 mgr.vm02.ttibzz (mgr.14195) 176 : audit [DBG] from='client.14520 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T08:39:18.970 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:18 vm02 bash[17473]: audit 2026-03-10T08:39:17.895683+0000 mgr.vm02.ttibzz (mgr.14195) 176 : audit [DBG] from='client.14520 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T08:39:20.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:19 vm02 bash[17473]: cluster 2026-03-10T08:39:18.216454+0000 mgr.vm02.ttibzz (mgr.14195) 177 : cluster [DBG] pgmap v117: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 250 KiB/s rd, 4.1 KiB/s wr, 437 op/s 2026-03-10T08:39:20.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:19 vm02 bash[17473]: cluster 2026-03-10T08:39:18.216454+0000 mgr.vm02.ttibzz (mgr.14195) 177 : cluster [DBG] pgmap v117: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 250 KiB/s rd, 4.1 KiB/s wr, 437 op/s 2026-03-10T08:39:20.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:19 vm02 bash[17473]: audit 
2026-03-10T08:39:19.238119+0000 mon.vm02 (mon.0) 744 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:39:20.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:19 vm02 bash[17473]: audit 2026-03-10T08:39:19.238119+0000 mon.vm02 (mon.0) 744 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:39:20.592 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:39:20 vm07 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-03-10T08:39:20.855 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:39:20 vm07 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-03-10T08:39:22.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:21 vm02 bash[17473]: cluster 2026-03-10T08:39:20.216817+0000 mgr.vm02.ttibzz (mgr.14195) 178 : cluster [DBG] pgmap v118: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 224 KiB/s rd, 3.6 KiB/s wr, 392 op/s 2026-03-10T08:39:22.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:21 vm02 bash[17473]: cluster 2026-03-10T08:39:20.216817+0000 mgr.vm02.ttibzz (mgr.14195) 178 : cluster [DBG] pgmap v118: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 224 KiB/s rd, 3.6 KiB/s wr, 392 op/s 2026-03-10T08:39:22.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:21 vm02 bash[17473]: audit 2026-03-10T08:39:20.853468+0000 mon.vm02 (mon.0) 745 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:22.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:21 vm02 bash[17473]: audit 2026-03-10T08:39:20.853468+0000 mon.vm02 (mon.0) 745 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:22.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:21 vm02 bash[17473]: audit 2026-03-10T08:39:20.858564+0000 mon.vm02 (mon.0) 746 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:22.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:21 vm02 bash[17473]: audit 2026-03-10T08:39:20.858564+0000 mon.vm02 (mon.0) 746 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:22.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:21 vm02 bash[17473]: audit 2026-03-10T08:39:20.862810+0000 mon.vm02 (mon.0) 747 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:22.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:21 vm02 bash[17473]: audit 2026-03-10T08:39:20.862810+0000 mon.vm02 (mon.0) 747 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:22.290 
INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:21 vm02 bash[17473]: cephadm 2026-03-10T08:39:20.863714+0000 mgr.vm02.ttibzz (mgr.14195) 179 : cephadm [INF] 12.12.1.102 is in 12.12.0.0/22 on vm02 interface ens3 2026-03-10T08:39:22.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:21 vm02 bash[17473]: cephadm 2026-03-10T08:39:20.863714+0000 mgr.vm02.ttibzz (mgr.14195) 179 : cephadm [INF] 12.12.1.102 is in 12.12.0.0/22 on vm02 interface ens3 2026-03-10T08:39:22.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:21 vm02 bash[17473]: cephadm 2026-03-10T08:39:20.863754+0000 mgr.vm02.ttibzz (mgr.14195) 180 : cephadm [INF] 12.12.1.102 is in 12.12.0.0/22 on vm07 interface ens3 2026-03-10T08:39:22.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:21 vm02 bash[17473]: cephadm 2026-03-10T08:39:20.863754+0000 mgr.vm02.ttibzz (mgr.14195) 180 : cephadm [INF] 12.12.1.102 is in 12.12.0.0/22 on vm07 interface ens3 2026-03-10T08:39:22.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:21 vm02 bash[17473]: cephadm 2026-03-10T08:39:20.864035+0000 mgr.vm02.ttibzz (mgr.14195) 181 : cephadm [INF] Deploying daemon keepalived.rgw.foo.vm02.sbacfj on vm02 2026-03-10T08:39:22.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:21 vm02 bash[17473]: cephadm 2026-03-10T08:39:20.864035+0000 mgr.vm02.ttibzz (mgr.14195) 181 : cephadm [INF] Deploying daemon keepalived.rgw.foo.vm02.sbacfj on vm02 2026-03-10T08:39:23.604 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:39:23.939 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-10T08:39:23.939 INFO:teuthology.orchestra.run.vm02.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T08:35:20.842677Z", "last_refresh": "2026-03-10T08:38:55.594452Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T08:36:12.013728Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T08:35:19.468871Z", "last_refresh": "2026-03-10T08:38:55.594700Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:12.805296Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T08:35:19.169069Z", "last_refresh": "2026-03-10T08:38:55.594561Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-10T08:35:20.141255Z", "last_refresh": "2026-03-10T08:38:55.594420Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T08:39:20.863004Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.102/22"}, "status": {"created": "2026-03-10T08:38:55.205436Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.102/22"}}, {"events": ["2026-03-10T08:36:14.365711Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, 
"service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T08:35:18.882993Z", "last_refresh": "2026-03-10T08:38:55.594673Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:15.520477Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm02:192.168.123.102=vm02", "vm07:192.168.123.107=vm07"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T08:36:00.723693Z", "last_refresh": "2026-03-10T08:38:55.594507Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:13.564481Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T08:35:20.546162Z", "last_refresh": "2026-03-10T08:38:55.594646Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:34.804220Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T08:36:34.800913Z", "last_refresh": "2026-03-10T08:38:55.594372Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T08:36:15.524081Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T08:35:19.796756Z", "last_refresh": "2026-03-10T08:38:55.594591Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-03-10T08:39:05.315869Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_frontend_port": 8000}, "status": {"created": "2026-03-10T08:39:05.293652Z", "ports": [8000], "running": 0, "size": 4}}] 2026-03-10T08:39:23.949 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:23 vm02 bash[17473]: cluster 2026-03-10T08:39:22.217290+0000 mgr.vm02.ttibzz (mgr.14195) 182 : cluster [DBG] pgmap v119: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 197 KiB/s rd, 3.2 KiB/s wr, 346 op/s 2026-03-10T08:39:23.949 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:23 vm02 bash[17473]: cluster 2026-03-10T08:39:22.217290+0000 mgr.vm02.ttibzz (mgr.14195) 182 : cluster [DBG] pgmap v119: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 197 KiB/s rd, 3.2 KiB/s wr, 346 op/s 2026-03-10T08:39:24.033 INFO:tasks.cephadm:rgw.foo has 0/4 2026-03-10T08:39:25.034 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph orch ls -f json 2026-03-10T08:39:26.102 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:25 vm02 bash[17473]: audit 2026-03-10T08:39:23.934078+0000 mgr.vm02.ttibzz (mgr.14195) 183 : audit [DBG] from='client.14524 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T08:39:26.102 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:25 vm02 bash[17473]: audit 2026-03-10T08:39:23.934078+0000 mgr.vm02.ttibzz (mgr.14195) 183 : audit [DBG] from='client.14524 -' entity='client.admin' 
cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-10T08:39:26.102 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:25 vm02 bash[17473]: cluster 2026-03-10T08:39:24.217674+0000 mgr.vm02.ttibzz (mgr.14195) 184 : cluster [DBG] pgmap v120: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 167 KiB/s rd, 2.7 KiB/s wr, 292 op/s 2026-03-10T08:39:26.102 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:25 vm02 bash[17473]: cluster 2026-03-10T08:39:24.217674+0000 mgr.vm02.ttibzz (mgr.14195) 184 : cluster [DBG] pgmap v120: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 167 KiB/s rd, 2.7 KiB/s wr, 292 op/s 2026-03-10T08:39:26.102 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:26 vm02 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-03-10T08:39:26.355 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:26 vm02 systemd[1]: /etc/systemd/system/ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-03-10T08:39:27.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:26 vm02 bash[17473]: audit 2026-03-10T08:39:25.961807+0000 mon.vm02 (mon.0) 748 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:27.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:26 vm02 bash[17473]: audit 2026-03-10T08:39:25.961807+0000 mon.vm02 (mon.0) 748 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:27.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:26 vm02 bash[17473]: cluster 2026-03-10T08:39:26.218069+0000 mgr.vm02.ttibzz (mgr.14195) 185 : cluster [DBG] pgmap v121: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 133 KiB/s rd, 2.0 KiB/s wr, 232 op/s 2026-03-10T08:39:27.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:26 vm02 bash[17473]: cluster 2026-03-10T08:39:26.218069+0000 mgr.vm02.ttibzz (mgr.14195) 185 : cluster [DBG] pgmap v121: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 133 KiB/s rd, 2.0 KiB/s wr, 232 op/s 2026-03-10T08:39:27.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:26 vm02 bash[17473]: audit 2026-03-10T08:39:26.346947+0000 mon.vm02 (mon.0) 749 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:27.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:26 vm02 bash[17473]: audit 2026-03-10T08:39:26.346947+0000 mon.vm02 (mon.0) 749 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:27.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:26 vm02 bash[17473]: audit 2026-03-10T08:39:26.358009+0000 mon.vm02 (mon.0) 750 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:27.290 
INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:26 vm02 bash[17473]: audit 2026-03-10T08:39:26.358009+0000 mon.vm02 (mon.0) 750 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:27.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:26 vm02 bash[17473]: audit 2026-03-10T08:39:26.363139+0000 mon.vm02 (mon.0) 751 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:27.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:26 vm02 bash[17473]: audit 2026-03-10T08:39:26.363139+0000 mon.vm02 (mon.0) 751 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:27.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:26 vm02 bash[17473]: audit 2026-03-10T08:39:26.369178+0000 mon.vm02 (mon.0) 752 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:27.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:26 vm02 bash[17473]: audit 2026-03-10T08:39:26.369178+0000 mon.vm02 (mon.0) 752 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:39:27.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:26 vm02 bash[17473]: audit 2026-03-10T08:39:26.380679+0000 mon.vm02 (mon.0) 753 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:39:27.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:26 vm02 bash[17473]: audit 2026-03-10T08:39:26.380679+0000 mon.vm02 (mon.0) 753 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:39:29.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:29 vm02 bash[17473]: cluster 2026-03-10T08:39:28.218499+0000 mgr.vm02.ttibzz (mgr.14195) 186 : cluster [DBG] pgmap v122: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 38 KiB/s rd, 511 B/s wr, 67 op/s 2026-03-10T08:39:29.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:29 vm02 bash[17473]: cluster 2026-03-10T08:39:28.218499+0000 mgr.vm02.ttibzz (mgr.14195) 186 : cluster [DBG] pgmap v122: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 38 KiB/s rd, 511 B/s wr, 67 op/s 2026-03-10T08:39:30.698 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:39:31.009 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-10T08:39:31.009 INFO:teuthology.orchestra.run.vm02.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T08:35:20.842677Z", "last_refresh": "2026-03-10T08:38:55.594452Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T08:36:12.013728Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T08:35:19.468871Z", "last_refresh": "2026-03-10T08:38:55.594700Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:12.805296Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": 
"2026-03-10T08:35:19.169069Z", "last_refresh": "2026-03-10T08:38:55.594561Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-10T08:35:20.141255Z", "last_refresh": "2026-03-10T08:38:55.594420Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T08:39:26.369355Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.102/22"}, "status": {"created": "2026-03-10T08:38:55.205436Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.102/22"}}, {"events": ["2026-03-10T08:36:14.365711Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T08:35:18.882993Z", "last_refresh": "2026-03-10T08:38:55.594673Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:15.520477Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm02:192.168.123.102=vm02", "vm07:192.168.123.107=vm07"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T08:36:00.723693Z", "last_refresh": "2026-03-10T08:38:55.594507Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:13.564481Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T08:35:20.546162Z", "last_refresh": "2026-03-10T08:38:55.594646Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:34.804220Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T08:36:34.800913Z", "last_refresh": "2026-03-10T08:38:55.594372Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T08:36:15.524081Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T08:35:19.796756Z", "last_refresh": "2026-03-10T08:38:55.594591Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-03-10T08:39:05.315869Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_frontend_port": 8000}, "status": {"created": "2026-03-10T08:39:05.293652Z", "ports": [8000], "running": 0, "size": 4}}] 2026-03-10T08:39:31.073 INFO:tasks.cephadm:rgw.foo has 0/4 2026-03-10T08:39:31.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:31 vm02 bash[17473]: cluster 2026-03-10T08:39:30.218881+0000 mgr.vm02.ttibzz (mgr.14195) 187 : cluster [DBG] pgmap v123: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 3.8 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-10T08:39:31.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:31 vm02 bash[17473]: cluster 2026-03-10T08:39:30.218881+0000 mgr.vm02.ttibzz (mgr.14195) 187 : 
cluster [DBG] pgmap v123: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 3.8 KiB/s rd, 0 B/s wr, 6 op/s
2026-03-10T08:39:31.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:31 vm02 bash[17473]: audit 2026-03-10T08:39:30.967623+0000 mon.vm02 (mon.0) 754 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:32.074 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph orch ls -f json
2026-03-10T08:39:32.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:32 vm02 bash[17473]: audit 2026-03-10T08:39:31.002877+0000 mgr.vm02.ttibzz (mgr.14195) 188 : audit [DBG] from='client.14528 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-10T08:39:32.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:32 vm02 bash[17473]: audit 2026-03-10T08:39:31.465956+0000 mon.vm02 (mon.0) 755 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:32.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:32 vm02 bash[17473]: audit 2026-03-10T08:39:31.471074+0000 mon.vm02 (mon.0) 756 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:32.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:32 vm02 bash[17473]: audit 2026-03-10T08:39:31.562013+0000 mon.vm02 (mon.0) 757 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:32.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:32 vm02 bash[17473]: audit 2026-03-10T08:39:31.566853+0000 mon.vm02 (mon.0) 758 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:32.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:32 vm02 bash[17473]: audit 2026-03-10T08:39:31.567572+0000 mon.vm02 (mon.0) 759 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:39:32.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:32 vm02 bash[17473]: audit 2026-03-10T08:39:31.568028+0000 mon.vm02 (mon.0) 760 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T08:39:32.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:32 vm02 bash[17473]: cephadm 2026-03-10T08:39:31.570514+0000 mgr.vm02.ttibzz (mgr.14195) 189 : cephadm [INF] Checking dashboard <-> RGW credentials
2026-03-10T08:39:32.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:32 vm02 bash[17473]: audit 2026-03-10T08:39:31.801838+0000 mon.vm02 (mon.0) 761 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:32.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:32 vm02 bash[17473]: audit 2026-03-10T08:39:31.806410+0000 mon.vm02 (mon.0) 762 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:32.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:32 vm02 bash[17473]: audit 2026-03-10T08:39:31.810676+0000 mon.vm02 (mon.0) 763 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:32.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:32 vm02 bash[17473]: audit 2026-03-10T08:39:31.816715+0000 mon.vm02 (mon.0) 764 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T08:39:32.790 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:32 vm02 bash[17473]: cephadm 2026-03-10T08:39:31.829042+0000 mgr.vm02.ttibzz (mgr.14195) 190 : cephadm [INF] Reconfiguring prometheus.vm02 (dependencies changed)...
2026-03-10T08:39:34.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:33 vm02 bash[17473]: cephadm 2026-03-10T08:39:31.978391+0000 mgr.vm02.ttibzz (mgr.14195) 191 : cephadm [INF] Reconfiguring daemon prometheus.vm02 on vm02
2026-03-10T08:39:34.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:33 vm02 bash[17473]: cluster 2026-03-10T08:39:32.219558+0000 mgr.vm02.ttibzz (mgr.14195) 192 : cluster [DBG] pgmap v124: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:39:34.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:33 vm02 bash[17473]: audit 2026-03-10T08:39:32.547360+0000 mon.vm02 (mon.0) 765 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:34.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:33 vm02 bash[17473]: audit 2026-03-10T08:39:32.552979+0000 mon.vm02 (mon.0) 766 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:34.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:33 vm02 bash[17473]: audit 2026-03-10T08:39:32.557654+0000 mon.vm02 (mon.0) 767 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-10T08:39:34.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:33 vm02 bash[17473]: audit 2026-03-10T08:39:32.558023+0000 mgr.vm02.ttibzz (mgr.14195) 193 : audit [DBG] from='mon.0 -' entity='mon.'
cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T08:39:34.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:33 vm02 bash[17473]: audit 2026-03-10T08:39:32.558023+0000 mgr.vm02.ttibzz (mgr.14195) 193 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-10T08:39:34.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:33 vm02 bash[17473]: audit 2026-03-10T08:39:32.600229+0000 mon.vm02 (mon.0) 768 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:39:34.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:33 vm02 bash[17473]: audit 2026-03-10T08:39:32.600229+0000 mon.vm02 (mon.0) 768 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:39:35.039 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:34 vm02 bash[17473]: audit 2026-03-10T08:39:34.238384+0000 mon.vm02 (mon.0) 769 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:39:35.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:34 vm02 bash[17473]: audit 2026-03-10T08:39:34.238384+0000 mon.vm02 (mon.0) 769 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:39:36.039 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:35 vm02 bash[17473]: cluster 2026-03-10T08:39:34.220038+0000 mgr.vm02.ttibzz (mgr.14195) 194 : cluster [DBG] pgmap v125: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:39:36.039 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:35 vm02 bash[17473]: cluster 2026-03-10T08:39:34.220038+0000 mgr.vm02.ttibzz (mgr.14195) 194 : cluster [DBG] pgmap v125: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:39:36.719 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:39:37.032 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-10T08:39:37.032 INFO:teuthology.orchestra.run.vm02.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T08:35:20.842677Z", "last_refresh": "2026-03-10T08:39:31.460403Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T08:36:12.013728Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T08:35:19.468871Z", "last_refresh": "2026-03-10T08:39:31.460703Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:12.805296Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T08:35:19.169069Z", "last_refresh": "2026-03-10T08:39:31.460573Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-10T08:35:20.141255Z", "last_refresh": 
"2026-03-10T08:39:31.460336Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T08:39:26.369355Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.102/22"}, "status": {"created": "2026-03-10T08:38:55.205436Z", "last_refresh": "2026-03-10T08:39:31.460370Z", "ports": [9000, 9001], "running": 4, "size": 4, "virtual_ip": "12.12.1.102/22"}}, {"events": ["2026-03-10T08:36:14.365711Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T08:35:18.882993Z", "last_refresh": "2026-03-10T08:39:31.460677Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:15.520477Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm02:192.168.123.102=vm02", "vm07:192.168.123.107=vm07"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T08:36:00.723693Z", "last_refresh": "2026-03-10T08:39:31.460461Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:13.564481Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T08:35:20.546162Z", "last_refresh": "2026-03-10T08:39:31.460651Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:34.804220Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T08:36:34.800913Z", "last_refresh": "2026-03-10T08:39:31.460286Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T08:36:15.524081Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T08:35:19.796756Z", "ports": [9095], "running": 0, "size": 1}}, {"events": ["2026-03-10T08:39:05.315869Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_frontend_port": 8000}, "status": {"created": "2026-03-10T08:39:05.293652Z", "last_refresh": "2026-03-10T08:39:31.460545Z", "ports": [8000], "running": 4, "size": 4}}] 2026-03-10T08:39:37.111 INFO:tasks.cephadm:rgw.foo has 4/4 2026-03-10T08:39:37.111 INFO:teuthology.run_tasks:Running task cephadm.wait_for_service... 2026-03-10T08:39:37.114 INFO:tasks.cephadm:Waiting for ceph service ingress.rgw.foo to start (timeout 300)... 
2026-03-10T08:39:37.114 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph orch ls -f json
2026-03-10T08:39:38.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:37 vm02 bash[17473]: cluster 2026-03-10T08:39:36.220442+0000 mgr.vm02.ttibzz (mgr.14195) 195 : cluster [DBG] pgmap v126: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 170 B/s wr, 1 op/s
2026-03-10T08:39:39.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:38 vm02 bash[17473]: audit 2026-03-10T08:39:37.026435+0000 mgr.vm02.ttibzz (mgr.14195) 196 : audit [DBG] from='client.14568 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-10T08:39:39.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:38 vm02 bash[17473]: audit 2026-03-10T08:39:37.573398+0000 mon.vm02 (mon.0) 770 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:39.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:38 vm02 bash[17473]: audit 2026-03-10T08:39:37.579899+0000 mon.vm02 (mon.0) 771 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:39.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:38 vm02 bash[17473]: audit 2026-03-10T08:39:37.850230+0000 mon.vm02 (mon.0) 772 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:39.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:38 vm02 bash[17473]: audit 2026-03-10T08:39:37.856327+0000 mon.vm02 (mon.0) 773 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:39.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:38 vm02 bash[17473]: audit 2026-03-10T08:39:38.149102+0000 mon.vm02 (mon.0) 774 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:39:39.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:38 vm02 bash[17473]: audit 2026-03-10T08:39:38.149798+0000 mon.vm02 (mon.0) 775 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T08:39:39.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:38 vm02 bash[17473]: audit 2026-03-10T08:39:38.305744+0000 mon.vm02 (mon.0) 776 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:39.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:38 vm02 bash[17473]: audit 2026-03-10T08:39:38.308041+0000 mon.vm02 (mon.0) 777 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T08:39:40.039 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:39 vm02 bash[17473]: cephadm 2026-03-10T08:39:38.152336+0000 mgr.vm02.ttibzz (mgr.14195) 197 : cephadm [INF] Checking dashboard <-> RGW credentials
2026-03-10T08:39:40.040 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:39 vm02 bash[17473]: cluster 2026-03-10T08:39:38.220898+0000 mgr.vm02.ttibzz (mgr.14195) 198 : cluster [DBG] pgmap v127: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 2.2 KiB/s rd, 170 B/s wr, 2 op/s
2026-03-10T08:39:41.782 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config
/var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config 2026-03-10T08:39:42.024 INFO:teuthology.orchestra.run.vm02.stdout: 2026-03-10T08:39:42.024 INFO:teuthology.orchestra.run.vm02.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-10T08:35:20.842677Z", "last_refresh": "2026-03-10T08:39:37.567579Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-10T08:36:12.013728Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-10T08:35:19.468871Z", "last_refresh": "2026-03-10T08:39:37.567955Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:12.805296Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-10T08:35:19.169069Z", "last_refresh": "2026-03-10T08:39:37.567794Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-10T08:35:20.141255Z", "last_refresh": "2026-03-10T08:39:37.565170Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-10T08:39:26.369355Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.102/22"}, "status": {"created": "2026-03-10T08:38:55.205436Z", "last_refresh": "2026-03-10T08:39:37.565237Z", "ports": [9000, 9001], "running": 4, "size": 4, "virtual_ip": "12.12.1.102/22"}}, {"events": ["2026-03-10T08:36:14.365711Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-10T08:35:18.882993Z", "last_refresh": "2026-03-10T08:39:37.567926Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:15.520477Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm02:192.168.123.102=vm02", "vm07:192.168.123.107=vm07"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-10T08:36:00.723693Z", "last_refresh": "2026-03-10T08:39:37.567671Z", "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:13.564481Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-10T08:35:20.546162Z", "last_refresh": "2026-03-10T08:39:37.567889Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-10T08:36:34.804220Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-10T08:36:34.800913Z", "last_refresh": "2026-03-10T08:39:37.565052Z", "running": 8, "size": 8}}, {"events": ["2026-03-10T08:36:15.524081Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": 
"prometheus", "service_type": "prometheus", "status": {"created": "2026-03-10T08:35:19.796756Z", "last_refresh": "2026-03-10T08:39:37.567827Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-03-10T08:39:05.315869Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_frontend_port": 8000}, "status": {"created": "2026-03-10T08:39:05.293652Z", "last_refresh": "2026-03-10T08:39:37.567766Z", "ports": [8000], "running": 4, "size": 4}}] 2026-03-10T08:39:42.070 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:41 vm02 bash[17473]: cluster 2026-03-10T08:39:40.221321+0000 mgr.vm02.ttibzz (mgr.14195) 199 : cluster [DBG] pgmap v128: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 2.4 KiB/s rd, 170 B/s wr, 2 op/s 2026-03-10T08:39:42.070 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:41 vm02 bash[17473]: cluster 2026-03-10T08:39:40.221321+0000 mgr.vm02.ttibzz (mgr.14195) 199 : cluster [DBG] pgmap v128: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 2.4 KiB/s rd, 170 B/s wr, 2 op/s 2026-03-10T08:39:42.071 INFO:tasks.cephadm:ingress.rgw.foo has 4/4 2026-03-10T08:39:42.071 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-10T08:39:42.074 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm02.local 2026-03-10T08:39:42.074 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- bash -c 'echo "Check while healthy..." 2026-03-10T08:39:42.074 DEBUG:teuthology.orchestra.run.vm02:> curl http://12.12.1.102:9000/ 2026-03-10T08:39:42.074 DEBUG:teuthology.orchestra.run.vm02:> 2026-03-10T08:39:42.074 DEBUG:teuthology.orchestra.run.vm02:> # stop each rgw in turn 2026-03-10T08:39:42.074 DEBUG:teuthology.orchestra.run.vm02:> echo "Check with each rgw stopped in turn..." 2026-03-10T08:39:42.074 DEBUG:teuthology.orchestra.run.vm02:> for rgw in `ceph orch ps | grep ^rgw.foo. | awk '"'"'{print $1}'"'"'`; do 2026-03-10T08:39:42.074 DEBUG:teuthology.orchestra.run.vm02:> ceph orch daemon stop $rgw 2026-03-10T08:39:42.074 DEBUG:teuthology.orchestra.run.vm02:> timeout 300 bash -c "while ! ceph orch ps | grep $rgw | grep stopped; do echo '"'"'Waiting for $rgw to stop'"'"'; ceph orch ps --daemon-type rgw; ceph health detail; sleep 5 ; done" 2026-03-10T08:39:42.074 DEBUG:teuthology.orchestra.run.vm02:> timeout 300 bash -c "while ! curl http://12.12.1.102:9000/ ; do echo '"'"'Waiting for http://12.12.1.102:9000/ to be available'"'"'; sleep 1 ; done" 2026-03-10T08:39:42.074 DEBUG:teuthology.orchestra.run.vm02:> ceph orch daemon start $rgw 2026-03-10T08:39:42.074 DEBUG:teuthology.orchestra.run.vm02:> timeout 300 bash -c "while ! ceph orch ps | grep $rgw | grep running; do echo '"'"'Waiting for $rgw to start'"'"'; ceph orch ps --daemon-type rgw; ceph health detail; sleep 5 ; done" 2026-03-10T08:39:42.074 DEBUG:teuthology.orchestra.run.vm02:> done 2026-03-10T08:39:42.074 DEBUG:teuthology.orchestra.run.vm02:> 2026-03-10T08:39:42.074 DEBUG:teuthology.orchestra.run.vm02:> # stop each haproxy in turn 2026-03-10T08:39:42.074 DEBUG:teuthology.orchestra.run.vm02:> echo "Check with each haproxy down in turn..." 
2026-03-10T08:39:42.075 DEBUG:teuthology.orchestra.run.vm02:> for haproxy in `ceph orch ps | grep ^haproxy.rgw.foo. | awk '"'"'{print $1}'"'"'`; do
2026-03-10T08:39:42.075 DEBUG:teuthology.orchestra.run.vm02:> ceph orch daemon stop $haproxy
2026-03-10T08:39:42.075 DEBUG:teuthology.orchestra.run.vm02:> timeout 300 bash -c "while ! ceph orch ps | grep $haproxy | grep stopped; do echo '"'"'Waiting for $haproxy to stop'"'"'; ceph orch ps --daemon-type haproxy; ceph health detail; sleep 5 ; done"
2026-03-10T08:39:42.075 DEBUG:teuthology.orchestra.run.vm02:> timeout 300 bash -c "while ! curl http://12.12.1.102:9000/ ; do echo '"'"'Waiting for http://12.12.1.102:9000/ to be available'"'"'; sleep 1 ; done"
2026-03-10T08:39:42.075 DEBUG:teuthology.orchestra.run.vm02:> ceph orch daemon start $haproxy
2026-03-10T08:39:42.075 DEBUG:teuthology.orchestra.run.vm02:> timeout 300 bash -c "while ! ceph orch ps | grep $haproxy | grep running; do echo '"'"'Waiting for $haproxy to start'"'"'; ceph orch ps --daemon-type haproxy; ceph health detail; sleep 5 ; done"
2026-03-10T08:39:42.075 DEBUG:teuthology.orchestra.run.vm02:> done
2026-03-10T08:39:42.075 DEBUG:teuthology.orchestra.run.vm02:>
2026-03-10T08:39:42.075 DEBUG:teuthology.orchestra.run.vm02:> timeout 300 bash -c "while ! curl http://12.12.1.102:9000/ ; do echo '"'"'Waiting for http://12.12.1.102:9000/ to be available'"'"'; sleep 1 ; done"'
2026-03-10T08:39:44.289 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:43 vm02 bash[17473]: audit 2026-03-10T08:39:42.018833+0000 mgr.vm02.ttibzz (mgr.14195) 200 : audit [DBG] from='client.14596 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-10T08:39:44.290 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:43 vm02 bash[17473]: cluster 2026-03-10T08:39:42.221710+0000 mgr.vm02.ttibzz (mgr.14195) 201 : cluster [DBG] pgmap v129: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 3.5 KiB/s rd, 170 B/s wr, 3 op/s
2026-03-10T08:39:45.821 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config
2026-03-10T08:39:45.911 INFO:teuthology.orchestra.run.vm02.stdout:Check while healthy...
2026-03-10T08:39:45.913 INFO:teuthology.orchestra.run.vm02.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-03-10T08:39:45.913 INFO:teuthology.orchestra.run.vm02.stderr: Dload Upload Total Spent Left Speed
2026-03-10T08:39:45.914 INFO:teuthology.orchestra.run.vm02.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k
2026-03-10T08:39:45.915 INFO:teuthology.orchestra.run.vm02.stdout:anonymousCheck with each rgw stopped in turn...
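The script the cephadm.shell task just echoed is the heart of the test: stop each rgw daemon in turn, verify the ingress VIP at http://12.12.1.102:9000/ still answers (the 187-byte "anonymous" curl response above is the healthy baseline), restart the daemon, then repeat the same cycle for each haproxy daemon. A rough Python equivalent of one such cycle, assuming the same `ceph orch` CLI is on PATH; the helper names are invented for this sketch:

    import subprocess
    import time
    import urllib.request

    def wait_http(url, timeout=300):
        # Probe the ingress VIP until it answers, like the curl retry loop.
        deadline = time.time() + timeout
        while True:
            try:
                urllib.request.urlopen(url, timeout=5)
                return
            except OSError:
                if time.time() > deadline:
                    raise RuntimeError("%s unreachable after %ds" % (url, timeout))
                time.sleep(1)

    def wait_daemon(daemon, state, timeout=300):
        # Same idea as `ceph orch ps | grep $rgw | grep stopped` in the script.
        deadline = time.time() + timeout
        while time.time() < deadline:
            out = subprocess.check_output(["ceph", "orch", "ps"], text=True)
            for line in out.splitlines():
                if line.startswith(daemon) and state in line:
                    return
            time.sleep(5)
        raise RuntimeError("%s never reached %s" % (daemon, state))

    def exercise_failover(daemon, vip_url="http://12.12.1.102:9000/"):
        subprocess.check_call(["ceph", "orch", "daemon", "stop", daemon])
        wait_daemon(daemon, "stopped")
        wait_http(vip_url)   # ingress must keep serving with one backend down
        subprocess.check_call(["ceph", "orch", "daemon", "start", daemon])
        wait_daemon(daemon, "running")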
2026-03-10T08:39:46.239 INFO:teuthology.orchestra.run.vm02.stdout:Scheduled to stop rgw.foo.vm02.bmgnwf on host 'vm02'
2026-03-10T08:39:46.289 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:45 vm02 bash[17473]: cluster 2026-03-10T08:39:44.222118+0000 mgr.vm02.ttibzz (mgr.14195) 202 : cluster [DBG] pgmap v130: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 3.5 KiB/s rd, 170 B/s wr, 3 op/s
2026-03-10T08:39:46.443 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:39:46.603 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:39:46.603 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (41s) 9s ago 41s 92.0M - 19.2.3-678-ge911bdeb 654f31e6858e 137e91d3df67
2026-03-10T08:39:46.603 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (43s) 9s ago 43s 93.8M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:39:46.603 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (43s) 8s ago 43s 90.3M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:39:46.603 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (42s) 8s ago 42s 90.5M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:39:46.784 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK
2026-03-10T08:39:47.539 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:47 vm02 bash[17473]: audit 2026-03-10T08:39:46.053653+0000 mgr.vm02.ttibzz (mgr.14195) 203 : audit [DBG] from='client.24405 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:39:47.539 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:47 vm02 bash[17473]: audit 2026-03-10T08:39:46.215924+0000 mgr.vm02.ttibzz (mgr.14195) 204 : audit [DBG] from='client.24409 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm02.bmgnwf", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:39:47.539 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:47 vm02 bash[17473]: cephadm 2026-03-10T08:39:46.216258+0000 mgr.vm02.ttibzz (mgr.14195) 205 : cephadm [INF] Schedule stop daemon rgw.foo.vm02.bmgnwf
2026-03-10T08:39:47.539 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:47 vm02 bash[17473]: audit 2026-03-10T08:39:46.221529+0000 mon.vm02 (mon.0) 778 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:47.539 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:47 vm02 bash[17473]: cluster 2026-03-10T08:39:46.222456+0000 mgr.vm02.ttibzz (mgr.14195) 206 : cluster [DBG] pgmap v131: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 3.8 KiB/s rd, 853 B/s wr, 4 op/s
2026-03-10T08:39:47.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:47 vm02 bash[17473]: audit 2026-03-10T08:39:46.227552+0000 mon.vm02 (mon.0) 779 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:47.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:47 vm02 bash[17473]: audit 2026-03-10T08:39:46.228574+0000 mon.vm02 (mon.0) 780 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:39:47.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:47 vm02 bash[17473]: audit 2026-03-10T08:39:46.415372+0000 mgr.vm02.ttibzz (mgr.14195) 207 : audit [DBG] from='client.14608 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:39:47.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:47 vm02 bash[17473]: audit 2026-03-10T08:39:46.597935+0000 mgr.vm02.ttibzz (mgr.14195) 208 : audit [DBG] from='client.14612 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:39:47.540 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:47 vm02 bash[17473]: audit 2026-03-10T08:39:46.781721+0000 mon.vm02 (mon.0) 781 : audit [DBG] from='client.? 192.168.123.102:0/2255633618' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:39:49.539 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:49 vm02 bash[17473]: cluster 2026-03-10T08:39:48.222976+0000 mgr.vm02.ttibzz (mgr.14195) 209 : cluster [DBG] pgmap v132: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 2.3 KiB/s rd, 682 B/s wr, 2 op/s
2026-03-10T08:39:50.539 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:50 vm02 bash[17473]: audit 2026-03-10T08:39:49.238999+0000 mon.vm02 (mon.0) 782 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:39:51.539 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:51 vm02 bash[17473]: cluster 2026-03-10T08:39:50.223379+0000 mgr.vm02.ttibzz (mgr.14195) 210 : cluster [DBG] pgmap v133: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 682 B/s wr, 2 op/s
2026-03-10T08:39:51.960 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:39:52.103 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:39:52.103 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (46s) 0s ago 46s 92.9M - 19.2.3-678-ge911bdeb 654f31e6858e 137e91d3df67
2026-03-10T08:39:52.103 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (48s) 0s ago 48s 95.3M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:39:52.103 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (49s) 0s ago 49s 91.0M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:39:52.103 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (47s) 0s ago 47s 91.2M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:39:52.283 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK
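The loop keeps printing "Waiting for rgw.foo.vm02.bmgnwf to stop" while the table still says running because `ceph orch ps` serves the mgr's cached inventory; the REFRESHED column above shows how stale each row is, so a state change only appears after the next host refresh. The JSON form is easier to poll than the grep pipeline; a small sketch in which the daemon_name and status_desc field names are an assumption about current cephadm output rather than something visible in this log:

    import json
    import subprocess

    def daemon_states(daemon_type="rgw"):
        # Map each daemon to its reported state, e.g. "running" or "stopped".
        # Field names are assumed, not confirmed by this log.
        out = subprocess.check_output(
            ["ceph", "orch", "ps", "--daemon-type", daemon_type,
             "--format", "json"])
        return {d.get("daemon_name"): d.get("status_desc")
                for d in json.loads(out)}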
2026-03-10T08:39:52.789 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:52 vm02 bash[17473]: audit 2026-03-10T08:39:51.426732+0000 mon.vm02 (mon.0) 783 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:52.789 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:52 vm02 bash[17473]: audit 2026-03-10T08:39:51.431842+0000 mon.vm02 (mon.0) 784 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:52.789 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:52 vm02 bash[17473]: audit 2026-03-10T08:39:51.729296+0000 mon.vm02 (mon.0) 785 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:52.789 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:52 vm02 bash[17473]: audit 2026-03-10T08:39:51.734026+0000 mon.vm02 (mon.0) 786 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:52.789 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:52 vm02 bash[17473]: audit 2026-03-10T08:39:51.734758+0000 mon.vm02 (mon.0) 787 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:39:52.789 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:52 vm02 bash[17473]: audit 2026-03-10T08:39:51.735201+0000 mon.vm02 (mon.0) 788 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T08:39:52.789 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:52 vm02 bash[17473]: audit 2026-03-10T08:39:51.738299+0000 mon.vm02 (mon.0) 789 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:39:52.789 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:52 vm02 bash[17473]: audit 2026-03-10T08:39:51.739576+0000 mon.vm02 (mon.0) 790 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T08:39:52.789 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:52 vm02 bash[17473]: audit 2026-03-10T08:39:52.281378+0000 mon.vm02 (mon.0) 791 : audit [DBG] from='client.? 192.168.123.102:0/351343393' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:39:54.038 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:53 vm02 bash[17473]: audit 2026-03-10T08:39:51.940316+0000 mgr.vm02.ttibzz (mgr.14195) 211 : audit [DBG] from='client.14620 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:39:54.039 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:53 vm02 bash[17473]: audit 2026-03-10T08:39:52.098675+0000 mgr.vm02.ttibzz (mgr.14195) 212 : audit [DBG] from='client.14624 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:39:54.039 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:53 vm02 bash[17473]: cluster 2026-03-10T08:39:52.223781+0000 mgr.vm02.ttibzz (mgr.14195) 213 : cluster [DBG] pgmap v134: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 682 B/s wr, 2 op/s
2026-03-10T08:39:56.038 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:55 vm02 bash[17473]: cluster 2026-03-10T08:39:54.224178+0000 mgr.vm02.ttibzz (mgr.14195) 214 : cluster [DBG] pgmap v135: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 682 B/s wr, 0 op/s
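Each wait iteration also runs `ceph health detail`, which stays HEALTH_OK here since an operator-initiated stop of a single rgw daemon is not by itself a health warning. For scripted checks the machine-readable form is handier; a minimal sketch assuming only that `ceph health --format json` reports a top-level "status" field:

    import json
    import subprocess

    def cluster_status():
        # Expected to return "HEALTH_OK", "HEALTH_WARN" or "HEALTH_ERR";
        # the JSON layout is an assumption, not shown in this log.
        out = subprocess.check_output(["ceph", "health", "--format", "json"])
        return json.loads(out).get("status")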
KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 682 B/s wr, 0 op/s 2026-03-10T08:39:57.451 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop 2026-03-10T08:39:57.600 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:39:57.600 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (52s) 5s ago 52s 92.9M - 19.2.3-678-ge911bdeb 654f31e6858e 137e91d3df67 2026-03-10T08:39:57.600 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (54s) 5s ago 54s 95.3M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a 2026-03-10T08:39:57.600 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (54s) 6s ago 54s 91.0M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584 2026-03-10T08:39:57.600 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (53s) 6s ago 53s 91.2M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:39:57.791 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK 2026-03-10T08:39:58.038 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:57 vm02 bash[17473]: cluster 2026-03-10T08:39:56.224531+0000 mgr.vm02.ttibzz (mgr.14195) 215 : cluster [DBG] pgmap v136: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 682 B/s wr, 0 op/s 2026-03-10T08:39:58.038 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:57 vm02 bash[17473]: cluster 2026-03-10T08:39:56.224531+0000 mgr.vm02.ttibzz (mgr.14195) 215 : cluster [DBG] pgmap v136: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 682 B/s wr, 0 op/s 2026-03-10T08:39:59.038 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:58 vm02 bash[17473]: audit 2026-03-10T08:39:57.435583+0000 mgr.vm02.ttibzz (mgr.14195) 216 : audit [DBG] from='client.24431 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:39:59.038 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:58 vm02 bash[17473]: audit 2026-03-10T08:39:57.435583+0000 mgr.vm02.ttibzz (mgr.14195) 216 : audit [DBG] from='client.24431 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:39:59.038 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:58 vm02 bash[17473]: audit 2026-03-10T08:39:57.595102+0000 mgr.vm02.ttibzz (mgr.14195) 217 : audit [DBG] from='client.24433 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:39:59.038 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:58 vm02 bash[17473]: audit 2026-03-10T08:39:57.595102+0000 mgr.vm02.ttibzz (mgr.14195) 217 : audit [DBG] from='client.24433 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:39:59.038 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:58 vm02 bash[17473]: audit 2026-03-10T08:39:57.789812+0000 mon.vm02 (mon.0) 792 : audit [DBG] from='client.? 192.168.123.102:0/1293837981' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:39:59.038 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:58 vm02 bash[17473]: audit 2026-03-10T08:39:57.789812+0000 mon.vm02 (mon.0) 792 : audit [DBG] from='client.? 
192.168.123.102:0/1293837981' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:40:00.038 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:59 vm02 bash[17473]: cluster 2026-03-10T08:39:58.224876+0000 mgr.vm02.ttibzz (mgr.14195) 218 : cluster [DBG] pgmap v137: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:40:00.038 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:39:59 vm02 bash[17473]: cluster 2026-03-10T08:39:58.224876+0000 mgr.vm02.ttibzz (mgr.14195) 218 : cluster [DBG] pgmap v137: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:40:01.038 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:00 vm02 bash[17473]: cluster 2026-03-10T08:40:00.000085+0000 mon.vm02 (mon.0) 793 : cluster [INF] overall HEALTH_OK 2026-03-10T08:40:01.038 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:00 vm02 bash[17473]: cluster 2026-03-10T08:40:00.000085+0000 mon.vm02 (mon.0) 793 : cluster [INF] overall HEALTH_OK 2026-03-10T08:40:02.037 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:01 vm02 bash[17473]: cluster 2026-03-10T08:40:00.225236+0000 mgr.vm02.ttibzz (mgr.14195) 219 : cluster [DBG] pgmap v138: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:40:02.038 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:01 vm02 bash[17473]: cluster 2026-03-10T08:40:00.225236+0000 mgr.vm02.ttibzz (mgr.14195) 219 : cluster [DBG] pgmap v138: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:40:02.959 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop 2026-03-10T08:40:03.108 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:40:03.108 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (57s) 11s ago 57s 92.9M - 19.2.3-678-ge911bdeb 654f31e6858e 137e91d3df67 2026-03-10T08:40:03.108 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (59s) 11s ago 59s 95.3M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a 2026-03-10T08:40:03.108 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (60s) 11s ago 60s 91.0M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584 2026-03-10T08:40:03.108 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (58s) 11s ago 58s 91.2M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:40:03.292 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK 2026-03-10T08:40:04.287 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:03 vm02 bash[17473]: cluster 2026-03-10T08:40:02.225663+0000 mgr.vm02.ttibzz (mgr.14195) 220 : cluster [DBG] pgmap v139: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:40:04.287 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:03 vm02 bash[17473]: cluster 2026-03-10T08:40:02.225663+0000 mgr.vm02.ttibzz (mgr.14195) 220 : cluster [DBG] pgmap v139: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:40:04.287 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:03 vm02 bash[17473]: audit 2026-03-10T08:40:03.291109+0000 mon.vm02 (mon.0) 794 : audit [DBG] from='client.? 
192.168.123.102:0/3299559621' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:40:04.288 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:03 vm02 bash[17473]: audit 2026-03-10T08:40:03.291109+0000 mon.vm02 (mon.0) 794 : audit [DBG] from='client.? 192.168.123.102:0/3299559621' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:40:05.287 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:04 vm02 bash[17473]: audit 2026-03-10T08:40:02.943566+0000 mgr.vm02.ttibzz (mgr.14195) 221 : audit [DBG] from='client.14644 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:40:05.288 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:04 vm02 bash[17473]: audit 2026-03-10T08:40:02.943566+0000 mgr.vm02.ttibzz (mgr.14195) 221 : audit [DBG] from='client.14644 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:40:05.288 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:04 vm02 bash[17473]: audit 2026-03-10T08:40:03.104374+0000 mgr.vm02.ttibzz (mgr.14195) 222 : audit [DBG] from='client.14648 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:40:05.288 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:04 vm02 bash[17473]: audit 2026-03-10T08:40:03.104374+0000 mgr.vm02.ttibzz (mgr.14195) 222 : audit [DBG] from='client.14648 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:40:05.288 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:04 vm02 bash[17473]: audit 2026-03-10T08:40:04.202077+0000 mon.vm02 (mon.0) 795 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "5.19", "id": [4, 6]}]: dispatch 2026-03-10T08:40:05.288 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:04 vm02 bash[17473]: audit 2026-03-10T08:40:04.202077+0000 mon.vm02 (mon.0) 795 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "5.19", "id": [4, 6]}]: dispatch 2026-03-10T08:40:05.288 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:04 vm02 bash[17473]: audit 2026-03-10T08:40:04.238576+0000 mon.vm02 (mon.0) 796 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:40:05.288 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:04 vm02 bash[17473]: audit 2026-03-10T08:40:04.238576+0000 mon.vm02 (mon.0) 796 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:40:06.288 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:05 vm02 bash[17473]: cluster 2026-03-10T08:40:04.225978+0000 mgr.vm02.ttibzz (mgr.14195) 223 : cluster [DBG] pgmap v140: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:40:06.288 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:05 vm02 bash[17473]: cluster 2026-03-10T08:40:04.225978+0000 mgr.vm02.ttibzz (mgr.14195) 223 : cluster [DBG] pgmap v140: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 
2026-03-10T08:40:06.288 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:05 vm02 bash[17473]: audit 2026-03-10T08:40:04.795515+0000 mon.vm02 (mon.0) 797 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "5.19", "id": [4, 6]}]': finished
2026-03-10T08:40:06.288 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:05 vm02 bash[17473]: cluster 2026-03-10T08:40:04.807496+0000 mon.vm02 (mon.0) 798 : cluster [DBG] osdmap e33: 8 total, 8 up, 8 in
2026-03-10T08:40:07.037 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:06 vm02 bash[17473]: cluster 2026-03-10T08:40:05.803177+0000 mon.vm02 (mon.0) 799 : cluster [DBG] osdmap e34: 8 total, 8 up, 8 in
2026-03-10T08:40:07.037 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:06 vm02 bash[17473]: audit 2026-03-10T08:40:06.676488+0000 mon.vm02 (mon.0) 800 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:40:07.037 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:06 vm02 bash[17473]: audit 2026-03-10T08:40:06.681306+0000 mon.vm02 (mon.0) 801 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:40:07.038 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:06 vm02 bash[17473]: audit 2026-03-10T08:40:06.708341+0000 mon.vm02 (mon.0) 802 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:40:08.287 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:07 vm02 bash[17473]: cluster 2026-03-10T08:40:06.226256+0000 mgr.vm02.ttibzz (mgr.14195) 224 : cluster [DBG] pgmap v143: 129 pgs: 1 peering, 128 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail; 127 B/s rd, 255 B/s wr, 0 op/s
2026-03-10T08:40:08.470 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:40:08.621 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:40:08.621 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (63s) 16s ago 63s 92.9M - 19.2.3-678-ge911bdeb 654f31e6858e 137e91d3df67
2026-03-10T08:40:08.622 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (65s) 16s ago 65s 95.3M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:40:08.622 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (65s) 17s ago 65s 91.0M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:40:08.622 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (64s) 17s ago 64s 91.2M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:40:08.803 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK
2026-03-10T08:40:09.287 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:08 vm02 bash[17473]: audit 2026-03-10T08:40:08.801911+0000 mon.vm02 (mon.0) 803 : audit [DBG] from='client.? 192.168.123.102:0/1497983468' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:40:10.037 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:09 vm02 bash[17473]: cluster 2026-03-10T08:40:08.226608+0000 mgr.vm02.ttibzz (mgr.14195) 225 : cluster [DBG] pgmap v144: 129 pgs: 1 peering, 128 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:40:10.037 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:09 vm02 bash[17473]: audit 2026-03-10T08:40:08.454254+0000 mgr.vm02.ttibzz (mgr.14195) 226 : audit [DBG] from='client.14656 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:40:10.037 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:09 vm02 bash[17473]: audit 2026-03-10T08:40:08.618186+0000 mgr.vm02.ttibzz (mgr.14195) 227 : audit [DBG] from='client.14660 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:40:12.287 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:11 vm02 bash[17473]: cluster 2026-03-10T08:40:10.227811+0000 mgr.vm02.ttibzz (mgr.14195) 228 : cluster [DBG] pgmap v145: 129 pgs: 1 peering, 128 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:40:12.890 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:12 vm02 bash[17473]: audit 2026-03-10T08:40:11.886829+0000 mon.vm02 (mon.0) 804 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:40:12.890 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:12 vm02 bash[17473]: audit 2026-03-10T08:40:11.891920+0000 mon.vm02 (mon.0) 805 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:40:12.890 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:12 vm02 bash[17473]: audit 2026-03-10T08:40:12.193518+0000 mon.vm02 (mon.0) 806 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:40:12.890 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:12 vm02 bash[17473]: audit 2026-03-10T08:40:12.198848+0000 mon.vm02 (mon.0) 807 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:40:12.890 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:12 vm02 bash[17473]: audit 2026-03-10T08:40:12.199955+0000 mon.vm02 (mon.0) 808 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:40:12.890 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:12 vm02 bash[17473]: audit 2026-03-10T08:40:12.200712+0000 mon.vm02 (mon.0) 809 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T08:40:12.890 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:12 vm02 bash[17473]: audit 2026-03-10T08:40:12.204289+0000 mon.vm02 (mon.0) 810 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:40:12.890 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:12 vm02 bash[17473]: audit 2026-03-10T08:40:12.205592+0000 mon.vm02 (mon.0) 811 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T08:40:13.978 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:40:14.128 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:40:14.128 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 1s ago 68s - -
2026-03-10T08:40:14.128 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (70s) 1s ago 70s 97.1M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:40:14.128 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (71s) 2s ago 71s 92.0M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:40:14.128 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (69s) 2s ago 69s 92.3M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:40:14.287 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:13 vm02 bash[17473]: cluster 2026-03-10T08:40:12.201824+0000 mgr.vm02.ttibzz (mgr.14195) 229 : cluster [DBG] pgmap v146: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:40:14.287 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:13 vm02 bash[17473]: cluster 2026-03-10T08:40:13.197838+0000 mon.vm02 (mon.0) 812 : cluster [WRN] Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)
2026-03-10T08:40:14.306 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:40:14.306 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:40:14.306 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:40:15.287 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:14 vm02 bash[17473]: audit 2026-03-10T08:40:14.306151+0000 mon.vm02 (mon.0) 813 : audit [DBG] from='client.? 192.168.123.102:0/2561653173' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:40:16.287 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:15 vm02 bash[17473]: audit 2026-03-10T08:40:13.962744+0000 mgr.vm02.ttibzz (mgr.14195) 230 : audit [DBG] from='client.14668 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:40:16.287 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:15 vm02 bash[17473]: audit 2026-03-10T08:40:14.125037+0000 mgr.vm02.ttibzz (mgr.14195) 231 : audit [DBG] from='client.14672 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:40:16.287 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:15 vm02 bash[17473]: cluster 2026-03-10T08:40:14.202285+0000 mgr.vm02.ttibzz (mgr.14195) 232 : cluster [DBG] pgmap v147: 129 pgs: 129 active+clean; 454 KiB data, 222 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:40:18.287 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:17 vm02 bash[17473]: cluster 2026-03-10T08:40:16.202755+0000 mgr.vm02.ttibzz (mgr.14195) 233 : cluster [DBG] pgmap v148: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 295 B/s rd, 590 B/s wr, 0 op/s
2026-03-10T08:40:19.478 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:40:19.623 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:40:19.623 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 7s ago 74s - -
2026-03-10T08:40:19.623 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (76s) 7s ago 76s 97.1M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:40:19.623 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (76s) 7s ago 76s 92.0M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:40:19.623 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (75s) 7s ago 75s 92.3M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:40:19.805 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:40:19.805 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:40:19.805 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:40:20.037 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:19 vm02 bash[17473]: cluster 2026-03-10T08:40:18.203260+0000 mgr.vm02.ttibzz (mgr.14195) 234 : cluster [DBG] pgmap v149: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 256 B/s rd, 512 B/s wr, 0 op/s
2026-03-10T08:40:20.037 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:19 vm02 bash[17473]: audit 2026-03-10T08:40:19.243107+0000 mon.vm02 (mon.0) 814 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:40:20.037 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:19 vm02 bash[17473]: audit 2026-03-10T08:40:19.243800+0000 mon.vm02 (mon.0) 815 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:40:20.037 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:19 vm02 bash[17473]: audit 2026-03-10T08:40:19.804434+0000 mon.vm02 (mon.0) 816 : audit [DBG] from='client.? 192.168.123.102:0/3421172389' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:40:21.286 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:20 vm02 bash[17473]: audit 2026-03-10T08:40:19.462863+0000 mgr.vm02.ttibzz (mgr.14195) 235 : audit [DBG] from='client.14680 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:40:21.287 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:20 vm02 bash[17473]: audit 2026-03-10T08:40:19.619859+0000 mgr.vm02.ttibzz (mgr.14195) 236 : audit [DBG] from='client.14684 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:40:22.286 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:21 vm02 bash[17473]: cluster 2026-03-10T08:40:20.203679+0000 mgr.vm02.ttibzz (mgr.14195) 237 : cluster [DBG] pgmap v150: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 256 B/s rd, 512 B/s wr, 0 op/s
2026-03-10T08:40:23.286 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:22 vm02 bash[17473]: cluster 2026-03-10T08:40:22.204072+0000 mgr.vm02.ttibzz (mgr.14195) 238 : cluster [DBG] pgmap v151: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 256 B/s rd, 513 B/s wr, 0 op/s
2026-03-10T08:40:24.976 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:40:25.122 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:40:25.122 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 12s ago 79s - -
2026-03-10T08:40:25.122 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (81s) 12s ago 81s 97.1M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:40:25.122 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (82s) 13s ago 82s 92.0M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:40:25.122 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (80s) 13s ago 80s 92.3M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:40:25.312 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:40:25.312 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:40:25.312 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:40:25.536 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:25 vm02 bash[17473]: cluster 2026-03-10T08:40:24.204480+0000 mgr.vm02.ttibzz (mgr.14195) 239 : cluster [DBG] pgmap v152: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:40:26.536 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:26 vm02 bash[17473]: audit 2026-03-10T08:40:24.961957+0000 mgr.vm02.ttibzz (mgr.14195) 240 : audit [DBG] from='client.14692 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:40:26.537 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:26 vm02 bash[17473]: audit 2026-03-10T08:40:25.119112+0000 mgr.vm02.ttibzz (mgr.14195) 241 : audit [DBG] from='client.14696 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:40:26.537 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:26 vm02 bash[17473]: audit 2026-03-10T08:40:25.311955+0000 mon.vm07 (mon.1) 35 : audit [DBG] from='client.? 192.168.123.102:0/2432329089' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:40:27.536 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:27 vm02 bash[17473]: cluster 2026-03-10T08:40:26.204964+0000 mgr.vm02.ttibzz (mgr.14195) 242 : cluster [DBG] pgmap v153: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:40:29.694 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:29 vm02 bash[17473]: cluster 2026-03-10T08:40:28.205344+0000 mgr.vm02.ttibzz (mgr.14195) 243 : cluster [DBG] pgmap v154: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:40:30.489 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:40:30.642 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:40:30.642 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 18s ago 85s - -
2026-03-10T08:40:30.642 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (87s) 18s ago 87s 97.1M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:40:30.642 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (87s) 18s ago 87s 92.0M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:40:30.642 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (86s) 18s ago 86s 92.3M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:40:30.843 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:40:30.843 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:40:30.843 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:40:31.786 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:31 vm02 bash[17473]: cluster 2026-03-10T08:40:30.205745+0000 mgr.vm02.ttibzz (mgr.14195) 244 : cluster [DBG] pgmap v155: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:40:31.786 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:31 vm02 bash[17473]: audit 2026-03-10T08:40:30.472718+0000 mgr.vm02.ttibzz (mgr.14195) 245 : audit [DBG] from='client.24471 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:40:31.786 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:31 vm02 bash[17473]: audit 2026-03-10T08:40:30.640075+0000 mgr.vm02.ttibzz (mgr.14195) 246 : audit [DBG] from='client.14708 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:40:31.786 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:31 vm02 bash[17473]: audit 2026-03-10T08:40:30.843056+0000 mon.vm02 (mon.0) 817 : audit [DBG] from='client.? 192.168.123.102:0/389199557' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:40:33.786 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:33 vm02 bash[17473]: cluster 2026-03-10T08:40:32.206198+0000 mgr.vm02.ttibzz (mgr.14195) 247 : cluster [DBG] pgmap v156: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:40:34.786 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:34 vm02 bash[17473]: audit 2026-03-10T08:40:34.239025+0000 mon.vm02 (mon.0) 818 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:40:35.786 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:35 vm02 bash[17473]: cluster 2026-03-10T08:40:34.206626+0000 mgr.vm02.ttibzz (mgr.14195) 248 : cluster [DBG] pgmap v157: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:40:36.017 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:40:36.162 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:40:36.163 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 23s ago 90s - -
2026-03-10T08:40:36.163 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (92s) 23s ago 92s 97.1M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:40:36.163 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (93s) 24s ago 93s 92.0M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:40:36.163 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (91s) 24s ago 91s 92.3M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:40:36.345 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:40:36.346 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:40:36.346 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:40:36.786 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:36 vm02 bash[17473]: audit 2026-03-10T08:40:36.345646+0000 mon.vm02 (mon.0) 819 : audit [DBG] from='client.? 192.168.123.102:0/1872958176' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:40:37.786 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:37 vm02 bash[17473]: audit 2026-03-10T08:40:36.002664+0000 mgr.vm02.ttibzz (mgr.14195) 249 : audit [DBG] from='client.14716 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:40:37.786 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:37 vm02 bash[17473]: audit 2026-03-10T08:40:36.160577+0000 mgr.vm02.ttibzz (mgr.14195) 250 : audit [DBG] from='client.14720 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:40:37.786 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:37 vm02 bash[17473]: cluster 2026-03-10T08:40:36.207035+0000 mgr.vm02.ttibzz (mgr.14195) 251 : cluster [DBG] pgmap v158: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:40:39.693 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:39 vm02 bash[17473]: cluster 2026-03-10T08:40:38.207381+0000 mgr.vm02.ttibzz (mgr.14195) 252 : cluster [DBG] pgmap v159: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:40:41.514 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:40:41.659 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:40:41.659 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 29s ago 96s - -
2026-03-10T08:40:41.659 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (98s) 29s ago 98s 97.1M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:40:41.659 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (98s) 29s ago 98s 92.0M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:40:41.659 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (97s) 29s ago 97s 92.3M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:40:41.786 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:41 vm02 bash[17473]: cluster 2026-03-10T08:40:40.207697+0000 mgr.vm02.ttibzz (mgr.14195) 253 : cluster [DBG] pgmap v160: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:40:41.834 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:40:41.835 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:40:41.835 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:40:42.786 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:42 vm02 bash[17473]: audit 2026-03-10T08:40:41.500467+0000 mgr.vm02.ttibzz (mgr.14195) 254 : audit [DBG] from='client.14728 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:40:42.786 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:42 vm02 bash[17473]: audit 2026-03-10T08:40:41.657303+0000 mgr.vm02.ttibzz (mgr.14195) 255 : audit [DBG] from='client.14732 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:40:42.786 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:42 vm02 bash[17473]: audit 2026-03-10T08:40:41.835422+0000 mon.vm02 (mon.0) 820 : audit [DBG] from='client.? 192.168.123.102:0/3245585908' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:40:43.785 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:43 vm02 bash[17473]: cluster 2026-03-10T08:40:42.208094+0000 mgr.vm02.ttibzz (mgr.14195) 256 : cluster [DBG] pgmap v161: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:40:45.785 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:45 vm02 bash[17473]: cluster 2026-03-10T08:40:44.208549+0000 mgr.vm02.ttibzz (mgr.14195) 257 : cluster [DBG] pgmap v162: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:40:47.003 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:40:47.151 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:40:47.152 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 34s ago 101s - -
2026-03-10T08:40:47.152 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (103s) 34s ago 103s 97.1M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:40:47.152 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (104s) 35s ago 104s 92.0M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:40:47.152 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (102s) 35s ago 102s 92.3M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:40:47.335 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:40:47.335 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:40:47.335 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:40:47.786 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:47 vm02 bash[17473]: cluster 2026-03-10T08:40:46.208944+0000 mgr.vm02.ttibzz (mgr.14195) 258 : cluster [DBG] pgmap v163: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:40:47.786 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:47 vm02 bash[17473]: audit 2026-03-10T08:40:47.335674+0000 mon.vm02 (mon.0) 821 : audit [DBG] from='client.? 192.168.123.102:0/2791938525' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:40:48.785 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:48 vm02 bash[17473]: audit 2026-03-10T08:40:46.988664+0000 mgr.vm02.ttibzz (mgr.14195) 259 : audit [DBG] from='client.14740 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:40:48.786 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:48 vm02 bash[17473]: audit 2026-03-10T08:40:47.149919+0000 mgr.vm02.ttibzz (mgr.14195) 260 : audit [DBG] from='client.14744 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:40:49.785 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:49 vm02 bash[17473]: cluster 2026-03-10T08:40:48.209279+0000 mgr.vm02.ttibzz (mgr.14195) 261 : cluster [DBG] pgmap v164: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:40:49.786 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:49 vm02 bash[17473]: audit 2026-03-10T08:40:49.239373+0000 mon.vm02 (mon.0) 822 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:40:52.035 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:51 vm02 bash[17473]: cluster 2026-03-10T08:40:50.209638+0000 mgr.vm02.ttibzz (mgr.14195) 262 : cluster [DBG] pgmap v165: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:40:52.511 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop 2026-03-10T08:40:52.661 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:40:52.661 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 40s ago 107s - - 2026-03-10T08:40:52.661 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (109s) 40s ago 109s 97.1M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a 2026-03-10T08:40:52.661 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (109s) 40s ago 109s 92.0M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584 2026-03-10T08:40:52.661 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (108s) 40s ago 108s 92.3M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:40:52.858 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:40:52.858 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:40:52.858 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state 2026-03-10T08:40:54.035 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:53 vm02 bash[17473]: cluster 2026-03-10T08:40:52.210042+0000 mgr.vm02.ttibzz (mgr.14195) 263 : cluster [DBG] pgmap v166: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:40:54.035 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:53 vm02 bash[17473]: cluster 2026-03-10T08:40:52.210042+0000 mgr.vm02.ttibzz (mgr.14195) 263 : cluster [DBG] pgmap v166: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:40:54.036 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:53 vm02 bash[17473]: audit 2026-03-10T08:40:52.496662+0000 mgr.vm02.ttibzz (mgr.14195) 264 : audit [DBG] from='client.14752 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:40:54.036 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:53 vm02 bash[17473]: audit 2026-03-10T08:40:52.496662+0000 mgr.vm02.ttibzz (mgr.14195) 264 : audit [DBG] from='client.14752 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:40:54.036 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:53 vm02 bash[17473]: audit 2026-03-10T08:40:52.659469+0000 mgr.vm02.ttibzz (mgr.14195) 265 : audit [DBG] from='client.24505 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:40:54.036 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:53 vm02 bash[17473]: audit 2026-03-10T08:40:52.659469+0000 mgr.vm02.ttibzz (mgr.14195) 265 : audit [DBG] from='client.24505 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:40:54.036 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:53 vm02 bash[17473]: audit 2026-03-10T08:40:52.859060+0000 mon.vm02 (mon.0) 823 : audit [DBG] from='client.? 
192.168.123.102:0/130002378' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:40:54.036 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:53 vm02 bash[17473]: audit 2026-03-10T08:40:52.859060+0000 mon.vm02 (mon.0) 823 : audit [DBG] from='client.? 192.168.123.102:0/130002378' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:40:56.015 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:55 vm02 bash[17473]: cluster 2026-03-10T08:40:54.210484+0000 mgr.vm02.ttibzz (mgr.14195) 266 : cluster [DBG] pgmap v167: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:40:56.015 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:55 vm02 bash[17473]: cluster 2026-03-10T08:40:54.210484+0000 mgr.vm02.ttibzz (mgr.14195) 266 : cluster [DBG] pgmap v167: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:40:58.028 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop 2026-03-10T08:40:58.035 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:57 vm02 bash[17473]: cluster 2026-03-10T08:40:56.210922+0000 mgr.vm02.ttibzz (mgr.14195) 267 : cluster [DBG] pgmap v168: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:40:58.035 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:57 vm02 bash[17473]: cluster 2026-03-10T08:40:56.210922+0000 mgr.vm02.ttibzz (mgr.14195) 267 : cluster [DBG] pgmap v168: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:40:58.177 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:40:58.177 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 45s ago 112s - - 2026-03-10T08:40:58.177 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (114s) 45s ago 114s 97.1M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a 2026-03-10T08:40:58.177 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (115s) 46s ago 115s 92.0M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584 2026-03-10T08:40:58.177 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (113s) 46s ago 113s 92.3M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:40:58.376 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:40:58.377 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:40:58.377 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state 2026-03-10T08:40:59.035 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:58 vm02 bash[17473]: audit 2026-03-10T08:40:58.377441+0000 mon.vm02 (mon.0) 824 : audit [DBG] from='client.? 192.168.123.102:0/797182064' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:40:59.035 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:58 vm02 bash[17473]: audit 2026-03-10T08:40:58.377441+0000 mon.vm02 (mon.0) 824 : audit [DBG] from='client.? 
192.168.123.102:0/797182064' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:41:00.035 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:59 vm02 bash[17473]: audit 2026-03-10T08:40:58.014189+0000 mgr.vm02.ttibzz (mgr.14195) 268 : audit [DBG] from='client.14764 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:41:00.035 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:59 vm02 bash[17473]: audit 2026-03-10T08:40:58.014189+0000 mgr.vm02.ttibzz (mgr.14195) 268 : audit [DBG] from='client.14764 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:41:00.035 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:59 vm02 bash[17473]: audit 2026-03-10T08:40:58.175171+0000 mgr.vm02.ttibzz (mgr.14195) 269 : audit [DBG] from='client.14768 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:41:00.035 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:59 vm02 bash[17473]: audit 2026-03-10T08:40:58.175171+0000 mgr.vm02.ttibzz (mgr.14195) 269 : audit [DBG] from='client.14768 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:41:00.035 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:59 vm02 bash[17473]: cluster 2026-03-10T08:40:58.211274+0000 mgr.vm02.ttibzz (mgr.14195) 270 : cluster [DBG] pgmap v169: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:41:00.035 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:40:59 vm02 bash[17473]: cluster 2026-03-10T08:40:58.211274+0000 mgr.vm02.ttibzz (mgr.14195) 270 : cluster [DBG] pgmap v169: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:41:02.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:01 vm02 bash[17473]: cluster 2026-03-10T08:41:00.211736+0000 mgr.vm02.ttibzz (mgr.14195) 271 : cluster [DBG] pgmap v170: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:41:02.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:01 vm02 bash[17473]: cluster 2026-03-10T08:41:00.211736+0000 mgr.vm02.ttibzz (mgr.14195) 271 : cluster [DBG] pgmap v170: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:41:03.546 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop 2026-03-10T08:41:03.688 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:41:03.689 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 51s ago 118s - - 2026-03-10T08:41:03.689 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (2m) 51s ago 2m 97.1M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a 2026-03-10T08:41:03.689 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (2m) 51s ago 2m 92.0M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584 2026-03-10T08:41:03.689 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (119s) 51s ago 119s 92.3M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:41:03.877 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:41:03.877 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 
2026-03-10T08:41:03.877 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:41:04.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:03 vm02 bash[17473]: cluster 2026-03-10T08:41:02.212200+0000 mgr.vm02.ttibzz (mgr.14195) 272 : cluster [DBG] pgmap v171: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:41:05.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:04 vm02 bash[17473]: audit 2026-03-10T08:41:03.532516+0000 mgr.vm02.ttibzz (mgr.14195) 273 : audit [DBG] from='client.14776 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:41:05.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:04 vm02 bash[17473]: audit 2026-03-10T08:41:03.687208+0000 mgr.vm02.ttibzz (mgr.14195) 274 : audit [DBG] from='client.14780 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:41:05.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:04 vm02 bash[17473]: audit 2026-03-10T08:41:03.877845+0000 mon.vm02 (mon.0) 825 : audit [DBG] from='client.? 192.168.123.102:0/11519677' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:41:05.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:04 vm02 bash[17473]: audit 2026-03-10T08:41:04.239601+0000 mon.vm02 (mon.0) 826 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:41:06.132 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:05 vm02 bash[17473]: cluster 2026-03-10T08:41:04.212601+0000 mgr.vm02.ttibzz (mgr.14195) 275 : cluster [DBG] pgmap v172: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:41:08.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:07 vm02 bash[17473]: cluster 2026-03-10T08:41:06.213001+0000 mgr.vm02.ttibzz (mgr.14195) 276 : cluster [DBG] pgmap v173: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:41:09.048 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:41:09.204 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:41:09.204 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 57s ago 2m - -
2026-03-10T08:41:09.204 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (2m) 57s ago 2m 97.1M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:41:09.204 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (2m) 57s ago 2m 92.0M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:41:09.204 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (2m) 57s ago 2m 92.3M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:41:09.394 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:41:09.394 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:41:09.394 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:41:10.035 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:09 vm02 bash[17473]: cluster 2026-03-10T08:41:08.213326+0000 mgr.vm02.ttibzz (mgr.14195) 277 : cluster [DBG] pgmap v174: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:41:10.035 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:09 vm02 bash[17473]: audit 2026-03-10T08:41:09.394946+0000 mon.vm02 (mon.0) 827 : audit [DBG] from='client.? 192.168.123.102:0/227914844' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:41:11.184 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:10 vm02 bash[17473]: audit 2026-03-10T08:41:09.032569+0000 mgr.vm02.ttibzz (mgr.14195) 278 : audit [DBG] from='client.14788 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:41:11.184 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:10 vm02 bash[17473]: audit 2026-03-10T08:41:09.202561+0000 mgr.vm02.ttibzz (mgr.14195) 279 : audit [DBG] from='client.14792 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:41:12.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:11 vm02 bash[17473]: cluster 2026-03-10T08:41:10.213661+0000 mgr.vm02.ttibzz (mgr.14195) 280 : cluster [DBG] pgmap v175: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:41:13.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:12 vm02 bash[17473]: audit 2026-03-10T08:41:12.244983+0000 mon.vm02 (mon.0) 828 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:41:14.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:13 vm02 bash[17473]: cluster 2026-03-10T08:41:12.214137+0000 mgr.vm02.ttibzz (mgr.14195) 281 : cluster [DBG] pgmap v176: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:41:14.571 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:41:14.715 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:41:14.715 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 62s ago 2m - -
2026-03-10T08:41:14.715 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (2m) 62s ago 2m 97.1M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:41:14.715 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (2m) 62s ago 2m 92.0M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:41:14.715 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (2m) 62s ago 2m 92.3M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:41:14.896 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:41:14.896 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:41:14.896 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:41:16.240 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:15 vm02 bash[17473]: cluster 2026-03-10T08:41:14.214620+0000 mgr.vm02.ttibzz (mgr.14195) 282 : cluster [DBG] pgmap v177: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:41:16.240 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:15 vm02 bash[17473]: audit 2026-03-10T08:41:14.557684+0000 mgr.vm02.ttibzz (mgr.14195) 283 : audit [DBG] from='client.14800 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:41:16.240 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:15 vm02 bash[17473]: audit 2026-03-10T08:41:14.713798+0000 mgr.vm02.ttibzz (mgr.14195) 284 : audit [DBG] from='client.14804 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:41:16.240 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:15 vm02 bash[17473]: audit 2026-03-10T08:41:14.897562+0000 mon.vm02 (mon.0) 829 : audit [DBG] from='client.? 192.168.123.102:0/2795856751' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:41:18.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:17 vm02 bash[17473]: cluster 2026-03-10T08:41:16.215061+0000 mgr.vm02.ttibzz (mgr.14195) 285 : cluster [DBG] pgmap v178: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:41:18.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:17 vm02 bash[17473]: audit 2026-03-10T08:41:17.499252+0000 mon.vm02 (mon.0) 830 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:41:18.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:17 vm02 bash[17473]: audit 2026-03-10T08:41:17.504067+0000 mon.vm02 (mon.0) 831 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:41:18.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:17 vm02 bash[17473]: audit 2026-03-10T08:41:17.788413+0000 mon.vm02 (mon.0) 832 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:41:18.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:17 vm02 bash[17473]: audit 2026-03-10T08:41:17.797879+0000 mon.vm02 (mon.0) 833 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:41:19.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:18 vm02 bash[17473]: audit 2026-03-10T08:41:18.079846+0000 mon.vm02 (mon.0) 834 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
generate-minimal-conf"}]: dispatch 2026-03-10T08:41:19.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:18 vm02 bash[17473]: audit 2026-03-10T08:41:18.080433+0000 mon.vm02 (mon.0) 835 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T08:41:19.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:18 vm02 bash[17473]: audit 2026-03-10T08:41:18.080433+0000 mon.vm02 (mon.0) 835 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T08:41:19.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:18 vm02 bash[17473]: audit 2026-03-10T08:41:18.085166+0000 mon.vm02 (mon.0) 836 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:41:19.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:18 vm02 bash[17473]: audit 2026-03-10T08:41:18.085166+0000 mon.vm02 (mon.0) 836 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:41:19.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:18 vm02 bash[17473]: audit 2026-03-10T08:41:18.086728+0000 mon.vm02 (mon.0) 837 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T08:41:19.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:18 vm02 bash[17473]: audit 2026-03-10T08:41:18.086728+0000 mon.vm02 (mon.0) 837 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T08:41:20.035 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:19 vm02 bash[17473]: cluster 2026-03-10T08:41:18.081286+0000 mgr.vm02.ttibzz (mgr.14195) 286 : cluster [DBG] pgmap v179: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 258 B/s rd, 517 B/s wr, 0 op/s 2026-03-10T08:41:20.035 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:19 vm02 bash[17473]: cluster 2026-03-10T08:41:18.081286+0000 mgr.vm02.ttibzz (mgr.14195) 286 : cluster [DBG] pgmap v179: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 258 B/s rd, 517 B/s wr, 0 op/s 2026-03-10T08:41:20.035 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:19 vm02 bash[17473]: audit 2026-03-10T08:41:19.241287+0000 mon.vm02 (mon.0) 838 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:41:20.035 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:19 vm02 bash[17473]: audit 2026-03-10T08:41:19.241287+0000 mon.vm02 (mon.0) 838 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:41:20.069 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop 2026-03-10T08:41:20.224 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:41:20.224 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 2s ago 2m - - 2026-03-10T08:41:20.224 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (2m) 2s ago 2m 102M - 19.2.3-678-ge911bdeb 
2026-03-10T08:41:20.224 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (2m) 2s ago 2m 95.1M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:41:20.224 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (2m) 2s ago 2m 95.5M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:41:20.406 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:41:20.406 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:41:20.406 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:41:21.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:20 vm02 bash[17473]: audit 2026-03-10T08:41:20.407306+0000 mon.vm02 (mon.0) 839 : audit [DBG] from='client.? 192.168.123.102:0/3990471894' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:41:22.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:21 vm02 bash[17473]: audit 2026-03-10T08:41:20.055046+0000 mgr.vm02.ttibzz (mgr.14195) 287 : audit [DBG] from='client.14812 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:41:22.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:21 vm02 bash[17473]: cluster 2026-03-10T08:41:20.081674+0000 mgr.vm02.ttibzz (mgr.14195) 288 : cluster [DBG] pgmap v180: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 258 B/s rd, 517 B/s wr, 0 op/s
2026-03-10T08:41:22.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:21 vm02 bash[17473]: audit 2026-03-10T08:41:20.222821+0000 mgr.vm02.ttibzz (mgr.14195) 289 : audit [DBG] from='client.14816 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:41:24.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:23 vm02 bash[17473]: cluster 2026-03-10T08:41:22.082145+0000 mgr.vm02.ttibzz (mgr.14195) 290 : cluster [DBG] pgmap v181: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 258 B/s rd, 517 B/s wr, 0 op/s
2026-03-10T08:41:25.574 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:41:25.733 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:41:25.733 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 7s ago 2m - -
2026-03-10T08:41:25.733 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (2m) 7s ago 2m 102M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:41:25.733 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (2m) 8s ago 2m 95.1M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:41:25.733 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (2m) 8s ago 2m 95.5M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:41:25.914 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:41:25.914 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:41:25.914 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:41:26.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:25 vm02 bash[17473]: cluster 2026-03-10T08:41:24.082574+0000 mgr.vm02.ttibzz (mgr.14195) 291 : cluster [DBG] pgmap v182: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 258 B/s rd, 517 B/s wr, 0 op/s
2026-03-10T08:41:26.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:25 vm02 bash[17473]: audit 2026-03-10T08:41:25.915288+0000 mon.vm02 (mon.0) 840 : audit [DBG] from='client.? 192.168.123.102:0/267946604' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:41:27.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:26 vm02 bash[17473]: audit 2026-03-10T08:41:25.559778+0000 mgr.vm02.ttibzz (mgr.14195) 292 : audit [DBG] from='client.14824 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:41:27.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:26 vm02 bash[17473]: audit 2026-03-10T08:41:25.732339+0000 mgr.vm02.ttibzz (mgr.14195) 293 : audit [DBG] from='client.14828 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:41:28.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:27 vm02 bash[17473]: cluster 2026-03-10T08:41:26.083042+0000 mgr.vm02.ttibzz (mgr.14195) 294 : cluster [DBG] pgmap v183: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 258 B/s rd, 517 B/s wr, 0 op/s
2026-03-10T08:41:29.285 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:28 vm02 bash[17473]: cluster 2026-03-10T08:41:28.083444+0000 mgr.vm02.ttibzz (mgr.14195) 295 : cluster [DBG] pgmap v184: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:41:31.081 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:41:31.235 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:41:31.235 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 13s ago 2m - -
2026-03-10T08:41:31.235 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (2m) 13s ago 2m 102M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:41:31.235 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (2m) 13s ago 2m 95.1M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:41:31.235 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (2m) 13s ago 2m 95.5M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:41:31.400 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:31 vm02 bash[17473]: cluster 2026-03-10T08:41:30.083899+0000 mgr.vm02.ttibzz (mgr.14195) 296 : cluster [DBG] pgmap v185: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:41:31.426 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:41:31.426 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:41:31.426 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:41:32.535 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:32 vm02 bash[17473]: audit 2026-03-10T08:41:31.067403+0000 mgr.vm02.ttibzz (mgr.14195) 297 : audit [DBG] from='client.14836 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:41:32.535 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:32 vm02 bash[17473]: audit 2026-03-10T08:41:31.233796+0000 mgr.vm02.ttibzz (mgr.14195) 298 : audit [DBG] from='client.14840 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:41:32.535 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:32 vm02 bash[17473]: audit 2026-03-10T08:41:31.427648+0000 mon.vm02 (mon.0) 841 : audit [DBG] from='client.? 192.168.123.102:0/1756714901' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:41:33.535 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:33 vm02 bash[17473]: cluster 2026-03-10T08:41:32.084321+0000 mgr.vm02.ttibzz (mgr.14195) 299 : cluster [DBG] pgmap v186: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:41:35.535 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:35 vm02 bash[17473]: cluster 2026-03-10T08:41:34.084730+0000 mgr.vm02.ttibzz (mgr.14195) 300 : cluster [DBG] pgmap v187: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:41:35.535 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:35 vm02 bash[17473]: audit 2026-03-10T08:41:34.240082+0000 mon.vm02 (mon.0) 842 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:41:36.615 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:41:36.762 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:41:36.762 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 18s ago 2m - -
2026-03-10T08:41:36.762 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (2m) 18s ago 2m 102M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:41:36.762 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (2m) 19s ago 2m 95.1M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:41:36.762 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (2m) 19s ago 2m 95.5M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:41:36.952 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:41:36.952 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:41:36.952 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:41:37.535 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:37 vm02 bash[17473]: cluster 2026-03-10T08:41:36.085131+0000 mgr.vm02.ttibzz (mgr.14195) 301 : cluster [DBG] pgmap v188: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:41:37.535 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:37 vm02 bash[17473]: audit 2026-03-10T08:41:36.602017+0000 mgr.vm02.ttibzz (mgr.14195) 302 : audit [DBG] from='client.14848 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:41:37.535 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:37 vm02 bash[17473]: audit 2026-03-10T08:41:36.761191+0000 mgr.vm02.ttibzz (mgr.14195) 303 : audit [DBG] from='client.14852 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:41:37.535 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:37 vm02 bash[17473]: audit 2026-03-10T08:41:36.953941+0000 mon.vm02 (mon.0) 843 : audit [DBG] from='client.? 192.168.123.102:0/1806393965' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:41:39.535 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:39 vm02 bash[17473]: cluster 2026-03-10T08:41:38.085501+0000 mgr.vm02.ttibzz (mgr.14195) 304 : cluster [DBG] pgmap v189: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:41:41.510 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:41 vm02 bash[17473]: cluster 2026-03-10T08:41:40.085894+0000 mgr.vm02.ttibzz (mgr.14195) 305 : cluster [DBG] pgmap v190: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:41:42.117 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:41:42.268 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:41:42.268 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 24s ago 2m - -
2026-03-10T08:41:42.268 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (2m) 24s ago 2m 102M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:41:42.268 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (2m) 24s ago 2m 95.1M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:41:42.268 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (2m) 24s ago 2m 95.5M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:41:42.456 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:41:42.456 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:41:42.456 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:41:43.535 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:43 vm02 bash[17473]: cluster 2026-03-10T08:41:42.086292+0000 mgr.vm02.ttibzz (mgr.14195) 306 : cluster [DBG] pgmap v191: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:41:43.535 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:43 vm02 bash[17473]: audit 2026-03-10T08:41:42.104127+0000 mgr.vm02.ttibzz (mgr.14195) 307 : audit [DBG] from='client.14860 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:41:43.535 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:43 vm02 bash[17473]: audit 2026-03-10T08:41:42.266564+0000 mgr.vm02.ttibzz (mgr.14195) 308 : audit [DBG] from='client.14864 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:41:43.535 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:43 vm02 bash[17473]: audit 2026-03-10T08:41:42.266564+0000 mgr.vm02.ttibzz (mgr.14195) 308 : audit [DBG] from='client.14864 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:41:43.535 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:43 vm02 bash[17473]: audit 2026-03-10T08:41:42.457195+0000 mon.vm02 (mon.0) 844 : audit [DBG] from='client.? 192.168.123.102:0/2381450786' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:41:43.535 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:43 vm02 bash[17473]: audit 2026-03-10T08:41:42.457195+0000 mon.vm02 (mon.0) 844 : audit [DBG] from='client.? 192.168.123.102:0/2381450786' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:41:45.535 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:45 vm02 bash[17473]: cluster 2026-03-10T08:41:44.086685+0000 mgr.vm02.ttibzz (mgr.14195) 309 : cluster [DBG] pgmap v192: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:41:45.535 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:45 vm02 bash[17473]: cluster 2026-03-10T08:41:44.086685+0000 mgr.vm02.ttibzz (mgr.14195) 309 : cluster [DBG] pgmap v192: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:41:47.627 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop 2026-03-10T08:41:47.775 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:41:47.775 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 29s ago 2m - - 2026-03-10T08:41:47.775 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (2m) 29s ago 2m 102M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a 2026-03-10T08:41:47.775 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (2m) 30s ago 2m 95.1M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584 2026-03-10T08:41:47.775 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (2m) 30s ago 2m 95.5M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:41:47.785 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:47 vm02 bash[17473]: cluster 2026-03-10T08:41:46.087111+0000 mgr.vm02.ttibzz (mgr.14195) 310 : cluster [DBG] pgmap v193: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:41:47.785 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:47 vm02 bash[17473]: cluster 2026-03-10T08:41:46.087111+0000 mgr.vm02.ttibzz (mgr.14195) 310 : cluster [DBG] pgmap v193: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:41:47.955 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:41:47.955 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] 
2026-03-10T08:41:47.955 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:41:48.785 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:48 vm02 bash[17473]: audit 2026-03-10T08:41:47.613568+0000 mgr.vm02.ttibzz (mgr.14195) 311 : audit [DBG] from='client.14872 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:41:48.785 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:48 vm02 bash[17473]: audit 2026-03-10T08:41:47.774166+0000 mgr.vm02.ttibzz (mgr.14195) 312 : audit [DBG] from='client.14876 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:41:48.785 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:48 vm02 bash[17473]: audit 2026-03-10T08:41:47.956252+0000 mon.vm02 (mon.0) 845 : audit [DBG] from='client.? 192.168.123.102:0/512621061' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:41:49.692 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:49 vm02 bash[17473]: cluster 2026-03-10T08:41:48.087497+0000 mgr.vm02.ttibzz (mgr.14195) 313 : cluster [DBG] pgmap v194: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:41:49.692 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:49 vm02 bash[17473]: audit 2026-03-10T08:41:49.240454+0000 mon.vm02 (mon.0) 846 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:41:51.620 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:51 vm02 bash[17473]: cluster 2026-03-10T08:41:50.087958+0000 mgr.vm02.ttibzz (mgr.14195) 314 : cluster [DBG] pgmap v195: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:41:53.114 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:41:53.270 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:41:53.270 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 35s ago 2m - -
2026-03-10T08:41:53.270 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (2m) 35s ago 2m 102M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:41:53.270 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (2m) 35s ago 2m 95.1M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:41:53.270 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (2m) 35s ago 2m 95.5M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:41:53.459 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:41:53.459 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:41:53.459 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:41:53.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:53 vm02 bash[17473]: cluster 2026-03-10T08:41:52.088419+0000 mgr.vm02.ttibzz (mgr.14195) 315 : cluster [DBG] pgmap v196: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:41:54.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:54 vm02 bash[17473]: audit 2026-03-10T08:41:53.102281+0000 mgr.vm02.ttibzz (mgr.14195) 316 : audit [DBG] from='client.14884 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:41:54.785 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:54 vm02 bash[17473]: audit 2026-03-10T08:41:53.267748+0000 mgr.vm02.ttibzz (mgr.14195) 317 : audit [DBG] from='client.14888 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:41:54.785 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:54 vm02 bash[17473]: audit 2026-03-10T08:41:53.460637+0000 mon.vm02 (mon.0) 847 : audit [DBG] from='client.? 192.168.123.102:0/1228051222' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:41:55.785 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:55 vm02 bash[17473]: cluster 2026-03-10T08:41:54.088790+0000 mgr.vm02.ttibzz (mgr.14195) 318 : cluster [DBG] pgmap v197: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:41:57.785 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:57 vm02 bash[17473]: cluster 2026-03-10T08:41:56.089179+0000 mgr.vm02.ttibzz (mgr.14195) 319 : cluster [DBG] pgmap v198: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:41:58.628 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:41:58.775 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:41:58.775 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 40s ago 2m - -
2026-03-10T08:41:58.775 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (2m) 40s ago 2m 102M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:41:58.775 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (2m) 41s ago 2m 95.1M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:41:58.775 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (2m) 41s ago 2m 95.5M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:41:58.968 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:41:58.968 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:41:58.968 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:41:59.692 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:59 vm02 bash[17473]: cluster 2026-03-10T08:41:58.089624+0000 mgr.vm02.ttibzz (mgr.14195) 320 : cluster [DBG] pgmap v199: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:41:59.692 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:59 vm02 bash[17473]: audit 2026-03-10T08:41:58.615221+0000 mgr.vm02.ttibzz (mgr.14195) 321 : audit [DBG] from='client.14896 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:41:59.692 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:59 vm02 bash[17473]: audit 2026-03-10T08:41:58.774434+0000 mgr.vm02.ttibzz (mgr.14195) 322 : audit [DBG] from='client.14900 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:41:59.692 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:41:59 vm02 bash[17473]: audit 2026-03-10T08:41:58.970031+0000 mon.vm02 (mon.0) 848 : audit [DBG] from='client.? 192.168.123.102:0/2894600047' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:42:01.728 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:01 vm02 bash[17473]: cluster 2026-03-10T08:42:00.089975+0000 mgr.vm02.ttibzz (mgr.14195) 323 : cluster [DBG] pgmap v200: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:42:03.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:03 vm02 bash[17473]: cluster 2026-03-10T08:42:02.090372+0000 mgr.vm02.ttibzz (mgr.14195) 324 : cluster [DBG] pgmap v201: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:42:04.130 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:42:04.282 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:42:04.282 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 46s ago 2m - -
2026-03-10T08:42:04.282 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (3m) 46s ago 3m 102M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:42:04.283 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (3m) 46s ago 3m 95.1M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:42:04.283 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (2m) 46s ago 2m 95.5M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:42:04.461 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:42:04.462 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:42:04.462 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state 2026-03-10T08:42:04.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:04 vm02 bash[17473]: audit 2026-03-10T08:42:04.240455+0000 mon.vm02 (mon.0) 849 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:42:04.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:04 vm02 bash[17473]: audit 2026-03-10T08:42:04.240455+0000 mon.vm02 (mon.0) 849 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:42:05.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:05 vm02 bash[17473]: cluster 2026-03-10T08:42:04.090814+0000 mgr.vm02.ttibzz (mgr.14195) 325 : cluster [DBG] pgmap v202: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:05.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:05 vm02 bash[17473]: cluster 2026-03-10T08:42:04.090814+0000 mgr.vm02.ttibzz (mgr.14195) 325 : cluster [DBG] pgmap v202: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:05.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:05 vm02 bash[17473]: audit 2026-03-10T08:42:04.118296+0000 mgr.vm02.ttibzz (mgr.14195) 326 : audit [DBG] from='client.14908 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:05.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:05 vm02 bash[17473]: audit 2026-03-10T08:42:04.118296+0000 mgr.vm02.ttibzz (mgr.14195) 326 : audit [DBG] from='client.14908 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:05.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:05 vm02 bash[17473]: audit 2026-03-10T08:42:04.282195+0000 mgr.vm02.ttibzz (mgr.14195) 327 : audit [DBG] from='client.14912 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:05.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:05 vm02 bash[17473]: audit 2026-03-10T08:42:04.282195+0000 mgr.vm02.ttibzz (mgr.14195) 327 : audit [DBG] from='client.14912 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:05.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:05 vm02 bash[17473]: audit 2026-03-10T08:42:04.463559+0000 mon.vm02 (mon.0) 850 : audit [DBG] from='client.? 192.168.123.102:0/3212909' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:42:05.785 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:05 vm02 bash[17473]: audit 2026-03-10T08:42:04.463559+0000 mon.vm02 (mon.0) 850 : audit [DBG] from='client.? 
192.168.123.102:0/3212909' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:42:07.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:07 vm02 bash[17473]: cluster 2026-03-10T08:42:06.091242+0000 mgr.vm02.ttibzz (mgr.14195) 328 : cluster [DBG] pgmap v203: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:07.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:07 vm02 bash[17473]: cluster 2026-03-10T08:42:06.091242+0000 mgr.vm02.ttibzz (mgr.14195) 328 : cluster [DBG] pgmap v203: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:09.624 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop 2026-03-10T08:42:09.692 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:09 vm02 bash[17473]: cluster 2026-03-10T08:42:08.091682+0000 mgr.vm02.ttibzz (mgr.14195) 329 : cluster [DBG] pgmap v204: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:09.692 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:09 vm02 bash[17473]: cluster 2026-03-10T08:42:08.091682+0000 mgr.vm02.ttibzz (mgr.14195) 329 : cluster [DBG] pgmap v204: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:09.777 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:42:09.777 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 51s ago 3m - - 2026-03-10T08:42:09.777 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (3m) 51s ago 3m 102M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a 2026-03-10T08:42:09.777 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (3m) 52s ago 3m 95.1M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584 2026-03-10T08:42:09.777 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (3m) 52s ago 3m 95.5M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:42:09.955 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:42:09.955 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:42:09.955 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state 2026-03-10T08:42:10.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:10 vm02 bash[17473]: audit 2026-03-10T08:42:09.612258+0000 mgr.vm02.ttibzz (mgr.14195) 330 : audit [DBG] from='client.14920 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:10.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:10 vm02 bash[17473]: audit 2026-03-10T08:42:09.612258+0000 mgr.vm02.ttibzz (mgr.14195) 330 : audit [DBG] from='client.14920 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:10.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:10 vm02 bash[17473]: audit 2026-03-10T08:42:09.776417+0000 mgr.vm02.ttibzz (mgr.14195) 331 : audit [DBG] from='client.14924 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:10.785 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:10 vm02 bash[17473]: audit 2026-03-10T08:42:09.776417+0000 mgr.vm02.ttibzz (mgr.14195) 331 : audit 
[DBG] from='client.14924 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:10.785 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:10 vm02 bash[17473]: audit 2026-03-10T08:42:09.956864+0000 mon.vm02 (mon.0) 851 : audit [DBG] from='client.? 192.168.123.102:0/1652669069' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:42:10.785 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:10 vm02 bash[17473]: audit 2026-03-10T08:42:09.956864+0000 mon.vm02 (mon.0) 851 : audit [DBG] from='client.? 192.168.123.102:0/1652669069' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:42:11.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:11 vm02 bash[17473]: cluster 2026-03-10T08:42:10.092097+0000 mgr.vm02.ttibzz (mgr.14195) 332 : cluster [DBG] pgmap v205: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:11.785 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:11 vm02 bash[17473]: cluster 2026-03-10T08:42:10.092097+0000 mgr.vm02.ttibzz (mgr.14195) 332 : cluster [DBG] pgmap v205: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:13.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:13 vm02 bash[17473]: cluster 2026-03-10T08:42:12.092505+0000 mgr.vm02.ttibzz (mgr.14195) 333 : cluster [DBG] pgmap v206: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:13.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:13 vm02 bash[17473]: cluster 2026-03-10T08:42:12.092505+0000 mgr.vm02.ttibzz (mgr.14195) 333 : cluster [DBG] pgmap v206: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:15.115 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop 2026-03-10T08:42:15.255 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:42:15.255 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 57s ago 3m - - 2026-03-10T08:42:15.255 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (3m) 57s ago 3m 102M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a 2026-03-10T08:42:15.255 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (3m) 57s ago 3m 95.1M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584 2026-03-10T08:42:15.255 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (3m) 57s ago 3m 95.5M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:42:15.454 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:42:15.454 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:42:15.454 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state 2026-03-10T08:42:15.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:15 vm02 bash[17473]: cluster 2026-03-10T08:42:14.092943+0000 mgr.vm02.ttibzz (mgr.14195) 334 : cluster [DBG] pgmap v207: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:15.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:15 vm02 bash[17473]: cluster 2026-03-10T08:42:14.092943+0000 mgr.vm02.ttibzz (mgr.14195) 334 : 
cluster [DBG] pgmap v207: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:16.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:16 vm02 bash[17473]: audit 2026-03-10T08:42:15.103825+0000 mgr.vm02.ttibzz (mgr.14195) 335 : audit [DBG] from='client.14932 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:16.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:16 vm02 bash[17473]: audit 2026-03-10T08:42:15.103825+0000 mgr.vm02.ttibzz (mgr.14195) 335 : audit [DBG] from='client.14932 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:16.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:16 vm02 bash[17473]: audit 2026-03-10T08:42:15.254940+0000 mgr.vm02.ttibzz (mgr.14195) 336 : audit [DBG] from='client.14936 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:16.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:16 vm02 bash[17473]: audit 2026-03-10T08:42:15.254940+0000 mgr.vm02.ttibzz (mgr.14195) 336 : audit [DBG] from='client.14936 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:16.785 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:16 vm02 bash[17473]: audit 2026-03-10T08:42:15.455578+0000 mon.vm02 (mon.0) 852 : audit [DBG] from='client.? 192.168.123.102:0/2142215872' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:42:16.785 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:16 vm02 bash[17473]: audit 2026-03-10T08:42:15.455578+0000 mon.vm02 (mon.0) 852 : audit [DBG] from='client.? 
192.168.123.102:0/2142215872' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:42:17.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:17 vm02 bash[17473]: cluster 2026-03-10T08:42:16.093369+0000 mgr.vm02.ttibzz (mgr.14195) 337 : cluster [DBG] pgmap v208: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:42:17.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:17 vm02 bash[17473]: cluster 2026-03-10T08:42:16.093369+0000 mgr.vm02.ttibzz (mgr.14195) 337 : cluster [DBG] pgmap v208: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:42:18.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:18 vm02 bash[17473]: audit 2026-03-10T08:42:18.125109+0000 mon.vm02 (mon.0) 853 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:42:18.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:18 vm02 bash[17473]: audit 2026-03-10T08:42:18.125109+0000 mon.vm02 (mon.0) 853 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:42:19.692 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:19 vm02 bash[17473]: cluster 2026-03-10T08:42:18.093719+0000 mgr.vm02.ttibzz (mgr.14195) 338 : cluster [DBG] pgmap v209: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:42:19.692 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:19 vm02 bash[17473]: cluster 2026-03-10T08:42:18.093719+0000 mgr.vm02.ttibzz (mgr.14195) 338 : cluster [DBG] pgmap v209: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:42:19.692 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:19 vm02 bash[17473]: audit 2026-03-10T08:42:19.240637+0000 mon.vm02 (mon.0) 854 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:42:19.692 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:19 vm02 bash[17473]: audit 2026-03-10T08:42:19.240637+0000 mon.vm02 (mon.0) 854 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:42:20.621 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop 2026-03-10T08:42:20.764 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:42:20.764 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 62s ago 3m - - 2026-03-10T08:42:20.764 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (3m) 62s ago 3m 102M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a 2026-03-10T08:42:20.764 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (3m) 63s ago 3m 95.1M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584 2026-03-10T08:42:20.764 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (3m) 63s ago 3m 95.5M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:42:20.945 
INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:42:20.945 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:42:20.945 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state 2026-03-10T08:42:21.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:21 vm02 bash[17473]: cluster 2026-03-10T08:42:20.094118+0000 mgr.vm02.ttibzz (mgr.14195) 339 : cluster [DBG] pgmap v210: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:42:21.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:21 vm02 bash[17473]: cluster 2026-03-10T08:42:20.094118+0000 mgr.vm02.ttibzz (mgr.14195) 339 : cluster [DBG] pgmap v210: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:42:21.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:21 vm02 bash[17473]: audit 2026-03-10T08:42:20.609072+0000 mgr.vm02.ttibzz (mgr.14195) 340 : audit [DBG] from='client.14944 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:21.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:21 vm02 bash[17473]: audit 2026-03-10T08:42:20.609072+0000 mgr.vm02.ttibzz (mgr.14195) 340 : audit [DBG] from='client.14944 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:21.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:21 vm02 bash[17473]: audit 2026-03-10T08:42:20.763329+0000 mgr.vm02.ttibzz (mgr.14195) 341 : audit [DBG] from='client.14948 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:21.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:21 vm02 bash[17473]: audit 2026-03-10T08:42:20.763329+0000 mgr.vm02.ttibzz (mgr.14195) 341 : audit [DBG] from='client.14948 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:21.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:21 vm02 bash[17473]: audit 2026-03-10T08:42:20.946936+0000 mon.vm02 (mon.0) 855 : audit [DBG] from='client.? 192.168.123.102:0/151259266' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:42:21.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:21 vm02 bash[17473]: audit 2026-03-10T08:42:20.946936+0000 mon.vm02 (mon.0) 855 : audit [DBG] from='client.? 
192.168.123.102:0/151259266' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:42:23.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:23 vm02 bash[17473]: cluster 2026-03-10T08:42:22.094520+0000 mgr.vm02.ttibzz (mgr.14195) 342 : cluster [DBG] pgmap v211: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:42:23.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:23 vm02 bash[17473]: cluster 2026-03-10T08:42:22.094520+0000 mgr.vm02.ttibzz (mgr.14195) 342 : cluster [DBG] pgmap v211: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:42:23.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:23 vm02 bash[17473]: audit 2026-03-10T08:42:23.394076+0000 mon.vm02 (mon.0) 856 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:42:23.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:23 vm02 bash[17473]: audit 2026-03-10T08:42:23.394076+0000 mon.vm02 (mon.0) 856 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:42:23.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:23 vm02 bash[17473]: audit 2026-03-10T08:42:23.400767+0000 mon.vm02 (mon.0) 857 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:42:23.784 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:23 vm02 bash[17473]: audit 2026-03-10T08:42:23.400767+0000 mon.vm02 (mon.0) 857 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:42:25.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:24 vm02 bash[17473]: audit 2026-03-10T08:42:23.628570+0000 mon.vm02 (mon.0) 858 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:42:25.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:24 vm02 bash[17473]: audit 2026-03-10T08:42:23.628570+0000 mon.vm02 (mon.0) 858 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:42:25.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:24 vm02 bash[17473]: audit 2026-03-10T08:42:23.633657+0000 mon.vm02 (mon.0) 859 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:42:25.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:24 vm02 bash[17473]: audit 2026-03-10T08:42:23.633657+0000 mon.vm02 (mon.0) 859 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:42:25.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:24 vm02 bash[17473]: audit 2026-03-10T08:42:23.948664+0000 mon.vm02 (mon.0) 860 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:42:25.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:24 vm02 bash[17473]: audit 2026-03-10T08:42:23.948664+0000 mon.vm02 (mon.0) 860 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:42:25.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:24 vm02 bash[17473]: audit 2026-03-10T08:42:23.949180+0000 mon.vm02 (mon.0) 861 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth 
get", "entity": "client.admin"}]: dispatch 2026-03-10T08:42:25.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:24 vm02 bash[17473]: audit 2026-03-10T08:42:23.949180+0000 mon.vm02 (mon.0) 861 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T08:42:25.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:24 vm02 bash[17473]: audit 2026-03-10T08:42:23.954153+0000 mon.vm02 (mon.0) 862 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:42:25.035 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:24 vm02 bash[17473]: audit 2026-03-10T08:42:23.954153+0000 mon.vm02 (mon.0) 862 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:42:25.035 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:24 vm02 bash[17473]: audit 2026-03-10T08:42:23.955648+0000 mon.vm02 (mon.0) 863 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T08:42:25.035 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:24 vm02 bash[17473]: audit 2026-03-10T08:42:23.955648+0000 mon.vm02 (mon.0) 863 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T08:42:26.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:25 vm02 bash[17473]: cluster 2026-03-10T08:42:23.950134+0000 mgr.vm02.ttibzz (mgr.14195) 343 : cluster [DBG] pgmap v212: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 259 B/s rd, 518 B/s wr, 0 op/s 2026-03-10T08:42:26.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:25 vm02 bash[17473]: cluster 2026-03-10T08:42:23.950134+0000 mgr.vm02.ttibzz (mgr.14195) 343 : cluster [DBG] pgmap v212: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 259 B/s rd, 518 B/s wr, 0 op/s 2026-03-10T08:42:26.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:25 vm02 bash[17473]: cluster 2026-03-10T08:42:23.950495+0000 mgr.vm02.ttibzz (mgr.14195) 344 : cluster [DBG] pgmap v213: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 311 B/s rd, 623 B/s wr, 0 op/s 2026-03-10T08:42:26.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:25 vm02 bash[17473]: cluster 2026-03-10T08:42:23.950495+0000 mgr.vm02.ttibzz (mgr.14195) 344 : cluster [DBG] pgmap v213: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 311 B/s rd, 623 B/s wr, 0 op/s 2026-03-10T08:42:26.121 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop 2026-03-10T08:42:26.272 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:42:26.273 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 2s ago 3m - - 2026-03-10T08:42:26.273 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (3m) 2s ago 3m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a 2026-03-10T08:42:26.273 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (3m) 2s ago 3m 98.2M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584 2026-03-10T08:42:26.273 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 
*:8001 running (3m) 2s ago 3m 98.5M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:42:26.469 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:42:26.469 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:42:26.469 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state 2026-03-10T08:42:26.992 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:26 vm02 bash[17473]: audit 2026-03-10T08:42:26.471178+0000 mon.vm02 (mon.0) 864 : audit [DBG] from='client.? 192.168.123.102:0/2902490041' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:42:26.992 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:26 vm02 bash[17473]: audit 2026-03-10T08:42:26.471178+0000 mon.vm02 (mon.0) 864 : audit [DBG] from='client.? 192.168.123.102:0/2902490041' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:42:28.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:27 vm02 bash[17473]: cluster 2026-03-10T08:42:25.950933+0000 mgr.vm02.ttibzz (mgr.14195) 345 : cluster [DBG] pgmap v214: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:28.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:27 vm02 bash[17473]: cluster 2026-03-10T08:42:25.950933+0000 mgr.vm02.ttibzz (mgr.14195) 345 : cluster [DBG] pgmap v214: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:28.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:27 vm02 bash[17473]: audit 2026-03-10T08:42:26.108090+0000 mgr.vm02.ttibzz (mgr.14195) 346 : audit [DBG] from='client.14956 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:28.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:27 vm02 bash[17473]: audit 2026-03-10T08:42:26.108090+0000 mgr.vm02.ttibzz (mgr.14195) 346 : audit [DBG] from='client.14956 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:28.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:27 vm02 bash[17473]: audit 2026-03-10T08:42:26.271972+0000 mgr.vm02.ttibzz (mgr.14195) 347 : audit [DBG] from='client.14960 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:28.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:27 vm02 bash[17473]: audit 2026-03-10T08:42:26.271972+0000 mgr.vm02.ttibzz (mgr.14195) 347 : audit [DBG] from='client.14960 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:30.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:29 vm02 bash[17473]: cluster 2026-03-10T08:42:27.951319+0000 mgr.vm02.ttibzz (mgr.14195) 348 : cluster [DBG] pgmap v215: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:30.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:29 vm02 bash[17473]: cluster 2026-03-10T08:42:27.951319+0000 mgr.vm02.ttibzz (mgr.14195) 348 : cluster [DBG] pgmap v215: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:31.642 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop 2026-03-10T08:42:31.793 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS 
REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:42:31.793 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 8s ago 3m - - 2026-03-10T08:42:31.793 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (3m) 8s ago 3m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a 2026-03-10T08:42:31.793 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (3m) 8s ago 3m 98.2M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584 2026-03-10T08:42:31.793 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (3m) 8s ago 3m 98.5M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:42:31.979 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:42:31.979 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:42:31.979 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state 2026-03-10T08:42:32.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:31 vm02 bash[17473]: cluster 2026-03-10T08:42:29.951727+0000 mgr.vm02.ttibzz (mgr.14195) 349 : cluster [DBG] pgmap v216: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:32.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:31 vm02 bash[17473]: cluster 2026-03-10T08:42:29.951727+0000 mgr.vm02.ttibzz (mgr.14195) 349 : cluster [DBG] pgmap v216: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:33.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:32 vm02 bash[17473]: audit 2026-03-10T08:42:31.629123+0000 mgr.vm02.ttibzz (mgr.14195) 350 : audit [DBG] from='client.14968 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:33.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:32 vm02 bash[17473]: audit 2026-03-10T08:42:31.629123+0000 mgr.vm02.ttibzz (mgr.14195) 350 : audit [DBG] from='client.14968 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:33.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:32 vm02 bash[17473]: audit 2026-03-10T08:42:31.792974+0000 mgr.vm02.ttibzz (mgr.14195) 351 : audit [DBG] from='client.14972 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:33.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:32 vm02 bash[17473]: audit 2026-03-10T08:42:31.792974+0000 mgr.vm02.ttibzz (mgr.14195) 351 : audit [DBG] from='client.14972 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:33.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:32 vm02 bash[17473]: audit 2026-03-10T08:42:31.981215+0000 mon.vm02 (mon.0) 865 : audit [DBG] from='client.? 192.168.123.102:0/2133624772' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:42:33.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:32 vm02 bash[17473]: audit 2026-03-10T08:42:31.981215+0000 mon.vm02 (mon.0) 865 : audit [DBG] from='client.? 
192.168.123.102:0/2133624772' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:42:34.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:33 vm02 bash[17473]: cluster 2026-03-10T08:42:31.952203+0000 mgr.vm02.ttibzz (mgr.14195) 352 : cluster [DBG] pgmap v217: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:34.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:33 vm02 bash[17473]: cluster 2026-03-10T08:42:31.952203+0000 mgr.vm02.ttibzz (mgr.14195) 352 : cluster [DBG] pgmap v217: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:35.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:34 vm02 bash[17473]: audit 2026-03-10T08:42:34.240974+0000 mon.vm02 (mon.0) 866 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:42:35.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:34 vm02 bash[17473]: audit 2026-03-10T08:42:34.240974+0000 mon.vm02 (mon.0) 866 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:42:36.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:35 vm02 bash[17473]: cluster 2026-03-10T08:42:33.952674+0000 mgr.vm02.ttibzz (mgr.14195) 353 : cluster [DBG] pgmap v218: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:36.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:35 vm02 bash[17473]: cluster 2026-03-10T08:42:33.952674+0000 mgr.vm02.ttibzz (mgr.14195) 353 : cluster [DBG] pgmap v218: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:37.177 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop 2026-03-10T08:42:37.336 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:42:37.336 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 13s ago 3m - - 2026-03-10T08:42:37.336 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (3m) 13s ago 3m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a 2026-03-10T08:42:37.336 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (3m) 13s ago 3m 98.2M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584 2026-03-10T08:42:37.336 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (3m) 13s ago 3m 98.5M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:42:37.521 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:42:37.522 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:42:37.522 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state 2026-03-10T08:42:38.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:37 vm02 bash[17473]: cluster 2026-03-10T08:42:35.953163+0000 mgr.vm02.ttibzz (mgr.14195) 354 : cluster [DBG] pgmap v219: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:38.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:37 vm02 bash[17473]: cluster 2026-03-10T08:42:35.953163+0000 mgr.vm02.ttibzz (mgr.14195) 354 : cluster 
[DBG] pgmap v219: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:38.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:37 vm02 bash[17473]: audit 2026-03-10T08:42:37.523962+0000 mon.vm02 (mon.0) 867 : audit [DBG] from='client.? 192.168.123.102:0/1876551322' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:42:38.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:37 vm02 bash[17473]: audit 2026-03-10T08:42:37.523962+0000 mon.vm02 (mon.0) 867 : audit [DBG] from='client.? 192.168.123.102:0/1876551322' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:42:39.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:38 vm02 bash[17473]: audit 2026-03-10T08:42:37.163258+0000 mgr.vm02.ttibzz (mgr.14195) 355 : audit [DBG] from='client.14980 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:39.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:38 vm02 bash[17473]: audit 2026-03-10T08:42:37.163258+0000 mgr.vm02.ttibzz (mgr.14195) 355 : audit [DBG] from='client.14980 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:39.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:38 vm02 bash[17473]: audit 2026-03-10T08:42:37.335330+0000 mgr.vm02.ttibzz (mgr.14195) 356 : audit [DBG] from='client.14984 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:39.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:38 vm02 bash[17473]: audit 2026-03-10T08:42:37.335330+0000 mgr.vm02.ttibzz (mgr.14195) 356 : audit [DBG] from='client.14984 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:40.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:39 vm02 bash[17473]: cluster 2026-03-10T08:42:37.953570+0000 mgr.vm02.ttibzz (mgr.14195) 357 : cluster [DBG] pgmap v220: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:40.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:39 vm02 bash[17473]: cluster 2026-03-10T08:42:37.953570+0000 mgr.vm02.ttibzz (mgr.14195) 357 : cluster [DBG] pgmap v220: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:42.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:41 vm02 bash[17473]: cluster 2026-03-10T08:42:39.953974+0000 mgr.vm02.ttibzz (mgr.14195) 358 : cluster [DBG] pgmap v221: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:42.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:41 vm02 bash[17473]: cluster 2026-03-10T08:42:39.953974+0000 mgr.vm02.ttibzz (mgr.14195) 358 : cluster [DBG] pgmap v221: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:42.693 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop 2026-03-10T08:42:42.846 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:42:42.847 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 19s ago 3m - - 2026-03-10T08:42:42.847 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (3m) 19s ago 3m 107M - 19.2.3-678-ge911bdeb 
654f31e6858e 0471928b991a 2026-03-10T08:42:42.847 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (3m) 19s ago 3m 98.2M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584 2026-03-10T08:42:42.847 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (3m) 19s ago 3m 98.5M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:42:43.031 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:42:43.031 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:42:43.031 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state 2026-03-10T08:42:44.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:43 vm02 bash[17473]: cluster 2026-03-10T08:42:41.954388+0000 mgr.vm02.ttibzz (mgr.14195) 359 : cluster [DBG] pgmap v222: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:44.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:43 vm02 bash[17473]: cluster 2026-03-10T08:42:41.954388+0000 mgr.vm02.ttibzz (mgr.14195) 359 : cluster [DBG] pgmap v222: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:44.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:43 vm02 bash[17473]: audit 2026-03-10T08:42:42.678781+0000 mgr.vm02.ttibzz (mgr.14195) 360 : audit [DBG] from='client.24653 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:44.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:43 vm02 bash[17473]: audit 2026-03-10T08:42:42.678781+0000 mgr.vm02.ttibzz (mgr.14195) 360 : audit [DBG] from='client.24653 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:44.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:43 vm02 bash[17473]: audit 2026-03-10T08:42:42.846315+0000 mgr.vm02.ttibzz (mgr.14195) 361 : audit [DBG] from='client.14996 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:44.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:43 vm02 bash[17473]: audit 2026-03-10T08:42:42.846315+0000 mgr.vm02.ttibzz (mgr.14195) 361 : audit [DBG] from='client.14996 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:44.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:43 vm02 bash[17473]: audit 2026-03-10T08:42:43.033466+0000 mon.vm02 (mon.0) 868 : audit [DBG] from='client.? 192.168.123.102:0/3989441455' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:42:44.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:43 vm02 bash[17473]: audit 2026-03-10T08:42:43.033466+0000 mon.vm02 (mon.0) 868 : audit [DBG] from='client.? 
192.168.123.102:0/3989441455' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:42:46.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:45 vm02 bash[17473]: cluster 2026-03-10T08:42:43.954851+0000 mgr.vm02.ttibzz (mgr.14195) 362 : cluster [DBG] pgmap v223: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:46.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:45 vm02 bash[17473]: cluster 2026-03-10T08:42:43.954851+0000 mgr.vm02.ttibzz (mgr.14195) 362 : cluster [DBG] pgmap v223: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:42:48.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:47 vm02 bash[17473]: cluster 2026-03-10T08:42:45.955319+0000 mgr.vm02.ttibzz (mgr.14195) 363 : cluster [DBG] pgmap v224: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:42:48.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:47 vm02 bash[17473]: cluster 2026-03-10T08:42:45.955319+0000 mgr.vm02.ttibzz (mgr.14195) 363 : cluster [DBG] pgmap v224: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:42:48.200 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop 2026-03-10T08:42:48.348 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:42:48.348 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 24s ago 3m - - 2026-03-10T08:42:48.348 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (3m) 24s ago 3m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a 2026-03-10T08:42:48.348 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (3m) 24s ago 3m 98.2M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584 2026-03-10T08:42:48.348 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (3m) 24s ago 3m 98.5M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:42:48.522 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:42:48.522 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:42:48.523 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state 2026-03-10T08:42:49.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:48 vm02 bash[17473]: audit 2026-03-10T08:42:48.524905+0000 mon.vm02 (mon.0) 869 : audit [DBG] from='client.? 192.168.123.102:0/2091210485' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:42:49.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:48 vm02 bash[17473]: audit 2026-03-10T08:42:48.524905+0000 mon.vm02 (mon.0) 869 : audit [DBG] from='client.? 
192.168.123.102:0/2091210485' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:42:50.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:49 vm02 bash[17473]: cluster 2026-03-10T08:42:47.955720+0000 mgr.vm02.ttibzz (mgr.14195) 364 : cluster [DBG] pgmap v225: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:42:50.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:49 vm02 bash[17473]: cluster 2026-03-10T08:42:47.955720+0000 mgr.vm02.ttibzz (mgr.14195) 364 : cluster [DBG] pgmap v225: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:42:50.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:49 vm02 bash[17473]: audit 2026-03-10T08:42:48.188157+0000 mgr.vm02.ttibzz (mgr.14195) 365 : audit [DBG] from='client.24659 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:50.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:49 vm02 bash[17473]: audit 2026-03-10T08:42:48.188157+0000 mgr.vm02.ttibzz (mgr.14195) 365 : audit [DBG] from='client.24659 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:50.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:49 vm02 bash[17473]: audit 2026-03-10T08:42:48.347711+0000 mgr.vm02.ttibzz (mgr.14195) 366 : audit [DBG] from='client.15008 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:50.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:49 vm02 bash[17473]: audit 2026-03-10T08:42:48.347711+0000 mgr.vm02.ttibzz (mgr.14195) 366 : audit [DBG] from='client.15008 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:50.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:49 vm02 bash[17473]: audit 2026-03-10T08:42:49.241084+0000 mon.vm02 (mon.0) 870 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:42:50.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:49 vm02 bash[17473]: audit 2026-03-10T08:42:49.241084+0000 mon.vm02 (mon.0) 870 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:42:52.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:51 vm02 bash[17473]: cluster 2026-03-10T08:42:49.956124+0000 mgr.vm02.ttibzz (mgr.14195) 367 : cluster [DBG] pgmap v226: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:42:52.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:51 vm02 bash[17473]: cluster 2026-03-10T08:42:49.956124+0000 mgr.vm02.ttibzz (mgr.14195) 367 : cluster [DBG] pgmap v226: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:42:53.685 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop 2026-03-10T08:42:53.834 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:42:53.834 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 30s ago 
3m - - 2026-03-10T08:42:53.834 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (3m) 30s ago 3m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a 2026-03-10T08:42:53.834 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (3m) 30s ago 3m 98.2M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584 2026-03-10T08:42:53.834 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (3m) 30s ago 3m 98.5M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:42:54.009 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:42:54.009 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:42:54.009 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state 2026-03-10T08:42:54.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:53 vm02 bash[17473]: cluster 2026-03-10T08:42:51.956504+0000 mgr.vm02.ttibzz (mgr.14195) 368 : cluster [DBG] pgmap v227: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:42:54.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:53 vm02 bash[17473]: cluster 2026-03-10T08:42:51.956504+0000 mgr.vm02.ttibzz (mgr.14195) 368 : cluster [DBG] pgmap v227: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:42:55.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:54 vm02 bash[17473]: audit 2026-03-10T08:42:53.672598+0000 mgr.vm02.ttibzz (mgr.14195) 369 : audit [DBG] from='client.15016 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:55.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:54 vm02 bash[17473]: audit 2026-03-10T08:42:53.672598+0000 mgr.vm02.ttibzz (mgr.14195) 369 : audit [DBG] from='client.15016 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:55.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:54 vm02 bash[17473]: audit 2026-03-10T08:42:53.833927+0000 mgr.vm02.ttibzz (mgr.14195) 370 : audit [DBG] from='client.15020 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:55.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:54 vm02 bash[17473]: audit 2026-03-10T08:42:53.833927+0000 mgr.vm02.ttibzz (mgr.14195) 370 : audit [DBG] from='client.15020 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:42:55.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:54 vm02 bash[17473]: audit 2026-03-10T08:42:54.008554+0000 mon.vm07 (mon.1) 36 : audit [DBG] from='client.? 192.168.123.102:0/24451713' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:42:55.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:54 vm02 bash[17473]: audit 2026-03-10T08:42:54.008554+0000 mon.vm07 (mon.1) 36 : audit [DBG] from='client.? 
192.168.123.102:0/24451713' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:42:56.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:55 vm02 bash[17473]: cluster 2026-03-10T08:42:53.956938+0000 mgr.vm02.ttibzz (mgr.14195) 371 : cluster [DBG] pgmap v228: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:42:58.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:57 vm02 bash[17473]: cluster 2026-03-10T08:42:55.957388+0000 mgr.vm02.ttibzz (mgr.14195) 372 : cluster [DBG] pgmap v229: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:42:59.174 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:42:59.318 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:42:59.318 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 35s ago 3m - -
2026-03-10T08:42:59.318 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (3m) 35s ago 3m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:42:59.318 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (3m) 35s ago 3m 98.2M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:42:59.318 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (3m) 35s ago 3m 98.5M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:42:59.493 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:42:59.493 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:42:59.493 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:43:00.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:59 vm02 bash[17473]: cluster 2026-03-10T08:42:57.957778+0000 mgr.vm02.ttibzz (mgr.14195) 373 : cluster [DBG] pgmap v230: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:43:00.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:42:59 vm02 bash[17473]: audit 2026-03-10T08:42:59.495768+0000 mon.vm02 (mon.0) 871 : audit [DBG] from='client.? 192.168.123.102:0/2472312192' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:43:01.284 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:00 vm02 bash[17473]: audit 2026-03-10T08:42:59.162582+0000 mgr.vm02.ttibzz (mgr.14195) 374 : audit [DBG] from='client.15028 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:43:01.284 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:00 vm02 bash[17473]: audit 2026-03-10T08:42:59.318514+0000 mgr.vm02.ttibzz (mgr.14195) 375 : audit [DBG] from='client.15032 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:43:02.284 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:01 vm02 bash[17473]: cluster 2026-03-10T08:42:59.958120+0000 mgr.vm02.ttibzz (mgr.14195) 376 : cluster [DBG] pgmap v231: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:43:04.284 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:03 vm02 bash[17473]: cluster 2026-03-10T08:43:01.958514+0000 mgr.vm02.ttibzz (mgr.14195) 377 : cluster [DBG] pgmap v232: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:43:04.655 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:43:04.796 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:43:04.796 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 41s ago 3m - -
2026-03-10T08:43:04.796 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (4m) 41s ago 4m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:43:04.796 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (4m) 41s ago 4m 98.2M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:43:04.796 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (4m) 41s ago 4m 98.5M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:43:04.989 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:43:04.990 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:43:04.990 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:43:05.284 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:04 vm02 bash[17473]: audit 2026-03-10T08:43:04.241228+0000 mon.vm02 (mon.0) 872 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:43:06.284 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:05 vm02 bash[17473]: cluster 2026-03-10T08:43:03.958887+0000 mgr.vm02.ttibzz (mgr.14195) 378 : cluster [DBG] pgmap v233: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:43:06.284 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:05 vm02 bash[17473]: audit 2026-03-10T08:43:04.644581+0000 mgr.vm02.ttibzz (mgr.14195) 379 : audit [DBG] from='client.15040 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:43:06.284 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:05 vm02 bash[17473]: audit 2026-03-10T08:43:04.795899+0000 mgr.vm02.ttibzz (mgr.14195) 380 : audit [DBG] from='client.15044 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:43:06.284 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:05 vm02 bash[17473]: audit 2026-03-10T08:43:04.992074+0000 mon.vm02 (mon.0) 873 : audit [DBG] from='client.? 192.168.123.102:0/1691291645' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:43:08.284 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:07 vm02 bash[17473]: cluster 2026-03-10T08:43:05.959266+0000 mgr.vm02.ttibzz (mgr.14195) 381 : cluster [DBG] pgmap v234: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:43:10.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:09 vm02 bash[17473]: cluster 2026-03-10T08:43:07.959693+0000 mgr.vm02.ttibzz (mgr.14195) 382 : cluster [DBG] pgmap v235: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:43:10.151 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:43:10.301 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:43:10.302 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 46s ago 4m - -
2026-03-10T08:43:10.302 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (4m) 46s ago 4m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:43:10.302 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (4m) 46s ago 4m 98.2M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:43:10.302 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (4m) 46s ago 4m 98.5M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:43:10.479 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:43:10.479 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:43:10.479 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:43:11.284 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:10 vm02 bash[17473]: audit 2026-03-10T08:43:10.481887+0000 mon.vm02 (mon.0) 874 : audit [DBG] from='client.? 192.168.123.102:0/2697251137' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:43:12.284 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:11 vm02 bash[17473]: cluster 2026-03-10T08:43:09.960111+0000 mgr.vm02.ttibzz (mgr.14195) 383 : cluster [DBG] pgmap v236: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:43:12.284 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:11 vm02 bash[17473]: audit 2026-03-10T08:43:10.141315+0000 mgr.vm02.ttibzz (mgr.14195) 384 : audit [DBG] from='client.15052 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:43:12.284 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:11 vm02 bash[17473]: audit 2026-03-10T08:43:10.301830+0000 mgr.vm02.ttibzz (mgr.14195) 385 : audit [DBG] from='client.15056 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:43:14.284 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:13 vm02 bash[17473]: cluster 2026-03-10T08:43:11.960484+0000 mgr.vm02.ttibzz (mgr.14195) 386 : cluster [DBG] pgmap v237: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:43:15.637 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:43:15.783 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:43:15.783 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 52s ago 4m - -
2026-03-10T08:43:15.783 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (4m) 52s ago 4m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:43:15.783 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (4m) 52s ago 4m 98.2M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:43:15.783 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (4m) 52s ago 4m 98.5M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:43:15.962 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:43:15.963 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:43:15.963 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:43:16.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:15 vm02 bash[17473]: cluster 2026-03-10T08:43:13.960887+0000 mgr.vm02.ttibzz (mgr.14195) 387 : cluster [DBG] pgmap v238: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:43:17.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:16 vm02 bash[17473]: audit 2026-03-10T08:43:15.627108+0000 mgr.vm02.ttibzz (mgr.14195) 388 : audit [DBG] from='client.15064 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:43:17.284 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:16 vm02 bash[17473]: audit 2026-03-10T08:43:15.783365+0000 mgr.vm02.ttibzz (mgr.14195) 389 : audit [DBG] from='client.15068 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:43:17.284 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:16 vm02 bash[17473]: audit 2026-03-10T08:43:15.965384+0000 mon.vm02 (mon.0) 875 : audit [DBG] from='client.? 192.168.123.102:0/1873051326' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:43:18.284 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:17 vm02 bash[17473]: cluster 2026-03-10T08:43:15.961260+0000 mgr.vm02.ttibzz (mgr.14195) 390 : cluster [DBG] pgmap v239: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:43:20.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:19 vm02 bash[17473]: cluster 2026-03-10T08:43:17.961659+0000 mgr.vm02.ttibzz (mgr.14195) 391 : cluster [DBG] pgmap v240: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:43:20.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:19 vm02 bash[17473]: audit 2026-03-10T08:43:19.241580+0000 mon.vm02 (mon.0) 876 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:43:21.130 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:43:21.274 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:43:21.274 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 57s ago 4m - -
2026-03-10T08:43:21.274 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (4m) 57s ago 4m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:43:21.274 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (4m) 57s ago 4m 98.2M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:43:21.274 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (4m) 57s ago 4m 98.5M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:43:21.454 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:43:21.454 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:43:21.454 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:43:22.284 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:21 vm02 bash[17473]: cluster 2026-03-10T08:43:19.962029+0000 mgr.vm02.ttibzz (mgr.14195) 392 : cluster [DBG] pgmap v241: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:43:22.284 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:21 vm02 bash[17473]: audit 2026-03-10T08:43:21.456579+0000 mon.vm02 (mon.0) 877 : audit [DBG] from='client.? 192.168.123.102:0/1443186796' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:43:23.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:22 vm02 bash[17473]: audit 2026-03-10T08:43:21.118586+0000 mgr.vm02.ttibzz (mgr.14195) 393 : audit [DBG] from='client.15076 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:43:23.284 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:22 vm02 bash[17473]: audit 2026-03-10T08:43:21.274584+0000 mgr.vm02.ttibzz (mgr.14195) 394 : audit [DBG] from='client.15080 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:43:24.284 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:23 vm02 bash[17473]: cluster 2026-03-10T08:43:21.962409+0000 mgr.vm02.ttibzz (mgr.14195) 395 : cluster [DBG] pgmap v242: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:43:25.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:24 vm02 bash[17473]: audit 2026-03-10T08:43:23.994792+0000 mon.vm02 (mon.0) 878 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:43:26.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:25 vm02 bash[17473]: cluster 2026-03-10T08:43:23.962819+0000 mgr.vm02.ttibzz (mgr.14195) 396 : cluster [DBG] pgmap v243: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:43:26.621 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:43:26.774 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:43:26.774 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 63s ago 4m - -
2026-03-10T08:43:26.774 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (4m) 63s ago 4m 107M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:43:26.774 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (4m) 63s ago 4m 98.2M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:43:26.774 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (4m) 63s ago 4m 98.5M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:43:26.959 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:43:26.959 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:43:26.959 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:43:28.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:27 vm02 bash[17473]: cluster 2026-03-10T08:43:25.963255+0000 mgr.vm02.ttibzz (mgr.14195) 397 : cluster [DBG] pgmap v244: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:43:28.284 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:27 vm02 bash[17473]: audit 2026-03-10T08:43:26.609441+0000 mgr.vm02.ttibzz (mgr.14195) 398 : audit [DBG] from='client.15088 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:43:28.284 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:27 vm02 bash[17473]: audit 2026-03-10T08:43:26.768185+0000 mgr.vm02.ttibzz (mgr.14195) 399 : audit [DBG] from='client.15092 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:43:28.284 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:27 vm02 bash[17473]: audit 2026-03-10T08:43:26.962104+0000 mon.vm02 (mon.0) 879 : audit [DBG] from='client.? 192.168.123.102:0/201113124' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:43:30.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:29 vm02 bash[17473]: cluster 2026-03-10T08:43:27.963611+0000 mgr.vm02.ttibzz (mgr.14195) 400 : cluster [DBG] pgmap v245: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:43:30.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:29 vm02 bash[17473]: audit 2026-03-10T08:43:29.269935+0000 mon.vm02 (mon.0) 880 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:43:30.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:29 vm02 bash[17473]: audit 2026-03-10T08:43:29.275422+0000 mon.vm02 (mon.0) 881 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:43:30.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:29 vm02 bash[17473]: audit 2026-03-10T08:43:29.542012+0000 mon.vm02 (mon.0) 882 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:43:30.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:29 vm02 bash[17473]: audit 2026-03-10T08:43:29.547139+0000 mon.vm02 (mon.0) 883 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:43:30.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:29 vm02 bash[17473]: audit 2026-03-10T08:43:29.839737+0000 mon.vm02 (mon.0) 884 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:43:30.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:29 vm02 bash[17473]: audit 2026-03-10T08:43:29.840185+0000 mon.vm02 (mon.0) 885 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T08:43:30.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:29 vm02 bash[17473]: audit 2026-03-10T08:43:29.846015+0000 mon.vm02 (mon.0) 886 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:43:30.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:29 vm02 bash[17473]: audit 2026-03-10T08:43:29.847601+0000 mon.vm02 (mon.0) 887 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T08:43:31.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:30 vm02 bash[17473]: cluster 2026-03-10T08:43:29.840989+0000 mgr.vm02.ttibzz (mgr.14195) 401 : cluster [DBG] pgmap v246: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:43:31.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:30 vm02 bash[17473]: cluster 2026-03-10T08:43:29.841185+0000 mgr.vm02.ttibzz (mgr.14195) 402 : cluster [DBG] pgmap v247: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:43:32.123 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:43:32.272 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:43:32.272 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 2s ago 4m - -
2026-03-10T08:43:32.272 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (4m) 2s ago 4m 111M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:43:32.272 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (4m) 3s ago 4m 101M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:43:32.272 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (4m) 3s ago 4m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:43:32.463 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:43:32.463 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:43:32.463 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:43:33.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:32 vm02 bash[17473]: cluster 2026-03-10T08:43:31.841567+0000 mgr.vm02.ttibzz (mgr.14195) 403 : cluster [DBG] pgmap v248: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:43:33.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:32 vm02 bash[17473]: audit 2026-03-10T08:43:32.466066+0000 mon.vm02 (mon.0) 888 : audit [DBG] from='client.? 192.168.123.102:0/2959469341' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:43:34.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:33 vm02 bash[17473]: audit 2026-03-10T08:43:32.111920+0000 mgr.vm02.ttibzz (mgr.14195) 404 : audit [DBG] from='client.15100 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:43:34.284 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:33 vm02 bash[17473]: audit 2026-03-10T08:43:32.272078+0000 mgr.vm02.ttibzz (mgr.14195) 405 : audit [DBG] from='client.15104 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:43:34.284 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:33 vm02 bash[17473]: cluster 2026-03-10T08:43:33.841972+0000 mgr.vm02.ttibzz (mgr.14195) 406 : cluster [DBG] pgmap v249: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:43:35.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:34 vm02 bash[17473]: audit 2026-03-10T08:43:34.241812+0000 mon.vm02 (mon.0) 889 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:43:36.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:35 vm02 bash[17473]: cluster 2026-03-10T08:43:35.842388+0000 mgr.vm02.ttibzz (mgr.14195) 407 : cluster [DBG] pgmap v250: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:43:37.634 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:43:37.796 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:43:37.796 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 8s ago 4m - -
2026-03-10T08:43:37.796 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (4m) 8s ago 4m 111M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:43:37.796 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (4m) 8s ago 4m 101M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:43:37.796 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (4m) 8s ago 4m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:43:37.985 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:43:37.985 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:43:37.985 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:43:39.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:38 vm02 bash[17473]: audit 2026-03-10T08:43:37.622252+0000 mgr.vm02.ttibzz (mgr.14195) 408 : audit [DBG] from='client.15112 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:43:39.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:38 vm02 bash[17473]: audit 2026-03-10T08:43:37.794641+0000 mgr.vm02.ttibzz (mgr.14195) 409 : audit [DBG] from='client.24727 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:43:39.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:38 vm02 bash[17473]: cluster 2026-03-10T08:43:37.842795+0000 mgr.vm02.ttibzz (mgr.14195) 410 : cluster [DBG] pgmap v251: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:43:39.284 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:38 vm02 bash[17473]: audit 2026-03-10T08:43:37.987805+0000 mon.vm02 (mon.0) 890 : audit [DBG] from='client.? 192.168.123.102:0/3455057945' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:43:41.533 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:41 vm02 bash[17473]: cluster 2026-03-10T08:43:39.843252+0000 mgr.vm02.ttibzz (mgr.14195) 411 : cluster [DBG] pgmap v252: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:43:42.533 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:42 vm02 bash[17473]: cluster 2026-03-10T08:43:41.843698+0000 mgr.vm02.ttibzz (mgr.14195) 412 : cluster [DBG] pgmap v253: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:43:43.150 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:43:43.305 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:43:43.305 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 13s ago 4m - -
2026-03-10T08:43:43.306 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (4m) 13s ago 4m 111M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:43:43.306 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (4m) 14s ago 4m 101M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:43:43.306 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (4m) 14s ago 4m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:43:43.504 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:43:43.504 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:43:43.504 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:43:44.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:43 vm02 bash[17473]: audit 2026-03-10T08:43:43.506764+0000 mon.vm02 (mon.0) 891 : audit [DBG] from='client.? 192.168.123.102:0/1365384616' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:43:45.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:44 vm02 bash[17473]: audit 2026-03-10T08:43:43.139334+0000 mgr.vm02.ttibzz (mgr.14195) 413 : audit [DBG] from='client.15124 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:43:45.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:44 vm02 bash[17473]: audit 2026-03-10T08:43:43.305702+0000 mgr.vm02.ttibzz (mgr.14195) 414 : audit [DBG] from='client.15128 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:43:45.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:44 vm02 bash[17473]: cluster 2026-03-10T08:43:43.844096+0000 mgr.vm02.ttibzz (mgr.14195) 415 : cluster [DBG] pgmap v254: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:43:47.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:46 vm02 bash[17473]: cluster 2026-03-10T08:43:45.844499+0000 mgr.vm02.ttibzz (mgr.14195) 416 : cluster [DBG] pgmap v255: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:43:48.664 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:43:48.804 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:43:48.804 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 19s ago 4m - -
2026-03-10T08:43:48.804 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (4m) 19s ago 4m 111M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:43:48.804 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (4m) 19s ago 4m 101M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:43:48.804 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (4m) 19s ago 4m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:43:48.982 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:43:48.982 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:43:48.982 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:43:49.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:48 vm02 bash[17473]: cluster 2026-03-10T08:43:47.844840+0000 mgr.vm02.ttibzz (mgr.14195) 417 : cluster [DBG] pgmap v256: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:43:50.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:49 vm02 bash[17473]: audit 2026-03-10T08:43:48.653920+0000 mgr.vm02.ttibzz (mgr.14195) 418 : audit [DBG] from='client.15136 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:43:50.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:49 vm02 bash[17473]: audit 2026-03-10T08:43:48.804638+0000 mgr.vm02.ttibzz (mgr.14195) 419 : audit [DBG] from='client.24741 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:43:50.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:49 vm02 bash[17473]: audit 2026-03-10T08:43:48.985226+0000 mon.vm02 (mon.0) 892 : audit [DBG] from='client.? 192.168.123.102:0/512915150' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:43:50.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:49 vm02 bash[17473]: audit 2026-03-10T08:43:49.241922+0000 mon.vm02 (mon.0) 893 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:43:51.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:50 vm02 bash[17473]: cluster 2026-03-10T08:43:49.845189+0000 mgr.vm02.ttibzz (mgr.14195) 420 : cluster [DBG] pgmap v257: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:43:53.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:52 vm02 bash[17473]: cluster 2026-03-10T08:43:51.845572+0000 mgr.vm02.ttibzz (mgr.14195) 421 : cluster [DBG] pgmap v258: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:43:54.142 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:43:54.290 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:43:54.290 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 24s ago 4m - -
2026-03-10T08:43:54.290 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (4m) 24s ago 4m 111M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:43:54.290 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (4m) 25s ago 4m 101M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:43:54.290 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (4m) 25s ago 4m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:43:54.472 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:43:54.472 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:43:54.472 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:43:55.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:54 vm02 bash[17473]: cluster 2026-03-10T08:43:53.845951+0000 mgr.vm02.ttibzz (mgr.14195) 422 : cluster [DBG] pgmap v259: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:43:55.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:54 vm02 bash[17473]: audit 2026-03-10T08:43:54.474670+0000 mon.vm02 (mon.0) 894 : audit [DBG] from='client.? 192.168.123.102:0/4193186851' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:43:56.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:55 vm02 bash[17473]: audit 2026-03-10T08:43:54.131251+0000 mgr.vm02.ttibzz (mgr.14195) 423 : audit [DBG] from='client.15148 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:43:56.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:55 vm02 bash[17473]: audit 2026-03-10T08:43:54.290670+0000 mgr.vm02.ttibzz (mgr.14195) 424 : audit [DBG] from='client.15152 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:43:57.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:56 vm02 bash[17473]: cluster 2026-03-10T08:43:55.846383+0000 mgr.vm02.ttibzz (mgr.14195) 425 : cluster [DBG] pgmap v260: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:43:58.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:43:57 vm02 bash[17473]: cluster 2026-03-10T08:43:57.846780+0000 mgr.vm02.ttibzz (mgr.14195) 426 : cluster [DBG] pgmap v261: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:43:59.634 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:43:59.779 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:43:59.779 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 30s ago 4m - -
2026-03-10T08:43:59.779 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (4m) 30s ago 4m 111M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:43:59.779 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (4m) 30s ago 4m 101M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:43:59.779 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (4m) 30s ago 4m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:43:59.961 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:43:59.961 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:43:59.961 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:44:01.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:00 vm02 bash[17473]: audit 2026-03-10T08:43:59.624510+0000 mgr.vm02.ttibzz (mgr.14195) 427 : audit [DBG] from='client.15160 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:44:01.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:00 vm02 bash[17473]: audit 2026-03-10T08:43:59.779630+0000 mgr.vm02.ttibzz (mgr.14195) 428 : audit [DBG] from='client.15164 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:44:01.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:00 vm02 bash[17473]: cluster 2026-03-10T08:43:59.847128+0000 mgr.vm02.ttibzz (mgr.14195) 429 : cluster [DBG] pgmap v262: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:44:01.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:00 vm02 bash[17473]: audit 2026-03-10T08:43:59.961251+0000 mon.vm07 (mon.1) 37 : audit [DBG] from='client.? 192.168.123.102:0/2591968809' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:44:03.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:02 vm02 bash[17473]: cluster 2026-03-10T08:44:01.847501+0000 mgr.vm02.ttibzz (mgr.14195) 430 : cluster [DBG] pgmap v263: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:44:05.126 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:44:05.253 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:04 vm02 bash[17473]: cluster 2026-03-10T08:44:03.847887+0000 mgr.vm02.ttibzz (mgr.14195) 431 : cluster [DBG] pgmap v264: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:44:05.253 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:04 vm02 bash[17473]: audit 2026-03-10T08:44:04.242167+0000 mon.vm02 (mon.0) 895 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:44:05.265 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:44:05.265 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 35s ago 5m - -
2026-03-10T08:44:05.265 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (5m) 35s ago 5m 111M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:44:05.265 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (5m) 36s ago 5m 101M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:44:05.265 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (5m) 36s ago 5m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:44:05.440 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:44:05.440 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:44:05.440 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:44:06.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:05 vm02 bash[17473]: audit 2026-03-10T08:44:05.443472+0000 mon.vm02 (mon.0) 896 : audit [DBG] from='client.? 192.168.123.102:0/3418074208' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:44:07.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:06 vm02 bash[17473]: audit 2026-03-10T08:44:05.116390+0000 mgr.vm02.ttibzz (mgr.14195) 432 : audit [DBG] from='client.15172 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:44:07.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:06 vm02 bash[17473]: audit 2026-03-10T08:44:05.266427+0000 mgr.vm02.ttibzz (mgr.14195) 433 : audit [DBG] from='client.15176 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:44:07.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:06 vm02 bash[17473]: cluster 2026-03-10T08:44:05.848276+0000 mgr.vm02.ttibzz (mgr.14195) 434 : cluster [DBG] pgmap v265: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:44:09.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:08 vm02 bash[17473]: cluster 2026-03-10T08:44:07.848669+0000 mgr.vm02.ttibzz (mgr.14195) 435 : cluster [DBG] pgmap v266: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:44:10.600 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop
2026-03-10T08:44:10.749 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:44:10.749 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 41s ago 5m - -
2026-03-10T08:44:10.749 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (5m) 41s ago 5m 111M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:44:10.749 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (5m) 41s ago 5m 101M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:44:10.749 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07
*:8001 running (5m) 41s ago 5m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:44:10.922 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:44:10.922 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:44:10.922 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state 2026-03-10T08:44:11.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:10 vm02 bash[17473]: cluster 2026-03-10T08:44:09.849061+0000 mgr.vm02.ttibzz (mgr.14195) 436 : cluster [DBG] pgmap v267: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:44:11.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:10 vm02 bash[17473]: cluster 2026-03-10T08:44:09.849061+0000 mgr.vm02.ttibzz (mgr.14195) 436 : cluster [DBG] pgmap v267: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:44:11.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:10 vm02 bash[17473]: audit 2026-03-10T08:44:10.925336+0000 mon.vm02 (mon.0) 897 : audit [DBG] from='client.? 192.168.123.102:0/9465257' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:44:11.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:10 vm02 bash[17473]: audit 2026-03-10T08:44:10.925336+0000 mon.vm02 (mon.0) 897 : audit [DBG] from='client.? 192.168.123.102:0/9465257' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:44:12.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:11 vm02 bash[17473]: audit 2026-03-10T08:44:10.590147+0000 mgr.vm02.ttibzz (mgr.14195) 437 : audit [DBG] from='client.15184 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:12.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:11 vm02 bash[17473]: audit 2026-03-10T08:44:10.590147+0000 mgr.vm02.ttibzz (mgr.14195) 437 : audit [DBG] from='client.15184 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:12.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:11 vm02 bash[17473]: audit 2026-03-10T08:44:10.749717+0000 mgr.vm02.ttibzz (mgr.14195) 438 : audit [DBG] from='client.15188 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:12.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:11 vm02 bash[17473]: audit 2026-03-10T08:44:10.749717+0000 mgr.vm02.ttibzz (mgr.14195) 438 : audit [DBG] from='client.15188 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:13.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:12 vm02 bash[17473]: cluster 2026-03-10T08:44:11.849529+0000 mgr.vm02.ttibzz (mgr.14195) 439 : cluster [DBG] pgmap v268: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:44:13.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:12 vm02 bash[17473]: cluster 2026-03-10T08:44:11.849529+0000 mgr.vm02.ttibzz (mgr.14195) 439 : cluster [DBG] pgmap v268: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:44:14.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:13 vm02 bash[17473]: cluster 2026-03-10T08:44:13.849938+0000 mgr.vm02.ttibzz (mgr.14195) 440 : cluster [DBG] pgmap v269: 
129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:44:14.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:13 vm02 bash[17473]: cluster 2026-03-10T08:44:13.849938+0000 mgr.vm02.ttibzz (mgr.14195) 440 : cluster [DBG] pgmap v269: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:44:16.087 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop 2026-03-10T08:44:16.229 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:44:16.229 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 46s ago 5m - - 2026-03-10T08:44:16.229 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (5m) 46s ago 5m 111M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a 2026-03-10T08:44:16.229 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (5m) 46s ago 5m 101M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584 2026-03-10T08:44:16.229 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (5m) 46s ago 5m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:44:16.410 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:44:16.410 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:44:16.410 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state 2026-03-10T08:44:17.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:16 vm02 bash[17473]: cluster 2026-03-10T08:44:15.850439+0000 mgr.vm02.ttibzz (mgr.14195) 441 : cluster [DBG] pgmap v270: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:44:17.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:16 vm02 bash[17473]: cluster 2026-03-10T08:44:15.850439+0000 mgr.vm02.ttibzz (mgr.14195) 441 : cluster [DBG] pgmap v270: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:44:17.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:16 vm02 bash[17473]: audit 2026-03-10T08:44:16.413369+0000 mon.vm02 (mon.0) 898 : audit [DBG] from='client.? 192.168.123.102:0/1908261087' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:44:17.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:16 vm02 bash[17473]: audit 2026-03-10T08:44:16.413369+0000 mon.vm02 (mon.0) 898 : audit [DBG] from='client.? 
192.168.123.102:0/1908261087' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:44:18.172 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:17 vm02 bash[17473]: audit 2026-03-10T08:44:16.077408+0000 mgr.vm02.ttibzz (mgr.14195) 442 : audit [DBG] from='client.15196 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:18.172 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:17 vm02 bash[17473]: audit 2026-03-10T08:44:16.077408+0000 mgr.vm02.ttibzz (mgr.14195) 442 : audit [DBG] from='client.15196 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:18.172 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:17 vm02 bash[17473]: audit 2026-03-10T08:44:16.230287+0000 mgr.vm02.ttibzz (mgr.14195) 443 : audit [DBG] from='client.15200 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:18.172 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:17 vm02 bash[17473]: audit 2026-03-10T08:44:16.230287+0000 mgr.vm02.ttibzz (mgr.14195) 443 : audit [DBG] from='client.15200 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:19.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:18 vm02 bash[17473]: cluster 2026-03-10T08:44:17.850820+0000 mgr.vm02.ttibzz (mgr.14195) 444 : cluster [DBG] pgmap v271: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:44:19.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:18 vm02 bash[17473]: cluster 2026-03-10T08:44:17.850820+0000 mgr.vm02.ttibzz (mgr.14195) 444 : cluster [DBG] pgmap v271: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:44:20.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:19 vm02 bash[17473]: audit 2026-03-10T08:44:19.242427+0000 mon.vm02 (mon.0) 899 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:44:20.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:19 vm02 bash[17473]: audit 2026-03-10T08:44:19.242427+0000 mon.vm02 (mon.0) 899 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:44:21.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:20 vm02 bash[17473]: cluster 2026-03-10T08:44:19.851182+0000 mgr.vm02.ttibzz (mgr.14195) 445 : cluster [DBG] pgmap v272: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:44:21.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:20 vm02 bash[17473]: cluster 2026-03-10T08:44:19.851182+0000 mgr.vm02.ttibzz (mgr.14195) 445 : cluster [DBG] pgmap v272: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:44:21.568 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop 2026-03-10T08:44:21.710 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:44:21.710 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 52s ago 
5m - - 2026-03-10T08:44:21.710 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (5m) 52s ago 5m 111M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a 2026-03-10T08:44:21.710 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (5m) 52s ago 5m 101M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584 2026-03-10T08:44:21.710 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (5m) 52s ago 5m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:44:21.886 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:44:21.886 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:44:21.886 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state 2026-03-10T08:44:22.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:21 vm02 bash[17473]: audit 2026-03-10T08:44:21.889573+0000 mon.vm02 (mon.0) 900 : audit [DBG] from='client.? 192.168.123.102:0/1853317504' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:44:22.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:21 vm02 bash[17473]: audit 2026-03-10T08:44:21.889573+0000 mon.vm02 (mon.0) 900 : audit [DBG] from='client.? 192.168.123.102:0/1853317504' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:44:23.222 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:22 vm02 bash[17473]: audit 2026-03-10T08:44:21.558353+0000 mgr.vm02.ttibzz (mgr.14195) 446 : audit [DBG] from='client.24783 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:23.222 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:22 vm02 bash[17473]: audit 2026-03-10T08:44:21.558353+0000 mgr.vm02.ttibzz (mgr.14195) 446 : audit [DBG] from='client.24783 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:23.222 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:22 vm02 bash[17473]: audit 2026-03-10T08:44:21.710842+0000 mgr.vm02.ttibzz (mgr.14195) 447 : audit [DBG] from='client.15212 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:23.222 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:22 vm02 bash[17473]: audit 2026-03-10T08:44:21.710842+0000 mgr.vm02.ttibzz (mgr.14195) 447 : audit [DBG] from='client.15212 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:23.222 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:22 vm02 bash[17473]: cluster 2026-03-10T08:44:21.851603+0000 mgr.vm02.ttibzz (mgr.14195) 448 : cluster [DBG] pgmap v273: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:44:23.222 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:22 vm02 bash[17473]: cluster 2026-03-10T08:44:21.851603+0000 mgr.vm02.ttibzz (mgr.14195) 448 : cluster [DBG] pgmap v273: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:44:24.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:24 vm02 bash[17473]: cluster 2026-03-10T08:44:23.852005+0000 mgr.vm02.ttibzz (mgr.14195) 449 : cluster [DBG] pgmap v274: 129 pgs: 129 active+clean; 454 KiB data, 
226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:44:24.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:24 vm02 bash[17473]: cluster 2026-03-10T08:44:23.852005+0000 mgr.vm02.ttibzz (mgr.14195) 449 : cluster [DBG] pgmap v274: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:44:27.061 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop 2026-03-10T08:44:27.223 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:44:27.223 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 57s ago 5m - - 2026-03-10T08:44:27.223 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (5m) 57s ago 5m 111M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a 2026-03-10T08:44:27.223 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (5m) 57s ago 5m 101M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584 2026-03-10T08:44:27.223 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (5m) 57s ago 5m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:44:27.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:26 vm02 bash[17473]: cluster 2026-03-10T08:44:25.852397+0000 mgr.vm02.ttibzz (mgr.14195) 450 : cluster [DBG] pgmap v275: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:44:27.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:26 vm02 bash[17473]: cluster 2026-03-10T08:44:25.852397+0000 mgr.vm02.ttibzz (mgr.14195) 450 : cluster [DBG] pgmap v275: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:44:27.417 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:44:27.417 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:44:27.417 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state 2026-03-10T08:44:28.274 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:27 vm02 bash[17473]: audit 2026-03-10T08:44:27.420747+0000 mon.vm02 (mon.0) 901 : audit [DBG] from='client.? 192.168.123.102:0/750374796' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:44:28.274 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:27 vm02 bash[17473]: audit 2026-03-10T08:44:27.420747+0000 mon.vm02 (mon.0) 901 : audit [DBG] from='client.? 
192.168.123.102:0/750374796' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:44:29.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:28 vm02 bash[17473]: audit 2026-03-10T08:44:27.049894+0000 mgr.vm02.ttibzz (mgr.14195) 451 : audit [DBG] from='client.15220 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:29.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:28 vm02 bash[17473]: audit 2026-03-10T08:44:27.049894+0000 mgr.vm02.ttibzz (mgr.14195) 451 : audit [DBG] from='client.15220 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:29.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:28 vm02 bash[17473]: audit 2026-03-10T08:44:27.223619+0000 mgr.vm02.ttibzz (mgr.14195) 452 : audit [DBG] from='client.15224 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:29.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:28 vm02 bash[17473]: audit 2026-03-10T08:44:27.223619+0000 mgr.vm02.ttibzz (mgr.14195) 452 : audit [DBG] from='client.15224 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:29.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:28 vm02 bash[17473]: cluster 2026-03-10T08:44:27.852812+0000 mgr.vm02.ttibzz (mgr.14195) 453 : cluster [DBG] pgmap v276: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:44:29.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:28 vm02 bash[17473]: cluster 2026-03-10T08:44:27.852812+0000 mgr.vm02.ttibzz (mgr.14195) 453 : cluster [DBG] pgmap v276: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:44:30.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:29 vm02 bash[17473]: audit 2026-03-10T08:44:29.888235+0000 mon.vm02 (mon.0) 902 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:44:30.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:29 vm02 bash[17473]: audit 2026-03-10T08:44:29.888235+0000 mon.vm02 (mon.0) 902 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:44:31.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:30 vm02 bash[17473]: cluster 2026-03-10T08:44:29.853160+0000 mgr.vm02.ttibzz (mgr.14195) 454 : cluster [DBG] pgmap v277: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:44:31.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:30 vm02 bash[17473]: cluster 2026-03-10T08:44:29.853160+0000 mgr.vm02.ttibzz (mgr.14195) 454 : cluster [DBG] pgmap v277: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:44:32.582 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop 2026-03-10T08:44:32.743 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:44:32.743 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 63s ago 5m - - 2026-03-10T08:44:32.743 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (5m) 63s ago 5m 111M - 
19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a 2026-03-10T08:44:32.743 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (5m) 63s ago 5m 101M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584 2026-03-10T08:44:32.743 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (5m) 63s ago 5m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:44:32.924 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:44:32.924 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:44:32.924 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state 2026-03-10T08:44:33.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:32 vm02 bash[17473]: cluster 2026-03-10T08:44:31.853573+0000 mgr.vm02.ttibzz (mgr.14195) 455 : cluster [DBG] pgmap v278: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:44:33.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:32 vm02 bash[17473]: cluster 2026-03-10T08:44:31.853573+0000 mgr.vm02.ttibzz (mgr.14195) 455 : cluster [DBG] pgmap v278: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:44:33.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:32 vm02 bash[17473]: audit 2026-03-10T08:44:32.927869+0000 mon.vm02 (mon.0) 903 : audit [DBG] from='client.? 192.168.123.102:0/4175675933' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:44:33.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:32 vm02 bash[17473]: audit 2026-03-10T08:44:32.927869+0000 mon.vm02 (mon.0) 903 : audit [DBG] from='client.? 
192.168.123.102:0/4175675933' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:44:34.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:33 vm02 bash[17473]: audit 2026-03-10T08:44:32.571346+0000 mgr.vm02.ttibzz (mgr.14195) 456 : audit [DBG] from='client.15232 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:34.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:33 vm02 bash[17473]: audit 2026-03-10T08:44:32.571346+0000 mgr.vm02.ttibzz (mgr.14195) 456 : audit [DBG] from='client.15232 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:34.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:33 vm02 bash[17473]: audit 2026-03-10T08:44:32.743830+0000 mgr.vm02.ttibzz (mgr.14195) 457 : audit [DBG] from='client.24799 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:34.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:33 vm02 bash[17473]: audit 2026-03-10T08:44:32.743830+0000 mgr.vm02.ttibzz (mgr.14195) 457 : audit [DBG] from='client.24799 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:34.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:33 vm02 bash[17473]: cluster 2026-03-10T08:44:33.853967+0000 mgr.vm02.ttibzz (mgr.14195) 458 : cluster [DBG] pgmap v279: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:44:34.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:33 vm02 bash[17473]: cluster 2026-03-10T08:44:33.853967+0000 mgr.vm02.ttibzz (mgr.14195) 458 : cluster [DBG] pgmap v279: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:44:35.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:34 vm02 bash[17473]: audit 2026-03-10T08:44:34.242483+0000 mon.vm02 (mon.0) 904 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:44:35.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:34 vm02 bash[17473]: audit 2026-03-10T08:44:34.242483+0000 mon.vm02 (mon.0) 904 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:44:36.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:36 vm02 bash[17473]: audit 2026-03-10T08:44:35.337706+0000 mon.vm02 (mon.0) 905 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:44:36.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:36 vm02 bash[17473]: audit 2026-03-10T08:44:35.337706+0000 mon.vm02 (mon.0) 905 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:44:36.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:36 vm02 bash[17473]: audit 2026-03-10T08:44:35.342464+0000 mon.vm02 (mon.0) 906 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:44:36.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:36 vm02 bash[17473]: audit 2026-03-10T08:44:35.342464+0000 mon.vm02 (mon.0) 906 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:44:36.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 
08:44:36 vm02 bash[17473]: audit 2026-03-10T08:44:35.631271+0000 mon.vm02 (mon.0) 907 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:44:36.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:36 vm02 bash[17473]: audit 2026-03-10T08:44:35.631271+0000 mon.vm02 (mon.0) 907 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:44:36.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:36 vm02 bash[17473]: audit 2026-03-10T08:44:35.631764+0000 mon.vm02 (mon.0) 908 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T08:44:36.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:36 vm02 bash[17473]: audit 2026-03-10T08:44:35.631764+0000 mon.vm02 (mon.0) 908 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T08:44:36.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:36 vm02 bash[17473]: cluster 2026-03-10T08:44:35.632617+0000 mgr.vm02.ttibzz (mgr.14195) 459 : cluster [DBG] pgmap v280: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:44:36.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:36 vm02 bash[17473]: cluster 2026-03-10T08:44:35.632617+0000 mgr.vm02.ttibzz (mgr.14195) 459 : cluster [DBG] pgmap v280: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:44:36.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:36 vm02 bash[17473]: cluster 2026-03-10T08:44:35.632735+0000 mgr.vm02.ttibzz (mgr.14195) 460 : cluster [DBG] pgmap v281: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:44:36.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:36 vm02 bash[17473]: cluster 2026-03-10T08:44:35.632735+0000 mgr.vm02.ttibzz (mgr.14195) 460 : cluster [DBG] pgmap v281: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:44:36.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:36 vm02 bash[17473]: audit 2026-03-10T08:44:35.636639+0000 mon.vm02 (mon.0) 909 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:44:36.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:36 vm02 bash[17473]: audit 2026-03-10T08:44:35.636639+0000 mon.vm02 (mon.0) 909 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:44:36.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:36 vm02 bash[17473]: audit 2026-03-10T08:44:35.637896+0000 mon.vm02 (mon.0) 910 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T08:44:36.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:36 vm02 bash[17473]: audit 2026-03-10T08:44:35.637896+0000 mon.vm02 (mon.0) 910 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T08:44:38.090 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop 2026-03-10T08:44:38.241 
INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:44:38.241 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 2s ago 5m - - 2026-03-10T08:44:38.241 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (5m) 2s ago 5m 116M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a 2026-03-10T08:44:38.241 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (5m) 68s ago 5m 101M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584 2026-03-10T08:44:38.241 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (5m) 68s ago 5m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:44:38.433 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:44:38.433 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:44:38.434 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state 2026-03-10T08:44:39.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:38 vm02 bash[17473]: cluster 2026-03-10T08:44:37.633151+0000 mgr.vm02.ttibzz (mgr.14195) 461 : cluster [DBG] pgmap v282: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:44:39.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:38 vm02 bash[17473]: cluster 2026-03-10T08:44:37.633151+0000 mgr.vm02.ttibzz (mgr.14195) 461 : cluster [DBG] pgmap v282: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:44:39.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:38 vm02 bash[17473]: audit 2026-03-10T08:44:38.437297+0000 mon.vm02 (mon.0) 911 : audit [DBG] from='client.? 192.168.123.102:0/3394853844' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:44:39.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:38 vm02 bash[17473]: audit 2026-03-10T08:44:38.437297+0000 mon.vm02 (mon.0) 911 : audit [DBG] from='client.? 
192.168.123.102:0/3394853844' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:44:40.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:39 vm02 bash[17473]: audit 2026-03-10T08:44:38.080186+0000 mgr.vm02.ttibzz (mgr.14195) 462 : audit [DBG] from='client.24801 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:40.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:39 vm02 bash[17473]: audit 2026-03-10T08:44:38.080186+0000 mgr.vm02.ttibzz (mgr.14195) 462 : audit [DBG] from='client.24801 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:40.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:39 vm02 bash[17473]: audit 2026-03-10T08:44:38.241824+0000 mgr.vm02.ttibzz (mgr.14195) 463 : audit [DBG] from='client.15248 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:40.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:39 vm02 bash[17473]: audit 2026-03-10T08:44:38.241824+0000 mgr.vm02.ttibzz (mgr.14195) 463 : audit [DBG] from='client.15248 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:41.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:40 vm02 bash[17473]: cluster 2026-03-10T08:44:39.633576+0000 mgr.vm02.ttibzz (mgr.14195) 464 : cluster [DBG] pgmap v283: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:44:41.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:40 vm02 bash[17473]: cluster 2026-03-10T08:44:39.633576+0000 mgr.vm02.ttibzz (mgr.14195) 464 : cluster [DBG] pgmap v283: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:44:43.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:42 vm02 bash[17473]: cluster 2026-03-10T08:44:41.634091+0000 mgr.vm02.ttibzz (mgr.14195) 465 : cluster [DBG] pgmap v284: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:44:43.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:42 vm02 bash[17473]: cluster 2026-03-10T08:44:41.634091+0000 mgr.vm02.ttibzz (mgr.14195) 465 : cluster [DBG] pgmap v284: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:44:43.624 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to stop 2026-03-10T08:44:43.782 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:44:43.782 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 8s ago 5m - - 2026-03-10T08:44:43.782 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (5m) 8s ago 5m 116M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a 2026-03-10T08:44:43.782 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (5m) 74s ago 5m 101M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584 2026-03-10T08:44:43.782 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (5m) 74s ago 5m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:44:43.966 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:44:43.966 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed 
cephadm daemon(s) 2026-03-10T08:44:43.966 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state 2026-03-10T08:44:45.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:44 vm02 bash[17473]: audit 2026-03-10T08:44:43.613259+0000 mgr.vm02.ttibzz (mgr.14195) 466 : audit [DBG] from='client.15256 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:45.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:44 vm02 bash[17473]: audit 2026-03-10T08:44:43.613259+0000 mgr.vm02.ttibzz (mgr.14195) 466 : audit [DBG] from='client.15256 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:45.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:44 vm02 bash[17473]: cluster 2026-03-10T08:44:43.634543+0000 mgr.vm02.ttibzz (mgr.14195) 467 : cluster [DBG] pgmap v285: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:44:45.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:44 vm02 bash[17473]: cluster 2026-03-10T08:44:43.634543+0000 mgr.vm02.ttibzz (mgr.14195) 467 : cluster [DBG] pgmap v285: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:44:45.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:44 vm02 bash[17473]: audit 2026-03-10T08:44:43.783543+0000 mgr.vm02.ttibzz (mgr.14195) 468 : audit [DBG] from='client.15260 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:45.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:44 vm02 bash[17473]: audit 2026-03-10T08:44:43.783543+0000 mgr.vm02.ttibzz (mgr.14195) 468 : audit [DBG] from='client.15260 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:45.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:44 vm02 bash[17473]: audit 2026-03-10T08:44:43.969623+0000 mon.vm02 (mon.0) 912 : audit [DBG] from='client.? 192.168.123.102:0/194708285' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:44:45.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:44 vm02 bash[17473]: audit 2026-03-10T08:44:43.969623+0000 mon.vm02 (mon.0) 912 : audit [DBG] from='client.? 
192.168.123.102:0/194708285' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:44:46.261 INFO:teuthology.orchestra.run.vm02.stderr: % Total % Received % Xferd Average Speed Time Time Time Current 2026-03-10T08:44:46.261 INFO:teuthology.orchestra.run.vm02.stderr: Dload Upload Total Spent Left Speed 2026-03-10T08:44:46.261 INFO:teuthology.orchestra.run.vm02.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k 2026-03-10T08:44:46.424 INFO:teuthology.orchestra.run.vm02.stdout:anonymousScheduled to start rgw.foo.vm02.bmgnwf on host 'vm02' 2026-03-10T08:44:46.639 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to start 2026-03-10T08:44:46.809 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:44:46.809 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 11s ago 5m - - 2026-03-10T08:44:46.809 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (5m) 11s ago 5m 116M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a 2026-03-10T08:44:46.809 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (5m) 77s ago 5m 101M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584 2026-03-10T08:44:46.809 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (5m) 77s ago 5m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:44:46.994 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:44:46.994 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:44:46.994 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state 2026-03-10T08:44:47.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:46 vm02 bash[17473]: cluster 2026-03-10T08:44:45.634935+0000 mgr.vm02.ttibzz (mgr.14195) 469 : cluster [DBG] pgmap v286: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 307 B/s rd, 614 B/s wr, 0 op/s 2026-03-10T08:44:47.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:46 vm02 bash[17473]: cluster 2026-03-10T08:44:45.634935+0000 mgr.vm02.ttibzz (mgr.14195) 469 : cluster [DBG] pgmap v286: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 307 B/s rd, 614 B/s wr, 0 op/s 2026-03-10T08:44:47.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:46 vm02 bash[17473]: audit 2026-03-10T08:44:46.421811+0000 mon.vm02 (mon.0) 913 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:44:47.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:46 vm02 bash[17473]: audit 2026-03-10T08:44:46.421811+0000 mon.vm02 (mon.0) 913 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:44:47.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:46 vm02 bash[17473]: audit 2026-03-10T08:44:46.427517+0000 mon.vm02 (mon.0) 914 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:44:47.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:46 vm02 bash[17473]: audit 2026-03-10T08:44:46.427517+0000 mon.vm02 (mon.0) 914 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:44:47.033 
INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:46 vm02 bash[17473]: audit 2026-03-10T08:44:46.431263+0000 mon.vm02 (mon.0) 915 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:44:47.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:46 vm02 bash[17473]: audit 2026-03-10T08:44:46.431263+0000 mon.vm02 (mon.0) 915 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:44:47.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:46 vm02 bash[17473]: audit 2026-03-10T08:44:46.432858+0000 mon.vm02 (mon.0) 916 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:44:47.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:46 vm02 bash[17473]: audit 2026-03-10T08:44:46.432858+0000 mon.vm02 (mon.0) 916 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:44:47.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:46 vm02 bash[17473]: audit 2026-03-10T08:44:46.433544+0000 mon.vm02 (mon.0) 917 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T08:44:47.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:46 vm02 bash[17473]: audit 2026-03-10T08:44:46.433544+0000 mon.vm02 (mon.0) 917 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T08:44:47.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:46 vm02 bash[17473]: audit 2026-03-10T08:44:46.440907+0000 mon.vm02 (mon.0) 918 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:44:47.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:46 vm02 bash[17473]: audit 2026-03-10T08:44:46.440907+0000 mon.vm02 (mon.0) 918 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:44:47.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:46 vm02 bash[17473]: audit 2026-03-10T08:44:46.443167+0000 mon.vm02 (mon.0) 919 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T08:44:47.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:46 vm02 bash[17473]: audit 2026-03-10T08:44:46.443167+0000 mon.vm02 (mon.0) 919 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T08:44:48.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:47 vm02 bash[17473]: audit 2026-03-10T08:44:46.416340+0000 mgr.vm02.ttibzz (mgr.14195) 470 : audit [DBG] from='client.15268 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm02.bmgnwf", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:48.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:47 vm02 bash[17473]: audit 2026-03-10T08:44:46.416340+0000 mgr.vm02.ttibzz (mgr.14195) 470 : audit [DBG] from='client.15268 -' entity='client.admin' cmd=[{"prefix": "orch daemon", 
"action": "start", "name": "rgw.foo.vm02.bmgnwf", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:48.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:47 vm02 bash[17473]: cephadm 2026-03-10T08:44:46.416650+0000 mgr.vm02.ttibzz (mgr.14195) 471 : cephadm [INF] Schedule start daemon rgw.foo.vm02.bmgnwf 2026-03-10T08:44:48.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:47 vm02 bash[17473]: cephadm 2026-03-10T08:44:46.416650+0000 mgr.vm02.ttibzz (mgr.14195) 471 : cephadm [INF] Schedule start daemon rgw.foo.vm02.bmgnwf 2026-03-10T08:44:48.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:47 vm02 bash[17473]: cluster 2026-03-10T08:44:46.434560+0000 mgr.vm02.ttibzz (mgr.14195) 472 : cluster [DBG] pgmap v287: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 284 B/s rd, 568 B/s wr, 0 op/s 2026-03-10T08:44:48.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:47 vm02 bash[17473]: cluster 2026-03-10T08:44:46.434560+0000 mgr.vm02.ttibzz (mgr.14195) 472 : cluster [DBG] pgmap v287: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 284 B/s rd, 568 B/s wr, 0 op/s 2026-03-10T08:44:48.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:47 vm02 bash[17473]: cluster 2026-03-10T08:44:46.434661+0000 mgr.vm02.ttibzz (mgr.14195) 473 : cluster [DBG] pgmap v288: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 349 B/s rd, 698 B/s wr, 1 op/s 2026-03-10T08:44:48.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:47 vm02 bash[17473]: cluster 2026-03-10T08:44:46.434661+0000 mgr.vm02.ttibzz (mgr.14195) 473 : cluster [DBG] pgmap v288: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 349 B/s rd, 698 B/s wr, 1 op/s 2026-03-10T08:44:48.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:47 vm02 bash[17473]: audit 2026-03-10T08:44:46.621783+0000 mgr.vm02.ttibzz (mgr.14195) 474 : audit [DBG] from='client.15272 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:48.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:47 vm02 bash[17473]: audit 2026-03-10T08:44:46.621783+0000 mgr.vm02.ttibzz (mgr.14195) 474 : audit [DBG] from='client.15272 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:48.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:47 vm02 bash[17473]: audit 2026-03-10T08:44:46.809726+0000 mgr.vm02.ttibzz (mgr.14195) 475 : audit [DBG] from='client.15276 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:48.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:47 vm02 bash[17473]: audit 2026-03-10T08:44:46.809726+0000 mgr.vm02.ttibzz (mgr.14195) 475 : audit [DBG] from='client.15276 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:44:48.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:47 vm02 bash[17473]: audit 2026-03-10T08:44:46.997383+0000 mon.vm02 (mon.0) 920 : audit [DBG] from='client.? 192.168.123.102:0/3559036944' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:44:48.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:47 vm02 bash[17473]: audit 2026-03-10T08:44:46.997383+0000 mon.vm02 (mon.0) 920 : audit [DBG] from='client.? 
2026-03-10T08:44:50.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:49 vm02 bash[17473]: cluster 2026-03-10T08:44:48.435098+0000 mgr.vm02.ttibzz (mgr.14195) 476 : cluster [DBG] pgmap v289: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 349 B/s rd, 698 B/s wr, 1 op/s
2026-03-10T08:44:50.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:49 vm02 bash[17473]: audit 2026-03-10T08:44:49.242791+0000 mon.vm02 (mon.0) 921 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:44:52.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:51 vm02 bash[17473]: cluster 2026-03-10T08:44:50.435578+0000 mgr.vm02.ttibzz (mgr.14195) 477 : cluster [DBG] pgmap v290: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 349 B/s rd, 698 B/s wr, 1 op/s
2026-03-10T08:44:52.168 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to start
2026-03-10T08:44:52.320 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:44:52.320 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 16s ago 5m - -
2026-03-10T08:44:52.320 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (5m) 16s ago 5m 116M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:44:52.320 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (5m) 83s ago 5m 101M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:44:52.320 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (5m) 83s ago 5m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:44:52.503 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:44:52.503 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:44:52.503 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:44:53.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:52 vm02 bash[17473]: audit 2026-03-10T08:44:52.503742+0000 mon.vm07 (mon.1) 38 : audit [DBG] from='client.? 192.168.123.102:0/3091052019' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:44:54.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:53 vm02 bash[17473]: audit 2026-03-10T08:44:52.157141+0000 mgr.vm02.ttibzz (mgr.14195) 478 : audit [DBG] from='client.15284 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:44:54.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:53 vm02 bash[17473]: audit 2026-03-10T08:44:52.321418+0000 mgr.vm02.ttibzz (mgr.14195) 479 : audit [DBG] from='client.15288 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:44:54.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:53 vm02 bash[17473]: cluster 2026-03-10T08:44:52.435922+0000 mgr.vm02.ttibzz (mgr.14195) 480 : cluster [DBG] pgmap v291: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 349 B/s rd, 698 B/s wr, 1 op/s
2026-03-10T08:44:56.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:55 vm02 bash[17473]: cluster 2026-03-10T08:44:54.436254+0000 mgr.vm02.ttibzz (mgr.14195) 481 : cluster [DBG] pgmap v292: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:44:56.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:55 vm02 bash[17473]: audit 2026-03-10T08:44:55.346809+0000 mon.vm02 (mon.0) 922 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:44:56.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:55 vm02 bash[17473]: audit 2026-03-10T08:44:55.367771+0000 mon.vm02 (mon.0) 923 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:44:56.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:55 vm02 bash[17473]: audit 2026-03-10T08:44:55.405485+0000 mon.vm02 (mon.0) 924 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:44:57.678 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.bmgnwf to start
2026-03-10T08:44:57.834 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:44:57.834 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 error 22s ago 5m - -
2026-03-10T08:44:57.834 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (5m) 22s ago 5m 116M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:44:57.834 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (5m) 88s ago 5m 101M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:44:57.834 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (5m) 88s ago 5m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:44:58.022 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:44:58.022 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:44:58.022 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm02.bmgnwf on vm02 is in error state
2026-03-10T08:44:58.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:57 vm02 bash[17473]: cluster 2026-03-10T08:44:56.436742+0000 mgr.vm02.ttibzz (mgr.14195) 482 : cluster [DBG] pgmap v293: 129 pgs: 129 active+clean; 454 KiB data, 226 MiB used, 160 GiB / 160 GiB avail; 3.4 KiB/s rd, 0 B/s wr, 4 op/s
2026-03-10T08:44:59.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:58 vm02 bash[17473]: audit 2026-03-10T08:44:57.666990+0000 mgr.vm02.ttibzz (mgr.14195) 483 : audit [DBG] from='client.15306 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:44:59.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:58 vm02 bash[17473]: audit 2026-03-10T08:44:57.834258+0000 mgr.vm02.ttibzz (mgr.14195) 484 : audit [DBG] from='client.15310 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:44:59.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:58 vm02 bash[17473]: audit 2026-03-10T08:44:58.025440+0000 mon.vm02 (mon.0) 925 : audit [DBG] from='client.? 192.168.123.102:0/2985502216' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:45:00.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:44:59 vm02 bash[17473]: cluster 2026-03-10T08:44:58.437216+0000 mgr.vm02.ttibzz (mgr.14195) 485 : cluster [DBG] pgmap v294: 129 pgs: 129 active+clean; 454 KiB data, 239 MiB used, 160 GiB / 160 GiB avail; 22 KiB/s rd, 0 B/s wr, 34 op/s
2026-03-10T08:45:01.533 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:01 vm02 bash[17473]: audit 2026-03-10T08:45:00.052572+0000 mon.vm02 (mon.0) 926 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:45:01.533 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:01 vm02 bash[17473]: audit 2026-03-10T08:45:00.056692+0000 mon.vm02 (mon.0) 927 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:45:01.533 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:01 vm02 bash[17473]: audit 2026-03-10T08:45:00.057606+0000 mon.vm02 (mon.0) 928 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:45:01.533 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:01 vm02 bash[17473]: audit 2026-03-10T08:45:00.058002+0000 mon.vm02 (mon.0) 929 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T08:45:01.533 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:01 vm02 bash[17473]: cluster 2026-03-10T08:45:00.058800+0000 mgr.vm02.ttibzz (mgr.14195) 486 : cluster [DBG] pgmap v295: 129 pgs: 129 active+clean; 454 KiB data, 239 MiB used, 160 GiB / 160 GiB avail; 30 KiB/s rd, 0 B/s wr, 46 op/s
2026-03-10T08:45:01.533 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:01 vm02 bash[17473]: audit 2026-03-10T08:45:00.061586+0000 mon.vm02 (mon.0) 930 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:45:01.533 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:01 vm02 bash[17473]: audit 2026-03-10T08:45:00.063033+0000 mon.vm02 (mon.0) 931 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T08:45:02.533 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:02 vm02 bash[17473]: cluster 2026-03-10T08:45:01.056664+0000 mon.vm02 (mon.0) 932 : cluster [INF] Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s))
2026-03-10T08:45:02.533 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:02 vm02 bash[17473]: cluster 2026-03-10T08:45:01.056675+0000 mon.vm02 (mon.0) 933 : cluster [INF] Cluster is now healthy
2026-03-10T08:45:03.184 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (7s) 3s ago 5m 89.0M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:45:03.335 INFO:teuthology.orchestra.run.vm02.stdout:Scheduled to stop rgw.foo.vm02.rugqqv on host 'vm02'
2026-03-10T08:45:03.526 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.rugqqv to stop
2026-03-10T08:45:03.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:03 vm02 bash[17473]: cluster 2026-03-10T08:45:02.059225+0000 mgr.vm02.ttibzz (mgr.14195) 487 : cluster [DBG] pgmap v296: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 55 KiB/s rd, 0 B/s wr, 84 op/s
2026-03-10T08:45:03.683 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:45:03.683 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (8s) 3s ago 5m 89.0M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:45:03.683 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (6m) 3s ago 6m 117M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:45:03.683 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (6m) 94s ago 6m 101M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:45:03.683 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (5m) 94s ago 5m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:45:03.885 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK
2026-03-10T08:45:04.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:04 vm02 bash[17473]: audit 2026-03-10T08:45:03.175354+0000 mgr.vm02.ttibzz (mgr.14195) 488 : audit [DBG] from='client.15318 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:45:04.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:04 vm02 bash[17473]: audit 2026-03-10T08:45:03.329588+0000 mgr.vm02.ttibzz (mgr.14195) 489 : audit [DBG] from='client.15322 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm02.rugqqv", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:45:04.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:04 vm02 bash[17473]: cephadm 2026-03-10T08:45:03.329999+0000 mgr.vm02.ttibzz (mgr.14195) 490 : cephadm [INF] Schedule stop daemon rgw.foo.vm02.rugqqv
2026-03-10T08:45:04.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:04 vm02 bash[17473]: audit 2026-03-10T08:45:03.334760+0000 mon.vm02 (mon.0) 934 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:45:04.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:04 vm02 bash[17473]: audit 2026-03-10T08:45:03.338279+0000 mon.vm02 (mon.0) 935 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:45:04.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:04 vm02 bash[17473]: audit 2026-03-10T08:45:03.339013+0000 mon.vm02 (mon.0) 936 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:45:04.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:04 vm02 bash[17473]: audit 2026-03-10T08:45:03.340161+0000 mon.vm02 (mon.0) 937 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:45:04.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:04 vm02 bash[17473]: audit 2026-03-10T08:45:03.340620+0000 mon.vm02 (mon.0) 938 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T08:45:04.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:04 vm02 bash[17473]: audit 2026-03-10T08:45:03.344071+0000 mon.vm02 (mon.0) 939 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:45:04.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:04 vm02 bash[17473]: audit 2026-03-10T08:45:03.345457+0000 mon.vm02 (mon.0) 940 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T08:45:04.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:04 vm02 bash[17473]: audit 2026-03-10T08:45:03.506451+0000 mgr.vm02.ttibzz (mgr.14195) 491 : audit [DBG] from='client.15326 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:45:04.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:04 vm02 bash[17473]: audit 2026-03-10T08:45:03.684557+0000 mgr.vm02.ttibzz (mgr.14195) 492 : audit [DBG] from='client.15330 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:45:04.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:04 vm02 bash[17473]: audit 2026-03-10T08:45:03.888876+0000 mon.vm02 (mon.0) 941 : audit [DBG] from='client.? 192.168.123.102:0/2680694471' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:45:04.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:04 vm02 bash[17473]: audit 2026-03-10T08:45:04.246623+0000 mon.vm02 (mon.0) 942 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:45:04.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:04 vm02 bash[17473]: audit 2026-03-10T08:45:04.247368+0000 mon.vm02 (mon.0) 943 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:45:05.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:05 vm02 bash[17473]: cluster 2026-03-10T08:45:04.059629+0000 mgr.vm02.ttibzz (mgr.14195) 493 : cluster [DBG] pgmap v297: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 55 KiB/s rd, 0 B/s wr, 84 op/s
2026-03-10T08:45:07.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:07 vm02 bash[17473]: cluster 2026-03-10T08:45:06.059965+0000 mgr.vm02.ttibzz (mgr.14195) 494 : cluster [DBG] pgmap v298: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 55 KiB/s rd, 0 B/s wr, 84 op/s
2026-03-10T08:45:09.045 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.rugqqv to stop
2026-03-10T08:45:09.194 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:45:09.194 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (13s) 9s ago 6m 89.0M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:45:09.194 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (6m) 9s ago 6m 117M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:45:09.194 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (6m) 99s ago 6m 101M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:45:09.194 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (6m) 99s ago 6m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:45:09.383 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK
2026-03-10T08:45:09.690 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:09 vm02 bash[17473]: cluster 2026-03-10T08:45:08.060330+0000 mgr.vm02.ttibzz (mgr.14195) 495 : cluster [DBG] pgmap v299: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 52 KiB/s rd, 0 B/s wr, 80 op/s
2026-03-10T08:45:10.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:10 vm02 bash[17473]: audit 2026-03-10T08:45:09.035445+0000 mgr.vm02.ttibzz (mgr.14195) 496 : audit [DBG] from='client.15338 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:45:10.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:10 vm02 bash[17473]: audit 2026-03-10T08:45:09.195363+0000 mgr.vm02.ttibzz (mgr.14195) 497 : audit [DBG] from='client.15342 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:45:10.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:10 vm02 bash[17473]: audit 2026-03-10T08:45:09.386597+0000 mon.vm02 (mon.0) 944 : audit [DBG] from='client.? 192.168.123.102:0/1866723244' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:45:11.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:11 vm02 bash[17473]: cluster 2026-03-10T08:45:10.060775+0000 mgr.vm02.ttibzz (mgr.14195) 498 : cluster [DBG] pgmap v300: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 32 KiB/s rd, 176 B/s wr, 49 op/s
2026-03-10T08:45:13.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:13 vm02 bash[17473]: cluster 2026-03-10T08:45:12.061231+0000 mgr.vm02.ttibzz (mgr.14195) 499 : cluster [DBG] pgmap v301: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 24 KiB/s rd, 170 B/s wr, 37 op/s
2026-03-10T08:45:14.544 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.rugqqv to stop
2026-03-10T08:45:14.685 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:45:14.685 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (19s) 14s ago 6m 89.0M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:45:14.686 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (6m) 14s ago 6m 117M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:45:14.686 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (6m) 105s ago 6m 101M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:45:14.686 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (6m) 105s ago 6m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:45:14.860 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK
2026-03-10T08:45:15.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:15 vm02 bash[17473]: cluster 2026-03-10T08:45:14.061646+0000 mgr.vm02.ttibzz (mgr.14195) 500 : cluster [DBG] pgmap v302: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:45:15.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:15 vm02 bash[17473]: audit 2026-03-10T08:45:14.535235+0000 mgr.vm02.ttibzz (mgr.14195) 501 : audit [DBG] from='client.15350 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:45:15.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:15 vm02 bash[17473]: audit 2026-03-10T08:45:14.687161+0000 mgr.vm02.ttibzz (mgr.14195) 502 : audit [DBG] from='client.15354 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:45:15.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:15 vm02 bash[17473]: audit 2026-03-10T08:45:14.863988+0000 mon.vm02 (mon.0) 945 : audit [DBG] from='client.? 192.168.123.102:0/3847582621' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:45:17.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:17 vm02 bash[17473]: cluster 2026-03-10T08:45:16.061977+0000 mgr.vm02.ttibzz (mgr.14195) 503 : cluster [DBG] pgmap v303: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:45:17.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:17 vm02 bash[17473]: audit 2026-03-10T08:45:16.461003+0000 mon.vm02 (mon.0) 946 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:45:17.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:17 vm02 bash[17473]: audit 2026-03-10T08:45:16.465461+0000 mon.vm02 (mon.0) 947 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:45:17.783 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:17 vm02 bash[17473]: audit 2026-03-10T08:45:16.492638+0000 mon.vm02 (mon.0) 948 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:45:19.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:19 vm02 bash[17473]: cluster 2026-03-10T08:45:18.062318+0000 mgr.vm02.ttibzz (mgr.14195) 504 : cluster [DBG] pgmap v304: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:45:19.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:19 vm02 bash[17473]: audit 2026-03-10T08:45:19.244527+0000 mon.vm02 (mon.0) 949 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:45:20.021 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.rugqqv to stop
2026-03-10T08:45:20.159 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:45:20.160 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (24s) 20s ago 6m 89.0M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:45:20.160 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (6m) 20s ago 6m 117M - 19.2.3-678-ge911bdeb 654f31e6858e 0471928b991a
2026-03-10T08:45:20.160 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (6m) 110s ago 6m 101M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:45:20.160 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (6m) 110s ago 6m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:45:20.335 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK
2026-03-10T08:45:20.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:20 vm02 bash[17473]: audit 2026-03-10T08:45:20.338532+0000 mon.vm02 (mon.0) 950 : audit [DBG] from='client.? 192.168.123.102:0/2962446512' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:45:21.785 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:21 vm02 bash[17473]: audit 2026-03-10T08:45:20.011245+0000 mgr.vm02.ttibzz (mgr.14195) 505 : audit [DBG] from='client.15362 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:45:21.785 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:21 vm02 bash[17473]: cluster 2026-03-10T08:45:20.062727+0000 mgr.vm02.ttibzz (mgr.14195) 506 : cluster [DBG] pgmap v305: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:45:21.785 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:21 vm02 bash[17473]: audit 2026-03-10T08:45:20.161118+0000 mgr.vm02.ttibzz (mgr.14195) 507 : audit [DBG] from='client.15366 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:45:23.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:22 vm02 bash[17473]: audit 2026-03-10T08:45:21.949837+0000 mon.vm02 (mon.0) 951 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:45:23.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:22 vm02 bash[17473]: audit 2026-03-10T08:45:21.954636+0000 mon.vm02 (mon.0) 952 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:45:23.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:22 vm02 bash[17473]: audit 2026-03-10T08:45:21.955907+0000 mon.vm02 (mon.0) 953 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:45:23.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:22 vm02 bash[17473]: audit 2026-03-10T08:45:21.956608+0000 mon.vm02 (mon.0) 954 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T08:45:23.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:22 vm02 bash[17473]: audit 2026-03-10T08:45:21.960490+0000 mon.vm02 (mon.0) 955 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:45:23.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:22 vm02 bash[17473]: audit 2026-03-10T08:45:21.962256+0000 mon.vm02 (mon.0) 956 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T08:45:24.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:23 vm02 bash[17473]: cluster 2026-03-10T08:45:22.063136+0000 mgr.vm02.ttibzz (mgr.14195) 508 : cluster [DBG] pgmap v306: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:45:25.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:25 vm02 bash[17473]: cluster 2026-03-10T08:45:24.063552+0000 mgr.vm02.ttibzz (mgr.14195) 509 : cluster [DBG] pgmap v307: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:45:25.510 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 stopped 3s ago 6m - -
2026-03-10T08:45:25.514 INFO:teuthology.orchestra.run.vm02.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-03-10T08:45:25.514 INFO:teuthology.orchestra.run.vm02.stderr: Dload Upload Total Spent Left Speed
2026-03-10T08:45:25.515 INFO:teuthology.orchestra.run.vm02.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k
2026-03-10T08:45:25.699 INFO:teuthology.orchestra.run.vm02.stdout:anonymousScheduled to start rgw.foo.vm02.rugqqv on host 'vm02'
2026-03-10T08:45:25.891 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.rugqqv to start
2026-03-10T08:45:26.070 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:45:26.070 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (30s) 4s ago 6m 91.9M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:45:26.070 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 stopped 4s ago 6m - -
2026-03-10T08:45:26.070 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (6m) 116s ago 6m 101M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:45:26.070 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (6m) 116s ago 6m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:45:26.269 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK
2026-03-10T08:45:27.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:26 vm02 bash[17473]: audit 2026-03-10T08:45:25.498750+0000 mgr.vm02.ttibzz (mgr.14195) 510 : audit [DBG] from='client.15374 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:45:27.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:26 vm02 bash[17473]: audit 2026-03-10T08:45:25.676929+0000 mgr.vm02.ttibzz (mgr.14195) 511 : audit [DBG] from='client.15378 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm02.rugqqv", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:45:27.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:26 vm02 bash[17473]: cephadm 2026-03-10T08:45:25.677242+0000 mgr.vm02.ttibzz (mgr.14195) 512 : cephadm [INF] Schedule start daemon rgw.foo.vm02.rugqqv
2026-03-10T08:45:27.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:26 vm02 bash[17473]: audit 2026-03-10T08:45:25.691547+0000 mon.vm02 (mon.0) 957 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:45:27.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:26 vm02 bash[17473]: audit 2026-03-10T08:45:25.697154+0000 mon.vm02 (mon.0) 958 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:45:27.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:26 vm02 bash[17473]: audit 2026-03-10T08:45:25.698038+0000 mon.vm02 (mon.0) 959 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:45:27.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:26 vm02 bash[17473]: audit 2026-03-10T08:45:25.874733+0000 mgr.vm02.ttibzz (mgr.14195) 513 : audit [DBG] from='client.24887 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:45:27.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:26 vm02 bash[17473]: audit 2026-03-10T08:45:25.991855+0000 mon.vm02 (mon.0) 960 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:45:27.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:26 vm02 bash[17473]: audit 2026-03-10T08:45:25.997061+0000 mon.vm02 (mon.0) 961 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:45:27.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:26 vm02 bash[17473]: audit 2026-03-10T08:45:25.997900+0000 mon.vm02 (mon.0) 962 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:45:27.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:26 vm02 bash[17473]: audit 2026-03-10T08:45:25.998314+0000 mon.vm02 (mon.0) 963 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T08:45:27.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:26 vm02 bash[17473]: audit 2026-03-10T08:45:26.001754+0000 mon.vm02 (mon.0) 964 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:45:27.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:26 vm02 bash[17473]: audit 2026-03-10T08:45:26.002918+0000 mon.vm02 (mon.0) 965 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T08:45:27.034 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:26 vm02 bash[17473]: audit 2026-03-10T08:45:26.273319+0000 mon.vm02 (mon.0) 966 : audit [DBG] from='client.? 192.168.123.102:0/1974890859' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:45:28.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:27 vm02 bash[17473]: audit 2026-03-10T08:45:26.063285+0000 mgr.vm02.ttibzz (mgr.14195) 514 : audit [DBG] from='client.15386 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:45:28.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:27 vm02 bash[17473]: cluster 2026-03-10T08:45:26.065034+0000 mgr.vm02.ttibzz (mgr.14195) 515 : cluster [DBG] pgmap v308: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:45:30.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:29 vm02 bash[17473]: cluster 2026-03-10T08:45:28.065453+0000 mgr.vm02.ttibzz (mgr.14195) 516 : cluster [DBG] pgmap v309: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:45:31.452 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.rugqqv to start
2026-03-10T08:45:31.606 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:45:31.606 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (36s) 9s ago 6m 91.9M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:45:31.606 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 stopped 9s ago 6m - -
2026-03-10T08:45:31.606 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (6m) 2m ago 6m 101M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:45:31.606 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (6m) 2m ago 6m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:45:31.794 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK
2026-03-10T08:45:32.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:31 vm02 bash[17473]: cluster 2026-03-10T08:45:30.065814+0000 mgr.vm02.ttibzz (mgr.14195) 517 : cluster [DBG] pgmap v310: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:45:33.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:32 vm02 bash[17473]: audit 2026-03-10T08:45:31.439497+0000 mgr.vm02.ttibzz (mgr.14195) 518 : audit [DBG] from='client.15394 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:45:33.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:32 vm02 bash[17473]: audit 2026-03-10T08:45:31.607146+0000 mgr.vm02.ttibzz (mgr.14195) 519 : audit [DBG] from='client.15398 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:45:33.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:32 vm02 bash[17473]: audit 2026-03-10T08:45:31.797402+0000 mon.vm02 (mon.0) 967 : audit [DBG] from='client.? 192.168.123.102:0/2229486988' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:45:34.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:33 vm02 bash[17473]: cluster 2026-03-10T08:45:32.066298+0000 mgr.vm02.ttibzz (mgr.14195) 520 : cluster [DBG] pgmap v311: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:45:35.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:34 vm02 bash[17473]: audit 2026-03-10T08:45:34.243431+0000 mon.vm02 (mon.0) 968 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:45:36.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:35 vm02 bash[17473]: cluster 2026-03-10T08:45:34.066717+0000 mgr.vm02.ttibzz (mgr.14195) 521 : cluster [DBG] pgmap v312: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:45:36.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:35 vm02 bash[17473]: audit 2026-03-10T08:45:34.879552+0000 mon.vm02 (mon.0) 969 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:45:36.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:35 vm02 bash[17473]: audit 2026-03-10T08:45:34.909860+0000 mon.vm02 (mon.0) 970 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:45:36.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:35 vm02 bash[17473]: audit 2026-03-10T08:45:34.949708+0000 mon.vm02 (mon.0) 971 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:45:36.986 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm02.rugqqv to start 2026-03-10T08:45:37.144 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:45:37.145 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (41s) 15s ago 6m 91.9M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:45:37.145 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 stopped 15s ago 6m - - 2026-03-10T08:45:37.145 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (6m) 2m ago 6m 101M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584 2026-03-10T08:45:37.145 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (6m) 2m ago 6m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:45:37.342 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK 2026-03-10T08:45:38.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:37 vm02 bash[17473]: cluster 2026-03-10T08:45:36.067106+0000 mgr.vm02.ttibzz (mgr.14195) 522 : cluster [DBG] pgmap v313: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 7.7 KiB/s rd, 170 B/s wr, 11 op/s 2026-03-10T08:45:38.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:37 vm02 bash[17473]: cluster 2026-03-10T08:45:36.067106+0000 mgr.vm02.ttibzz (mgr.14195) 522 : cluster [DBG] pgmap v313: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 7.7 KiB/s rd, 170 B/s wr, 11 op/s 2026-03-10T08:45:38.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:37 vm02 bash[17473]: audit 2026-03-10T08:45:37.346318+0000 mon.vm02 (mon.0) 972 : audit [DBG] from='client.? 192.168.123.102:0/1341127662' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:45:38.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:37 vm02 bash[17473]: audit 2026-03-10T08:45:37.346318+0000 mon.vm02 (mon.0) 972 : audit [DBG] from='client.? 
192.168.123.102:0/1341127662' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:45:39.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:38 vm02 bash[17473]: audit 2026-03-10T08:45:36.973977+0000 mgr.vm02.ttibzz (mgr.14195) 523 : audit [DBG] from='client.15414 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:45:39.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:38 vm02 bash[17473]: audit 2026-03-10T08:45:36.973977+0000 mgr.vm02.ttibzz (mgr.14195) 523 : audit [DBG] from='client.15414 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:45:39.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:38 vm02 bash[17473]: audit 2026-03-10T08:45:37.144968+0000 mgr.vm02.ttibzz (mgr.14195) 524 : audit [DBG] from='client.15418 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:45:39.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:38 vm02 bash[17473]: audit 2026-03-10T08:45:37.144968+0000 mgr.vm02.ttibzz (mgr.14195) 524 : audit [DBG] from='client.15418 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:45:40.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:39 vm02 bash[17473]: cluster 2026-03-10T08:45:38.067565+0000 mgr.vm02.ttibzz (mgr.14195) 525 : cluster [DBG] pgmap v314: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 18 KiB/s rd, 170 B/s wr, 28 op/s 2026-03-10T08:45:40.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:39 vm02 bash[17473]: cluster 2026-03-10T08:45:38.067565+0000 mgr.vm02.ttibzz (mgr.14195) 525 : cluster [DBG] pgmap v314: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 18 KiB/s rd, 170 B/s wr, 28 op/s 2026-03-10T08:45:40.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:39 vm02 bash[17473]: audit 2026-03-10T08:45:39.652950+0000 mon.vm02 (mon.0) 973 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:45:40.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:39 vm02 bash[17473]: audit 2026-03-10T08:45:39.652950+0000 mon.vm02 (mon.0) 973 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:45:40.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:39 vm02 bash[17473]: audit 2026-03-10T08:45:39.658877+0000 mon.vm02 (mon.0) 974 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:45:40.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:39 vm02 bash[17473]: audit 2026-03-10T08:45:39.658877+0000 mon.vm02 (mon.0) 974 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:45:41.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:40 vm02 bash[17473]: audit 2026-03-10T08:45:40.005626+0000 mon.vm02 (mon.0) 975 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:45:41.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:40 vm02 bash[17473]: audit 2026-03-10T08:45:40.005626+0000 mon.vm02 (mon.0) 975 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 
2026-03-10T08:45:41.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:40 vm02 bash[17473]: audit 2026-03-10T08:45:40.006728+0000 mon.vm02 (mon.0) 976 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T08:45:41.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:40 vm02 bash[17473]: audit 2026-03-10T08:45:40.006728+0000 mon.vm02 (mon.0) 976 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T08:45:41.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:40 vm02 bash[17473]: audit 2026-03-10T08:45:40.012944+0000 mon.vm02 (mon.0) 977 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:45:41.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:40 vm02 bash[17473]: audit 2026-03-10T08:45:40.012944+0000 mon.vm02 (mon.0) 977 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:45:41.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:40 vm02 bash[17473]: audit 2026-03-10T08:45:40.015513+0000 mon.vm02 (mon.0) 978 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T08:45:41.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:40 vm02 bash[17473]: audit 2026-03-10T08:45:40.015513+0000 mon.vm02 (mon.0) 978 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T08:45:42.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:41 vm02 bash[17473]: cluster 2026-03-10T08:45:40.067976+0000 mgr.vm02.ttibzz (mgr.14195) 526 : cluster [DBG] pgmap v315: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 37 KiB/s rd, 170 B/s wr, 56 op/s 2026-03-10T08:45:42.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:41 vm02 bash[17473]: cluster 2026-03-10T08:45:40.067976+0000 mgr.vm02.ttibzz (mgr.14195) 526 : cluster [DBG] pgmap v315: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 37 KiB/s rd, 170 B/s wr, 56 op/s 2026-03-10T08:45:42.527 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (7s) 2s ago 6m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:45:42.695 INFO:teuthology.orchestra.run.vm02.stdout:Scheduled to stop rgw.foo.vm07.wecerd on host 'vm07' 2026-03-10T08:45:42.883 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop 2026-03-10T08:45:43.046 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:45:43.046 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (47s) 3s ago 6m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:45:43.046 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (8s) 3s ago 6m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:45:43.046 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (6m) 2m ago 6m 101M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584 2026-03-10T08:45:43.046 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 
*:8001 running (6m) 2m ago 6m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:45:43.240 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK 2026-03-10T08:45:44.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:43 vm02 bash[17473]: cluster 2026-03-10T08:45:42.068403+0000 mgr.vm02.ttibzz (mgr.14195) 527 : cluster [DBG] pgmap v316: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 53 KiB/s rd, 0 B/s wr, 82 op/s 2026-03-10T08:45:44.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:43 vm02 bash[17473]: cluster 2026-03-10T08:45:42.068403+0000 mgr.vm02.ttibzz (mgr.14195) 527 : cluster [DBG] pgmap v316: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 53 KiB/s rd, 0 B/s wr, 82 op/s 2026-03-10T08:45:44.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:43 vm02 bash[17473]: audit 2026-03-10T08:45:42.516204+0000 mgr.vm02.ttibzz (mgr.14195) 528 : audit [DBG] from='client.15426 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:45:44.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:43 vm02 bash[17473]: audit 2026-03-10T08:45:42.516204+0000 mgr.vm02.ttibzz (mgr.14195) 528 : audit [DBG] from='client.15426 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:45:44.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:43 vm02 bash[17473]: audit 2026-03-10T08:45:42.686771+0000 mgr.vm02.ttibzz (mgr.14195) 529 : audit [DBG] from='client.15430 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm07.wecerd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:45:44.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:43 vm02 bash[17473]: audit 2026-03-10T08:45:42.686771+0000 mgr.vm02.ttibzz (mgr.14195) 529 : audit [DBG] from='client.15430 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm07.wecerd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:45:44.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:43 vm02 bash[17473]: cephadm 2026-03-10T08:45:42.687142+0000 mgr.vm02.ttibzz (mgr.14195) 530 : cephadm [INF] Schedule stop daemon rgw.foo.vm07.wecerd 2026-03-10T08:45:44.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:43 vm02 bash[17473]: cephadm 2026-03-10T08:45:42.687142+0000 mgr.vm02.ttibzz (mgr.14195) 530 : cephadm [INF] Schedule stop daemon rgw.foo.vm07.wecerd 2026-03-10T08:45:44.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:43 vm02 bash[17473]: audit 2026-03-10T08:45:42.692628+0000 mon.vm02 (mon.0) 979 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:45:44.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:43 vm02 bash[17473]: audit 2026-03-10T08:45:42.692628+0000 mon.vm02 (mon.0) 979 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:45:44.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:43 vm02 bash[17473]: audit 2026-03-10T08:45:42.698100+0000 mon.vm02 (mon.0) 980 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:45:44.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:43 vm02 bash[17473]: audit 2026-03-10T08:45:42.698100+0000 mon.vm02 (mon.0) 980 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:45:44.033 
2026-03-10T08:45:44.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:43 vm02 bash[17473]: audit 2026-03-10T08:45:42.699440+0000 mon.vm02 (mon.0) 981 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:45:44.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:43 vm02 bash[17473]: audit 2026-03-10T08:45:42.700951+0000 mon.vm02 (mon.0) 982 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:45:44.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:43 vm02 bash[17473]: audit 2026-03-10T08:45:42.701657+0000 mon.vm02 (mon.0) 983 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T08:45:44.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:43 vm02 bash[17473]: audit 2026-03-10T08:45:42.706806+0000 mon.vm02 (mon.0) 984 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:45:44.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:43 vm02 bash[17473]: audit 2026-03-10T08:45:42.708187+0000 mon.vm02 (mon.0) 985 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T08:45:44.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:43 vm02 bash[17473]: audit 2026-03-10T08:45:42.870409+0000 mgr.vm02.ttibzz (mgr.14195) 531 : audit [DBG] from='client.15434 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:45:44.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:43 vm02 bash[17473]: audit 2026-03-10T08:45:43.243666+0000 mon.vm02 (mon.0) 986 : audit [DBG] from='client.? 192.168.123.102:0/2380620700' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:45:45.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:44 vm02 bash[17473]: audit 2026-03-10T08:45:43.047102+0000 mgr.vm02.ttibzz (mgr.14195) 532 : audit [DBG] from='client.15438 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:45:46.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:45 vm02 bash[17473]: cluster 2026-03-10T08:45:44.068771+0000 mgr.vm02.ttibzz (mgr.14195) 533 : cluster [DBG] pgmap v317: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 53 KiB/s rd, 0 B/s wr, 82 op/s
2026-03-10T08:45:48.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:47 vm02 bash[17473]: cluster 2026-03-10T08:45:46.069156+0000 mgr.vm02.ttibzz (mgr.14195) 534 : cluster [DBG] pgmap v318: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 53 KiB/s rd, 341 B/s wr, 82 op/s
2026-03-10T08:45:48.418 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:45:48.576 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:45:48.576 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (53s) 8s ago 6m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:45:48.576 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (13s) 8s ago 6m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:45:48.576 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (6m) 2m ago 6m 101M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:45:48.576 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (6m) 2m ago 6m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:45:48.763 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK
2026-03-10T08:45:50.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:49 vm02 bash[17473]: cluster 2026-03-10T08:45:48.069500+0000 mgr.vm02.ttibzz (mgr.14195) 535 : cluster [DBG] pgmap v319: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 46 KiB/s rd, 341 B/s wr, 71 op/s
2026-03-10T08:45:50.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:49 vm02 bash[17473]: audit 2026-03-10T08:45:48.406286+0000 mgr.vm02.ttibzz (mgr.14195) 536 : audit [DBG] from='client.15446 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:45:50.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:49 vm02 bash[17473]: audit 2026-03-10T08:45:48.577168+0000 mgr.vm02.ttibzz (mgr.14195) 537 : audit [DBG] from='client.15450 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:45:50.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:49 vm02 bash[17473]: audit 2026-03-10T08:45:48.767074+0000 mon.vm02 (mon.0) 987 : audit [DBG] from='client.? 192.168.123.102:0/1797955188' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:45:50.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:49 vm02 bash[17473]: audit 2026-03-10T08:45:49.243771+0000 mon.vm02 (mon.0) 988 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:45:52.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:51 vm02 bash[17473]: cluster 2026-03-10T08:45:50.069856+0000 mgr.vm02.ttibzz (mgr.14195) 538 : cluster [DBG] pgmap v320: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 35 KiB/s rd, 511 B/s wr, 54 op/s
2026-03-10T08:45:53.936 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:45:54.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:53 vm02 bash[17473]: cluster 2026-03-10T08:45:52.070224+0000 mgr.vm02.ttibzz (mgr.14195) 539 : cluster [DBG] pgmap v321: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 511 B/s wr, 26 op/s
2026-03-10T08:45:54.088 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:45:54.089 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (58s) 14s ago 6m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:45:54.089 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (19s) 14s ago 6m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:45:54.089 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (6m) 2m ago 6m 101M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:45:54.089 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (6m) 2m ago 6m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:45:54.279 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK
2026-03-10T08:45:55.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:54 vm02 bash[17473]: audit 2026-03-10T08:45:53.924688+0000 mgr.vm02.ttibzz (mgr.14195) 540 : audit [DBG] from='client.24949 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:45:55.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:54 vm02 bash[17473]: audit 2026-03-10T08:45:54.282928+0000 mon.vm02 (mon.0) 989 : audit [DBG] from='client.? 192.168.123.102:0/1785699734' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:45:56.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:55 vm02 bash[17473]: cluster 2026-03-10T08:45:54.070608+0000 mgr.vm02.ttibzz (mgr.14195) 541 : cluster [DBG] pgmap v322: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:45:56.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:55 vm02 bash[17473]: audit 2026-03-10T08:45:54.090088+0000 mgr.vm02.ttibzz (mgr.14195) 542 : audit [DBG] from='client.15462 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:45:58.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:57 vm02 bash[17473]: cluster 2026-03-10T08:45:56.070993+0000 mgr.vm02.ttibzz (mgr.14195) 543 : cluster [DBG] pgmap v323: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:45:58.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:57 vm02 bash[17473]: audit 2026-03-10T08:45:57.703318+0000 mon.vm02 (mon.0) 990 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:45:58.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:57 vm02 bash[17473]: audit 2026-03-10T08:45:57.707256+0000 mon.vm02 (mon.0) 991 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:45:58.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:57 vm02 bash[17473]: audit 2026-03-10T08:45:57.737744+0000 mon.vm02 (mon.0) 992 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:45:59.478 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:45:59.633 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:45:59.633 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (64s) 19s ago 6m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:45:59.633 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (24s) 19s ago 6m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:45:59.634 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (6m) 2m ago 6m 101M - 19.2.3-678-ge911bdeb 654f31e6858e ebf21c034584
2026-03-10T08:45:59.634 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (6m) 2m ago 6m 101M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:45:59.844 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK
2026-03-10T08:46:00.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:45:59 vm02 bash[17473]: cluster 2026-03-10T08:45:58.071306+0000 mgr.vm02.ttibzz (mgr.14195) 544 : cluster [DBG] pgmap v324: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:46:01.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:00 vm02 bash[17473]: audit 2026-03-10T08:45:59.466432+0000 mgr.vm02.ttibzz (mgr.14195) 545 : audit [DBG] from='client.15470 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:46:01.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:00 vm02 bash[17473]: audit 2026-03-10T08:45:59.634842+0000 mgr.vm02.ttibzz (mgr.14195) 546 : audit [DBG] from='client.15474 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:46:01.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:00 vm02 bash[17473]: audit 2026-03-10T08:45:59.847902+0000 mon.vm02 (mon.0) 993 : audit [DBG] from='client.? 192.168.123.102:0/3267119537' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:46:02.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:01 vm02 bash[17473]: cluster 2026-03-10T08:46:00.071779+0000 mgr.vm02.ttibzz (mgr.14195) 547 : cluster [DBG] pgmap v325: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:46:04.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:03 vm02 bash[17473]: cluster 2026-03-10T08:46:02.072210+0000 mgr.vm02.ttibzz (mgr.14195) 548 : cluster [DBG] pgmap v326: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:46:04.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:03 vm02 bash[17473]: audit 2026-03-10T08:46:03.007384+0000 mon.vm02 (mon.0) 994 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:46:04.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:03 vm02 bash[17473]: audit 2026-03-10T08:46:03.012537+0000 mon.vm02 (mon.0) 995 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:46:04.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:03 vm02 bash[17473]: audit 2026-03-10T08:46:03.013407+0000 mon.vm02 (mon.0) 996 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:46:04.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:03 vm02 bash[17473]: audit 2026-03-10T08:46:03.014320+0000 mon.vm02 (mon.0) 997 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T08:46:04.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:03 vm02 bash[17473]: audit 2026-03-10T08:46:03.014320+0000 mon.vm02 (mon.0) 997 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T08:46:04.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:03 vm02 bash[17473]: audit 2026-03-10T08:46:03.019465+0000 mon.vm02 (mon.0) 998 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:46:04.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:03 vm02 bash[17473]: audit 2026-03-10T08:46:03.019465+0000 mon.vm02 (mon.0) 998 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:46:04.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:03 vm02 bash[17473]: audit 2026-03-10T08:46:03.021205+0000 mon.vm02 (mon.0) 999 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T08:46:04.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:03 vm02 bash[17473]: audit 2026-03-10T08:46:03.021205+0000 mon.vm02 (mon.0) 999 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T08:46:05.041 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop 2026-03-10T08:46:05.207 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:46:05.207 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (69s) 25s ago 6m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:46:05.207 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (30s) 25s ago 7m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:46:05.207 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 2s ago 7m - - 2026-03-10T08:46:05.207 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (7m) 2s ago 7m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:46:05.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:04 vm02 bash[17473]: cluster 2026-03-10T08:46:03.015527+0000 mgr.vm02.ttibzz (mgr.14195) 549 : cluster [DBG] pgmap v327: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 93 B/s rd, 187 B/s wr, 0 op/s 2026-03-10T08:46:05.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:04 vm02 bash[17473]: cluster 2026-03-10T08:46:03.015527+0000 mgr.vm02.ttibzz (mgr.14195) 549 : cluster [DBG] pgmap v327: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 93 B/s rd, 187 B/s wr, 0 op/s 2026-03-10T08:46:05.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:04 vm02 bash[17473]: cluster 2026-03-10T08:46:04.011270+0000 mon.vm02 (mon.0) 1000 : cluster [WRN] Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-03-10T08:46:05.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:04 vm02 bash[17473]: cluster 2026-03-10T08:46:04.011270+0000 mon.vm02 (mon.0) 1000 : cluster [WRN] Health check failed: 1 failed cephadm daemon(s) 
(CEPHADM_FAILED_DAEMON) 2026-03-10T08:46:05.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:04 vm02 bash[17473]: audit 2026-03-10T08:46:04.251816+0000 mon.vm02 (mon.0) 1001 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:46:05.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:04 vm02 bash[17473]: audit 2026-03-10T08:46:04.251816+0000 mon.vm02 (mon.0) 1001 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:46:05.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:04 vm02 bash[17473]: audit 2026-03-10T08:46:04.253366+0000 mon.vm02 (mon.0) 1002 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:46:05.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:04 vm02 bash[17473]: audit 2026-03-10T08:46:04.253366+0000 mon.vm02 (mon.0) 1002 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:46:05.398 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:46:05.398 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:46:05.398 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state 2026-03-10T08:46:06.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:05 vm02 bash[17473]: audit 2026-03-10T08:46:05.401806+0000 mon.vm02 (mon.0) 1003 : audit [DBG] from='client.? 192.168.123.102:0/581046920' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:46:06.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:05 vm02 bash[17473]: audit 2026-03-10T08:46:05.401806+0000 mon.vm02 (mon.0) 1003 : audit [DBG] from='client.? 
192.168.123.102:0/581046920' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:46:07.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:06 vm02 bash[17473]: cluster 2026-03-10T08:46:05.015870+0000 mgr.vm02.ttibzz (mgr.14195) 550 : cluster [DBG] pgmap v328: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 93 B/s rd, 187 B/s wr, 0 op/s 2026-03-10T08:46:07.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:06 vm02 bash[17473]: cluster 2026-03-10T08:46:05.015870+0000 mgr.vm02.ttibzz (mgr.14195) 550 : cluster [DBG] pgmap v328: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 93 B/s rd, 187 B/s wr, 0 op/s 2026-03-10T08:46:07.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:06 vm02 bash[17473]: audit 2026-03-10T08:46:05.029328+0000 mgr.vm02.ttibzz (mgr.14195) 551 : audit [DBG] from='client.15482 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:07.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:06 vm02 bash[17473]: audit 2026-03-10T08:46:05.029328+0000 mgr.vm02.ttibzz (mgr.14195) 551 : audit [DBG] from='client.15482 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:07.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:06 vm02 bash[17473]: audit 2026-03-10T08:46:05.208288+0000 mgr.vm02.ttibzz (mgr.14195) 552 : audit [DBG] from='client.15486 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:07.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:06 vm02 bash[17473]: audit 2026-03-10T08:46:05.208288+0000 mgr.vm02.ttibzz (mgr.14195) 552 : audit [DBG] from='client.15486 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:09.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:08 vm02 bash[17473]: cluster 2026-03-10T08:46:07.016318+0000 mgr.vm02.ttibzz (mgr.14195) 553 : cluster [DBG] pgmap v329: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 93 B/s rd, 187 B/s wr, 0 op/s 2026-03-10T08:46:09.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:08 vm02 bash[17473]: cluster 2026-03-10T08:46:07.016318+0000 mgr.vm02.ttibzz (mgr.14195) 553 : cluster [DBG] pgmap v329: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 93 B/s rd, 187 B/s wr, 0 op/s 2026-03-10T08:46:10.578 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop 2026-03-10T08:46:10.734 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:46:10.734 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (75s) 31s ago 7m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:46:10.734 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (35s) 31s ago 7m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:46:10.734 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 7s ago 7m - - 2026-03-10T08:46:10.734 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (7m) 7s ago 7m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:46:10.927 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed 
cephadm daemon(s) 2026-03-10T08:46:10.927 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:46:10.927 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state 2026-03-10T08:46:11.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:10 vm02 bash[17473]: cluster 2026-03-10T08:46:09.016704+0000 mgr.vm02.ttibzz (mgr.14195) 554 : cluster [DBG] pgmap v330: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 93 B/s rd, 187 B/s wr, 0 op/s 2026-03-10T08:46:11.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:10 vm02 bash[17473]: cluster 2026-03-10T08:46:09.016704+0000 mgr.vm02.ttibzz (mgr.14195) 554 : cluster [DBG] pgmap v330: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 93 B/s rd, 187 B/s wr, 0 op/s 2026-03-10T08:46:12.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:11 vm02 bash[17473]: audit 2026-03-10T08:46:10.567042+0000 mgr.vm02.ttibzz (mgr.14195) 555 : audit [DBG] from='client.15494 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:12.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:11 vm02 bash[17473]: audit 2026-03-10T08:46:10.567042+0000 mgr.vm02.ttibzz (mgr.14195) 555 : audit [DBG] from='client.15494 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:12.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:11 vm02 bash[17473]: audit 2026-03-10T08:46:10.736037+0000 mgr.vm02.ttibzz (mgr.14195) 556 : audit [DBG] from='client.15498 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:12.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:11 vm02 bash[17473]: audit 2026-03-10T08:46:10.736037+0000 mgr.vm02.ttibzz (mgr.14195) 556 : audit [DBG] from='client.15498 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:12.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:11 vm02 bash[17473]: audit 2026-03-10T08:46:10.931517+0000 mon.vm02 (mon.0) 1004 : audit [DBG] from='client.? 192.168.123.102:0/501914231' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:46:12.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:11 vm02 bash[17473]: audit 2026-03-10T08:46:10.931517+0000 mon.vm02 (mon.0) 1004 : audit [DBG] from='client.? 
192.168.123.102:0/501914231' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:46:13.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:12 vm02 bash[17473]: cluster 2026-03-10T08:46:11.017178+0000 mgr.vm02.ttibzz (mgr.14195) 557 : cluster [DBG] pgmap v331: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 93 B/s rd, 187 B/s wr, 0 op/s 2026-03-10T08:46:13.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:12 vm02 bash[17473]: cluster 2026-03-10T08:46:11.017178+0000 mgr.vm02.ttibzz (mgr.14195) 557 : cluster [DBG] pgmap v331: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 93 B/s rd, 187 B/s wr, 0 op/s 2026-03-10T08:46:15.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:14 vm02 bash[17473]: cluster 2026-03-10T08:46:13.017565+0000 mgr.vm02.ttibzz (mgr.14195) 558 : cluster [DBG] pgmap v332: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 93 B/s rd, 187 B/s wr, 0 op/s 2026-03-10T08:46:15.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:14 vm02 bash[17473]: cluster 2026-03-10T08:46:13.017565+0000 mgr.vm02.ttibzz (mgr.14195) 558 : cluster [DBG] pgmap v332: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 93 B/s rd, 187 B/s wr, 0 op/s 2026-03-10T08:46:16.100 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop 2026-03-10T08:46:16.265 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:46:16.265 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (80s) 36s ago 7m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:46:16.265 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (41s) 36s ago 7m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:46:16.265 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 13s ago 7m - - 2026-03-10T08:46:16.265 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (7m) 13s ago 7m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:46:16.450 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:46:16.450 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:46:16.450 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state 2026-03-10T08:46:17.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:16 vm02 bash[17473]: cluster 2026-03-10T08:46:15.017973+0000 mgr.vm02.ttibzz (mgr.14195) 559 : cluster [DBG] pgmap v333: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:46:17.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:16 vm02 bash[17473]: cluster 2026-03-10T08:46:15.017973+0000 mgr.vm02.ttibzz (mgr.14195) 559 : cluster [DBG] pgmap v333: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:46:17.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:16 vm02 bash[17473]: audit 2026-03-10T08:46:16.454511+0000 mon.vm02 (mon.0) 1005 : audit [DBG] from='client.? 
192.168.123.102:0/1419771039' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:46:17.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:16 vm02 bash[17473]: audit 2026-03-10T08:46:16.454511+0000 mon.vm02 (mon.0) 1005 : audit [DBG] from='client.? 192.168.123.102:0/1419771039' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:46:18.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:17 vm02 bash[17473]: audit 2026-03-10T08:46:16.090004+0000 mgr.vm02.ttibzz (mgr.14195) 560 : audit [DBG] from='client.15506 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:18.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:17 vm02 bash[17473]: audit 2026-03-10T08:46:16.090004+0000 mgr.vm02.ttibzz (mgr.14195) 560 : audit [DBG] from='client.15506 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:18.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:17 vm02 bash[17473]: audit 2026-03-10T08:46:16.266925+0000 mgr.vm02.ttibzz (mgr.14195) 561 : audit [DBG] from='client.15510 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:18.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:17 vm02 bash[17473]: audit 2026-03-10T08:46:16.266925+0000 mgr.vm02.ttibzz (mgr.14195) 561 : audit [DBG] from='client.15510 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:19.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:18 vm02 bash[17473]: cluster 2026-03-10T08:46:17.018409+0000 mgr.vm02.ttibzz (mgr.14195) 562 : cluster [DBG] pgmap v334: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:46:19.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:18 vm02 bash[17473]: cluster 2026-03-10T08:46:17.018409+0000 mgr.vm02.ttibzz (mgr.14195) 562 : cluster [DBG] pgmap v334: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:46:20.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:19 vm02 bash[17473]: audit 2026-03-10T08:46:19.247966+0000 mon.vm02 (mon.0) 1006 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:46:20.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:19 vm02 bash[17473]: audit 2026-03-10T08:46:19.247966+0000 mon.vm02 (mon.0) 1006 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:46:21.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:20 vm02 bash[17473]: cluster 2026-03-10T08:46:19.018754+0000 mgr.vm02.ttibzz (mgr.14195) 563 : cluster [DBG] pgmap v335: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:46:21.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:20 vm02 bash[17473]: cluster 2026-03-10T08:46:19.018754+0000 mgr.vm02.ttibzz (mgr.14195) 563 : cluster [DBG] pgmap v335: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:46:21.622 
INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop 2026-03-10T08:46:21.776 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:46:21.776 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (86s) 42s ago 7m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:46:21.776 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (46s) 42s ago 7m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:46:21.776 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 18s ago 7m - - 2026-03-10T08:46:21.776 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (7m) 18s ago 7m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:46:21.962 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:46:21.962 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:46:21.962 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state 2026-03-10T08:46:23.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:22 vm02 bash[17473]: cluster 2026-03-10T08:46:21.019101+0000 mgr.vm02.ttibzz (mgr.14195) 564 : cluster [DBG] pgmap v336: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:46:23.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:22 vm02 bash[17473]: cluster 2026-03-10T08:46:21.019101+0000 mgr.vm02.ttibzz (mgr.14195) 564 : cluster [DBG] pgmap v336: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:46:23.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:22 vm02 bash[17473]: audit 2026-03-10T08:46:21.610872+0000 mgr.vm02.ttibzz (mgr.14195) 565 : audit [DBG] from='client.15518 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:23.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:22 vm02 bash[17473]: audit 2026-03-10T08:46:21.610872+0000 mgr.vm02.ttibzz (mgr.14195) 565 : audit [DBG] from='client.15518 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:23.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:22 vm02 bash[17473]: audit 2026-03-10T08:46:21.777259+0000 mgr.vm02.ttibzz (mgr.14195) 566 : audit [DBG] from='client.15522 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:23.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:22 vm02 bash[17473]: audit 2026-03-10T08:46:21.777259+0000 mgr.vm02.ttibzz (mgr.14195) 566 : audit [DBG] from='client.15522 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:23.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:22 vm02 bash[17473]: audit 2026-03-10T08:46:21.966440+0000 mon.vm02 (mon.0) 1007 : audit [DBG] from='client.? 192.168.123.102:0/3264901141' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:46:23.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:22 vm02 bash[17473]: audit 2026-03-10T08:46:21.966440+0000 mon.vm02 (mon.0) 1007 : audit [DBG] from='client.? 
192.168.123.102:0/3264901141' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:46:25.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:24 vm02 bash[17473]: cluster 2026-03-10T08:46:23.019405+0000 mgr.vm02.ttibzz (mgr.14195) 567 : cluster [DBG] pgmap v337: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:46:25.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:24 vm02 bash[17473]: cluster 2026-03-10T08:46:23.019405+0000 mgr.vm02.ttibzz (mgr.14195) 567 : cluster [DBG] pgmap v337: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:46:26.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:26 vm02 bash[17473]: cluster 2026-03-10T08:46:25.019743+0000 mgr.vm02.ttibzz (mgr.14195) 568 : cluster [DBG] pgmap v338: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:46:26.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:26 vm02 bash[17473]: cluster 2026-03-10T08:46:25.019743+0000 mgr.vm02.ttibzz (mgr.14195) 568 : cluster [DBG] pgmap v338: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:46:27.143 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop 2026-03-10T08:46:27.313 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:46:27.314 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (91s) 47s ago 7m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:46:27.314 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (52s) 47s ago 7m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:46:27.314 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 24s ago 7m - - 2026-03-10T08:46:27.314 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (7m) 24s ago 7m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:46:27.511 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:46:27.511 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:46:27.511 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state 2026-03-10T08:46:28.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:28 vm02 bash[17473]: cluster 2026-03-10T08:46:27.020137+0000 mgr.vm02.ttibzz (mgr.14195) 569 : cluster [DBG] pgmap v339: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:46:28.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:28 vm02 bash[17473]: cluster 2026-03-10T08:46:27.020137+0000 mgr.vm02.ttibzz (mgr.14195) 569 : cluster [DBG] pgmap v339: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:46:28.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:28 vm02 bash[17473]: audit 2026-03-10T08:46:27.129677+0000 mgr.vm02.ttibzz (mgr.14195) 570 : audit [DBG] from='client.15530 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:28.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:28 vm02 bash[17473]: 
audit 2026-03-10T08:46:27.129677+0000 mgr.vm02.ttibzz (mgr.14195) 570 : audit [DBG] from='client.15530 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:28.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:28 vm02 bash[17473]: audit 2026-03-10T08:46:27.314643+0000 mgr.vm02.ttibzz (mgr.14195) 571 : audit [DBG] from='client.15534 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:28.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:28 vm02 bash[17473]: audit 2026-03-10T08:46:27.314643+0000 mgr.vm02.ttibzz (mgr.14195) 571 : audit [DBG] from='client.15534 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:28.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:28 vm02 bash[17473]: audit 2026-03-10T08:46:27.515225+0000 mon.vm02 (mon.0) 1008 : audit [DBG] from='client.? 192.168.123.102:0/2284106866' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:46:28.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:28 vm02 bash[17473]: audit 2026-03-10T08:46:27.515225+0000 mon.vm02 (mon.0) 1008 : audit [DBG] from='client.? 192.168.123.102:0/2284106866' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:46:30.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:30 vm02 bash[17473]: cluster 2026-03-10T08:46:29.020596+0000 mgr.vm02.ttibzz (mgr.14195) 572 : cluster [DBG] pgmap v340: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:46:30.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:30 vm02 bash[17473]: cluster 2026-03-10T08:46:29.020596+0000 mgr.vm02.ttibzz (mgr.14195) 572 : cluster [DBG] pgmap v340: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:46:32.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:32 vm02 bash[17473]: cluster 2026-03-10T08:46:31.021127+0000 mgr.vm02.ttibzz (mgr.14195) 573 : cluster [DBG] pgmap v341: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:46:32.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:32 vm02 bash[17473]: cluster 2026-03-10T08:46:31.021127+0000 mgr.vm02.ttibzz (mgr.14195) 573 : cluster [DBG] pgmap v341: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:46:32.699 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop 2026-03-10T08:46:32.867 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:46:32.867 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (97s) 53s ago 7m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:46:32.868 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (57s) 53s ago 7m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:46:32.868 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 29s ago 7m - - 2026-03-10T08:46:32.868 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (7m) 29s ago 7m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:46:33.080 
INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:46:33.080 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:46:33.080 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state 2026-03-10T08:46:33.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:33 vm02 bash[17473]: audit 2026-03-10T08:46:32.686303+0000 mgr.vm02.ttibzz (mgr.14195) 574 : audit [DBG] from='client.15542 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:33.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:33 vm02 bash[17473]: audit 2026-03-10T08:46:32.686303+0000 mgr.vm02.ttibzz (mgr.14195) 574 : audit [DBG] from='client.15542 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:33.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:33 vm02 bash[17473]: audit 2026-03-10T08:46:32.868578+0000 mgr.vm02.ttibzz (mgr.14195) 575 : audit [DBG] from='client.15546 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:33.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:33 vm02 bash[17473]: audit 2026-03-10T08:46:32.868578+0000 mgr.vm02.ttibzz (mgr.14195) 575 : audit [DBG] from='client.15546 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:33.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:33 vm02 bash[17473]: audit 2026-03-10T08:46:33.083405+0000 mon.vm02 (mon.0) 1009 : audit [DBG] from='client.? 192.168.123.102:0/2757081286' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:46:33.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:33 vm02 bash[17473]: audit 2026-03-10T08:46:33.083405+0000 mon.vm02 (mon.0) 1009 : audit [DBG] from='client.? 
192.168.123.102:0/2757081286' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:46:34.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:34 vm02 bash[17473]: cluster 2026-03-10T08:46:33.021668+0000 mgr.vm02.ttibzz (mgr.14195) 576 : cluster [DBG] pgmap v342: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:46:34.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:34 vm02 bash[17473]: cluster 2026-03-10T08:46:33.021668+0000 mgr.vm02.ttibzz (mgr.14195) 576 : cluster [DBG] pgmap v342: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:46:35.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:35 vm02 bash[17473]: audit 2026-03-10T08:46:34.248075+0000 mon.vm02 (mon.0) 1010 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:46:35.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:35 vm02 bash[17473]: audit 2026-03-10T08:46:34.248075+0000 mon.vm02 (mon.0) 1010 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:46:36.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:36 vm02 bash[17473]: cluster 2026-03-10T08:46:35.022158+0000 mgr.vm02.ttibzz (mgr.14195) 577 : cluster [DBG] pgmap v343: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:46:36.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:36 vm02 bash[17473]: cluster 2026-03-10T08:46:35.022158+0000 mgr.vm02.ttibzz (mgr.14195) 577 : cluster [DBG] pgmap v343: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:46:38.284 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop 2026-03-10T08:46:38.453 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:46:38.453 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (103s) 58s ago 7m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:46:38.453 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (63s) 58s ago 7m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:46:38.453 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 35s ago 7m - - 2026-03-10T08:46:38.453 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (7m) 35s ago 7m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:46:38.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:38 vm02 bash[17473]: cluster 2026-03-10T08:46:37.022645+0000 mgr.vm02.ttibzz (mgr.14195) 578 : cluster [DBG] pgmap v344: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:46:38.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:38 vm02 bash[17473]: cluster 2026-03-10T08:46:37.022645+0000 mgr.vm02.ttibzz (mgr.14195) 578 : cluster [DBG] pgmap v344: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:46:38.655 
INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:46:38.655 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:46:38.655 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state 2026-03-10T08:46:39.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:39 vm02 bash[17473]: audit 2026-03-10T08:46:38.268769+0000 mgr.vm02.ttibzz (mgr.14195) 579 : audit [DBG] from='client.15554 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:39.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:39 vm02 bash[17473]: audit 2026-03-10T08:46:38.268769+0000 mgr.vm02.ttibzz (mgr.14195) 579 : audit [DBG] from='client.15554 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:39.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:39 vm02 bash[17473]: audit 2026-03-10T08:46:38.454177+0000 mgr.vm02.ttibzz (mgr.14195) 580 : audit [DBG] from='client.15558 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:39.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:39 vm02 bash[17473]: audit 2026-03-10T08:46:38.454177+0000 mgr.vm02.ttibzz (mgr.14195) 580 : audit [DBG] from='client.15558 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:39.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:39 vm02 bash[17473]: audit 2026-03-10T08:46:38.658772+0000 mon.vm02 (mon.0) 1011 : audit [DBG] from='client.? 192.168.123.102:0/1818493140' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:46:39.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:39 vm02 bash[17473]: audit 2026-03-10T08:46:38.658772+0000 mon.vm02 (mon.0) 1011 : audit [DBG] from='client.? 
192.168.123.102:0/1818493140' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:46:40.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:40 vm02 bash[17473]: cluster 2026-03-10T08:46:39.023219+0000 mgr.vm02.ttibzz (mgr.14195) 581 : cluster [DBG] pgmap v345: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:46:40.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:40 vm02 bash[17473]: cluster 2026-03-10T08:46:39.023219+0000 mgr.vm02.ttibzz (mgr.14195) 581 : cluster [DBG] pgmap v345: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:46:42.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:42 vm02 bash[17473]: cluster 2026-03-10T08:46:41.023727+0000 mgr.vm02.ttibzz (mgr.14195) 582 : cluster [DBG] pgmap v346: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:46:42.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:42 vm02 bash[17473]: cluster 2026-03-10T08:46:41.023727+0000 mgr.vm02.ttibzz (mgr.14195) 582 : cluster [DBG] pgmap v346: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:46:43.838 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop 2026-03-10T08:46:44.003 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:46:44.004 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (108s) 64s ago 7m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:46:44.004 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (69s) 64s ago 7m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:46:44.004 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 41s ago 7m - - 2026-03-10T08:46:44.004 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (7m) 41s ago 7m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:46:44.209 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:46:44.209 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:46:44.209 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state 2026-03-10T08:46:44.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:44 vm02 bash[17473]: cluster 2026-03-10T08:46:43.024080+0000 mgr.vm02.ttibzz (mgr.14195) 583 : cluster [DBG] pgmap v347: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:46:44.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:44 vm02 bash[17473]: cluster 2026-03-10T08:46:43.024080+0000 mgr.vm02.ttibzz (mgr.14195) 583 : cluster [DBG] pgmap v347: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:46:44.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:44 vm02 bash[17473]: audit 2026-03-10T08:46:43.826372+0000 mgr.vm02.ttibzz (mgr.14195) 584 : audit [DBG] from='client.15566 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:44.782 
INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:44 vm02 bash[17473]: audit 2026-03-10T08:46:43.826372+0000 mgr.vm02.ttibzz (mgr.14195) 584 : audit [DBG] from='client.15566 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:44.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:44 vm02 bash[17473]: audit 2026-03-10T08:46:44.209402+0000 mon.vm07 (mon.1) 39 : audit [DBG] from='client.? 192.168.123.102:0/1186735622' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:46:44.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:44 vm02 bash[17473]: audit 2026-03-10T08:46:44.209402+0000 mon.vm07 (mon.1) 39 : audit [DBG] from='client.? 192.168.123.102:0/1186735622' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:46:45.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:45 vm02 bash[17473]: audit 2026-03-10T08:46:44.004736+0000 mgr.vm02.ttibzz (mgr.14195) 585 : audit [DBG] from='client.15570 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:45.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:45 vm02 bash[17473]: audit 2026-03-10T08:46:44.004736+0000 mgr.vm02.ttibzz (mgr.14195) 585 : audit [DBG] from='client.15570 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:46.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:46 vm02 bash[17473]: cluster 2026-03-10T08:46:45.024445+0000 mgr.vm02.ttibzz (mgr.14195) 586 : cluster [DBG] pgmap v348: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:46:46.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:46 vm02 bash[17473]: cluster 2026-03-10T08:46:45.024445+0000 mgr.vm02.ttibzz (mgr.14195) 586 : cluster [DBG] pgmap v348: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:46:48.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:48 vm02 bash[17473]: cluster 2026-03-10T08:46:47.024858+0000 mgr.vm02.ttibzz (mgr.14195) 587 : cluster [DBG] pgmap v349: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:46:48.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:48 vm02 bash[17473]: cluster 2026-03-10T08:46:47.024858+0000 mgr.vm02.ttibzz (mgr.14195) 587 : cluster [DBG] pgmap v349: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:46:49.403 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop 2026-03-10T08:46:49.584 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:46:49.584 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (114s) 69s ago 7m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:46:49.584 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (74s) 69s ago 7m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:46:49.584 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 46s ago 7m - - 2026-03-10T08:46:49.584 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez 
vm07 *:8001 running (7m) 46s ago 7m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:46:49.690 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:49 vm02 bash[17473]: audit 2026-03-10T08:46:49.248309+0000 mon.vm02 (mon.0) 1012 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:46:49.690 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:49 vm02 bash[17473]: audit 2026-03-10T08:46:49.248309+0000 mon.vm02 (mon.0) 1012 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:46:49.791 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:46:49.791 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:46:49.791 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state 2026-03-10T08:46:50.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:50 vm02 bash[17473]: cluster 2026-03-10T08:46:49.025229+0000 mgr.vm02.ttibzz (mgr.14195) 588 : cluster [DBG] pgmap v350: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:46:50.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:50 vm02 bash[17473]: cluster 2026-03-10T08:46:49.025229+0000 mgr.vm02.ttibzz (mgr.14195) 588 : cluster [DBG] pgmap v350: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:46:50.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:50 vm02 bash[17473]: audit 2026-03-10T08:46:49.390739+0000 mgr.vm02.ttibzz (mgr.14195) 589 : audit [DBG] from='client.25033 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:50.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:50 vm02 bash[17473]: audit 2026-03-10T08:46:49.390739+0000 mgr.vm02.ttibzz (mgr.14195) 589 : audit [DBG] from='client.25033 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:50.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:50 vm02 bash[17473]: audit 2026-03-10T08:46:49.585349+0000 mgr.vm02.ttibzz (mgr.14195) 590 : audit [DBG] from='client.15582 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:50.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:50 vm02 bash[17473]: audit 2026-03-10T08:46:49.585349+0000 mgr.vm02.ttibzz (mgr.14195) 590 : audit [DBG] from='client.15582 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:50.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:50 vm02 bash[17473]: audit 2026-03-10T08:46:49.794921+0000 mon.vm02 (mon.0) 1013 : audit [DBG] from='client.? 192.168.123.102:0/2737484975' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:46:50.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:50 vm02 bash[17473]: audit 2026-03-10T08:46:49.794921+0000 mon.vm02 (mon.0) 1013 : audit [DBG] from='client.? 
192.168.123.102:0/2737484975' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:46:52.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:52 vm02 bash[17473]: cluster 2026-03-10T08:46:51.025735+0000 mgr.vm02.ttibzz (mgr.14195) 591 : cluster [DBG] pgmap v351: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:46:52.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:52 vm02 bash[17473]: cluster 2026-03-10T08:46:51.025735+0000 mgr.vm02.ttibzz (mgr.14195) 591 : cluster [DBG] pgmap v351: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:46:54.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:54 vm02 bash[17473]: cluster 2026-03-10T08:46:53.026222+0000 mgr.vm02.ttibzz (mgr.14195) 592 : cluster [DBG] pgmap v352: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:46:54.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:54 vm02 bash[17473]: cluster 2026-03-10T08:46:53.026222+0000 mgr.vm02.ttibzz (mgr.14195) 592 : cluster [DBG] pgmap v352: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:46:54.971 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop 2026-03-10T08:46:55.138 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:46:55.138 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (119s) 75s ago 7m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:46:55.138 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (80s) 75s ago 7m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:46:55.138 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 52s ago 7m - - 2026-03-10T08:46:55.138 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (7m) 52s ago 7m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:46:55.352 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:46:55.352 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:46:55.352 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state 2026-03-10T08:46:55.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:55 vm02 bash[17473]: audit 2026-03-10T08:46:54.959280+0000 mgr.vm02.ttibzz (mgr.14195) 593 : audit [DBG] from='client.15590 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:55.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:55 vm02 bash[17473]: audit 2026-03-10T08:46:54.959280+0000 mgr.vm02.ttibzz (mgr.14195) 593 : audit [DBG] from='client.15590 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:55.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:55 vm02 bash[17473]: audit 2026-03-10T08:46:55.356659+0000 mon.vm02 (mon.0) 1014 : audit [DBG] from='client.? 
192.168.123.102:0/881908895' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:46:55.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:55 vm02 bash[17473]: audit 2026-03-10T08:46:55.356659+0000 mon.vm02 (mon.0) 1014 : audit [DBG] from='client.? 192.168.123.102:0/881908895' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:46:56.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:56 vm02 bash[17473]: cluster 2026-03-10T08:46:55.026596+0000 mgr.vm02.ttibzz (mgr.14195) 594 : cluster [DBG] pgmap v353: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:46:56.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:56 vm02 bash[17473]: cluster 2026-03-10T08:46:55.026596+0000 mgr.vm02.ttibzz (mgr.14195) 594 : cluster [DBG] pgmap v353: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:46:56.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:56 vm02 bash[17473]: audit 2026-03-10T08:46:55.139855+0000 mgr.vm02.ttibzz (mgr.14195) 595 : audit [DBG] from='client.15594 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:56.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:56 vm02 bash[17473]: audit 2026-03-10T08:46:55.139855+0000 mgr.vm02.ttibzz (mgr.14195) 595 : audit [DBG] from='client.15594 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:46:58.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:58 vm02 bash[17473]: cluster 2026-03-10T08:46:57.027011+0000 mgr.vm02.ttibzz (mgr.14195) 596 : cluster [DBG] pgmap v354: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:46:58.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:46:58 vm02 bash[17473]: cluster 2026-03-10T08:46:57.027011+0000 mgr.vm02.ttibzz (mgr.14195) 596 : cluster [DBG] pgmap v354: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:47:00.532 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop 2026-03-10T08:47:00.692 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:47:00.692 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (2m) 81s ago 7m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:47:00.692 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (85s) 81s ago 7m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:47:00.692 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 57s ago 7m - - 2026-03-10T08:47:00.692 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (7m) 57s ago 7m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:47:00.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:00 vm02 bash[17473]: cluster 2026-03-10T08:46:59.027578+0000 mgr.vm02.ttibzz (mgr.14195) 597 : cluster [DBG] pgmap v355: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:47:00.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:00 vm02 bash[17473]: cluster 2026-03-10T08:46:59.027578+0000 mgr.vm02.ttibzz 
(mgr.14195) 597 : cluster [DBG] pgmap v355: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:47:00.884 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:47:00.884 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:47:00.884 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state 2026-03-10T08:47:01.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:01 vm02 bash[17473]: audit 2026-03-10T08:47:00.520281+0000 mgr.vm02.ttibzz (mgr.14195) 598 : audit [DBG] from='client.15602 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:47:01.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:01 vm02 bash[17473]: audit 2026-03-10T08:47:00.520281+0000 mgr.vm02.ttibzz (mgr.14195) 598 : audit [DBG] from='client.15602 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:47:01.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:01 vm02 bash[17473]: audit 2026-03-10T08:47:00.693309+0000 mgr.vm02.ttibzz (mgr.14195) 599 : audit [DBG] from='client.25057 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:47:01.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:01 vm02 bash[17473]: audit 2026-03-10T08:47:00.693309+0000 mgr.vm02.ttibzz (mgr.14195) 599 : audit [DBG] from='client.25057 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:47:01.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:01 vm02 bash[17473]: audit 2026-03-10T08:47:00.884470+0000 mon.vm07 (mon.1) 40 : audit [DBG] from='client.? 192.168.123.102:0/611321773' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:47:01.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:01 vm02 bash[17473]: audit 2026-03-10T08:47:00.884470+0000 mon.vm07 (mon.1) 40 : audit [DBG] from='client.? 
192.168.123.102:0/611321773' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:47:02.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:02 vm02 bash[17473]: cluster 2026-03-10T08:47:01.028083+0000 mgr.vm02.ttibzz (mgr.14195) 600 : cluster [DBG] pgmap v356: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:47:02.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:02 vm02 bash[17473]: cluster 2026-03-10T08:47:01.028083+0000 mgr.vm02.ttibzz (mgr.14195) 600 : cluster [DBG] pgmap v356: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:47:03.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:03 vm02 bash[17473]: audit 2026-03-10T08:47:03.066205+0000 mon.vm02 (mon.0) 1015 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:47:03.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:03 vm02 bash[17473]: audit 2026-03-10T08:47:03.066205+0000 mon.vm02 (mon.0) 1015 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:47:03.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:03 vm02 bash[17473]: audit 2026-03-10T08:47:03.374530+0000 mon.vm02 (mon.0) 1016 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:47:03.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:03 vm02 bash[17473]: audit 2026-03-10T08:47:03.374530+0000 mon.vm02 (mon.0) 1016 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:47:03.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:03 vm02 bash[17473]: audit 2026-03-10T08:47:03.379918+0000 mon.vm02 (mon.0) 1017 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:47:03.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:03 vm02 bash[17473]: audit 2026-03-10T08:47:03.379918+0000 mon.vm02 (mon.0) 1017 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:47:04.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:04 vm02 bash[17473]: cluster 2026-03-10T08:47:03.028471+0000 mgr.vm02.ttibzz (mgr.14195) 601 : cluster [DBG] pgmap v357: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:47:04.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:04 vm02 bash[17473]: cluster 2026-03-10T08:47:03.028471+0000 mgr.vm02.ttibzz (mgr.14195) 601 : cluster [DBG] pgmap v357: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:47:04.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:04 vm02 bash[17473]: audit 2026-03-10T08:47:03.714073+0000 mon.vm02 (mon.0) 1018 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:47:04.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:04 vm02 bash[17473]: audit 2026-03-10T08:47:03.714073+0000 mon.vm02 (mon.0) 1018 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 
2026-03-10T08:47:04.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:04 vm02 bash[17473]: audit 2026-03-10T08:47:03.714630+0000 mon.vm02 (mon.0) 1019 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T08:47:04.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:04 vm02 bash[17473]: cluster 2026-03-10T08:47:03.715709+0000 mgr.vm02.ttibzz (mgr.14195) 602 : cluster [DBG] pgmap v358: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 95 B/s rd, 191 B/s wr, 0 op/s
2026-03-10T08:47:04.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:04 vm02 bash[17473]: audit 2026-03-10T08:47:03.719811+0000 mon.vm02 (mon.0) 1020 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:47:04.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:04 vm02 bash[17473]: audit 2026-03-10T08:47:03.721271+0000 mon.vm02 (mon.0) 1021 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T08:47:04.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:04 vm02 bash[17473]: audit 2026-03-10T08:47:04.251069+0000 mon.vm02 (mon.0) 1022 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:47:06.068 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:47:06.231 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:47:06.231 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (2m) 86s ago 8m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:47:06.231 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (91s) 86s ago 8m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:47:06.231 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 63s ago 8m - -
2026-03-10T08:47:06.231 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (8m) 63s ago 8m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:47:06.438 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:47:06.438 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:47:06.438 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:47:07.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:06 vm02 bash[17473]: cluster 2026-03-10T08:47:05.716188+0000 mgr.vm02.ttibzz (mgr.14195) 603 : cluster [DBG] pgmap v359: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 95 B/s rd, 191 B/s wr, 0 op/s
2026-03-10T08:47:07.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:06 vm02 bash[17473]: audit 2026-03-10T08:47:06.442918+0000 mon.vm02 (mon.0) 1023 : audit [DBG] from='client.? 192.168.123.102:0/630237264' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:47:08.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:07 vm02 bash[17473]: audit 2026-03-10T08:47:06.056408+0000 mgr.vm02.ttibzz (mgr.14195) 604 : audit [DBG] from='client.15614 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:47:08.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:07 vm02 bash[17473]: audit 2026-03-10T08:47:06.230787+0000 mgr.vm02.ttibzz (mgr.14195) 605 : audit [DBG] from='client.15618 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:47:09.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:08 vm02 bash[17473]: cluster 2026-03-10T08:47:07.716568+0000 mgr.vm02.ttibzz (mgr.14195) 606 : cluster [DBG] pgmap v360: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 95 B/s rd, 191 B/s wr, 0 op/s
2026-03-10T08:47:11.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:10 vm02 bash[17473]: cluster 2026-03-10T08:47:09.716970+0000 mgr.vm02.ttibzz (mgr.14195) 607 : cluster [DBG] pgmap v361: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 191 B/s rd, 383 B/s wr, 0 op/s
2026-03-10T08:47:11.621 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:47:11.790 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:47:11.790 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (2m) 92s ago 8m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:47:11.790 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (96s) 92s ago 8m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:47:11.790 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 68s ago 8m - -
2026-03-10T08:47:11.790 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (8m) 68s ago 8m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:47:11.999 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:47:11.999 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:47:11.999 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:47:13.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:12 vm02 bash[17473]: audit 2026-03-10T08:47:11.609484+0000 mgr.vm02.ttibzz (mgr.14195) 608 : audit [DBG] from='client.15626 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:47:13.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:12 vm02 bash[17473]: cluster 2026-03-10T08:47:11.717521+0000 mgr.vm02.ttibzz (mgr.14195) 609 : cluster [DBG] pgmap v362: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 95 B/s rd, 191 B/s wr, 0 op/s
2026-03-10T08:47:13.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:12 vm02 bash[17473]: audit 2026-03-10T08:47:11.792467+0000 mgr.vm02.ttibzz (mgr.14195) 610 : audit [DBG] from='client.25075 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:47:13.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:12 vm02 bash[17473]: audit 2026-03-10T08:47:12.003645+0000 mon.vm02 (mon.0) 1024 : audit [DBG] from='client.? 192.168.123.102:0/1023530450' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:47:15.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:14 vm02 bash[17473]: cluster 2026-03-10T08:47:13.717941+0000 mgr.vm02.ttibzz (mgr.14195) 611 : cluster [DBG] pgmap v363: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 95 B/s rd, 191 B/s wr, 0 op/s
2026-03-10T08:47:17.182 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:47:17.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:16 vm02 bash[17473]: cluster 2026-03-10T08:47:15.718381+0000 mgr.vm02.ttibzz (mgr.14195) 612 : cluster [DBG] pgmap v364: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:47:17.343 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:47:17.343 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (2m) 97s ago 8m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:47:17.343 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (102s) 97s ago 8m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:47:17.343 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 74s ago 8m - -
2026-03-10T08:47:17.343 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (8m) 74s ago 8m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:47:17.547 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:47:17.547 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:47:17.547 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:47:18.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:17 vm02 bash[17473]: audit 2026-03-10T08:47:17.551674+0000 mon.vm02 (mon.0) 1025 : audit [DBG] from='client.? 192.168.123.102:0/391284229' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:47:19.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:18 vm02 bash[17473]: audit 2026-03-10T08:47:17.171353+0000 mgr.vm02.ttibzz (mgr.14195) 613 : audit [DBG] from='client.15638 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:47:19.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:18 vm02 bash[17473]: audit 2026-03-10T08:47:17.344936+0000 mgr.vm02.ttibzz (mgr.14195) 614 : audit [DBG] from='client.15642 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:47:19.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:18 vm02 bash[17473]: cluster 2026-03-10T08:47:17.718799+0000 mgr.vm02.ttibzz (mgr.14195) 615 : cluster [DBG] pgmap v365: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:47:20.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:19 vm02 bash[17473]: audit 2026-03-10T08:47:19.251426+0000 mon.vm02 (mon.0) 1026 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:47:21.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:20 vm02 bash[17473]: cluster 2026-03-10T08:47:19.719272+0000 mgr.vm02.ttibzz (mgr.14195) 616 : cluster [DBG] pgmap v366: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:47:22.732 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:47:22.910 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:47:22.910 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (2m) 103s ago 8m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:47:22.910 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (108s) 103s ago 8m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:47:22.910 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 79s ago 8m - -
2026-03-10T08:47:22.910 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (8m) 79s ago 8m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:47:23.123 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:47:23.123 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:47:23.123 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:47:23.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:22 vm02 bash[17473]: cluster 2026-03-10T08:47:21.719813+0000 mgr.vm02.ttibzz (mgr.14195) 617 : cluster [DBG] pgmap v367: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:47:24.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:23 vm02 bash[17473]: audit 2026-03-10T08:47:22.721043+0000 mgr.vm02.ttibzz (mgr.14195) 618 : audit [DBG] from='client.15650 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:47:24.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:23 vm02 bash[17473]: audit 2026-03-10T08:47:22.910597+0000 mgr.vm02.ttibzz (mgr.14195) 619 : audit [DBG] from='client.15654 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:47:24.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:23 vm02 bash[17473]: audit 2026-03-10T08:47:23.127336+0000 mon.vm02 (mon.0) 1027 : audit [DBG] from='client.? 192.168.123.102:0/2159590926' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:47:25.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:24 vm02 bash[17473]: cluster 2026-03-10T08:47:23.720368+0000 mgr.vm02.ttibzz (mgr.14195) 620 : cluster [DBG] pgmap v368: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:47:27.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:26 vm02 bash[17473]: cluster 2026-03-10T08:47:25.720858+0000 mgr.vm02.ttibzz (mgr.14195) 621 : cluster [DBG] pgmap v369: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:47:28.316 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:47:28.491 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:47:28.491 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (2m) 108s ago 8m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:47:28.491 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (113s) 108s ago 8m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:47:28.491 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 85s ago 8m - -
2026-03-10T08:47:28.491 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (8m) 85s ago 8m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:47:28.701 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:47:28.701 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:47:28.701 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:47:29.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:28 vm02 bash[17473]: cluster 2026-03-10T08:47:27.721244+0000 mgr.vm02.ttibzz (mgr.14195) 622 : cluster [DBG] pgmap v370: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:47:29.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:28 vm02 bash[17473]: audit 2026-03-10T08:47:28.705504+0000 mon.vm02 (mon.0) 1028 : audit [DBG] from='client.?
192.168.123.102:0/250948760' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:47:29.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:28 vm02 bash[17473]: audit 2026-03-10T08:47:28.705504+0000 mon.vm02 (mon.0) 1028 : audit [DBG] from='client.? 192.168.123.102:0/250948760' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:47:30.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:29 vm02 bash[17473]: audit 2026-03-10T08:47:28.301983+0000 mgr.vm02.ttibzz (mgr.14195) 623 : audit [DBG] from='client.15662 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:47:30.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:29 vm02 bash[17473]: audit 2026-03-10T08:47:28.301983+0000 mgr.vm02.ttibzz (mgr.14195) 623 : audit [DBG] from='client.15662 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:47:30.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:29 vm02 bash[17473]: audit 2026-03-10T08:47:28.491919+0000 mgr.vm02.ttibzz (mgr.14195) 624 : audit [DBG] from='client.15666 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:47:30.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:29 vm02 bash[17473]: audit 2026-03-10T08:47:28.491919+0000 mgr.vm02.ttibzz (mgr.14195) 624 : audit [DBG] from='client.15666 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:47:31.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:30 vm02 bash[17473]: cluster 2026-03-10T08:47:29.721679+0000 mgr.vm02.ttibzz (mgr.14195) 625 : cluster [DBG] pgmap v371: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:47:31.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:30 vm02 bash[17473]: cluster 2026-03-10T08:47:29.721679+0000 mgr.vm02.ttibzz (mgr.14195) 625 : cluster [DBG] pgmap v371: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:47:33.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:32 vm02 bash[17473]: cluster 2026-03-10T08:47:31.722156+0000 mgr.vm02.ttibzz (mgr.14195) 626 : cluster [DBG] pgmap v372: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:47:33.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:32 vm02 bash[17473]: cluster 2026-03-10T08:47:31.722156+0000 mgr.vm02.ttibzz (mgr.14195) 626 : cluster [DBG] pgmap v372: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:47:33.882 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop 2026-03-10T08:47:34.038 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:47:34.038 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (2m) 114s ago 8m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:47:34.038 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (119s) 114s ago 8m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:47:34.038 
2026-03-10T08:47:34.038 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (8m) 91s ago 8m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:47:34.257 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:47:34.258 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:47:34.258 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:47:35.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:34 vm02 bash[17473]: cluster 2026-03-10T08:47:33.722722+0000 mgr.vm02.ttibzz (mgr.14195) 627 : cluster [DBG] pgmap v373: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:47:35.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:34 vm02 bash[17473]: audit 2026-03-10T08:47:33.869737+0000 mgr.vm02.ttibzz (mgr.14195) 628 : audit [DBG] from='client.15674 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:47:35.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:34 vm02 bash[17473]: audit 2026-03-10T08:47:34.251787+0000 mon.vm02 (mon.0) 1029 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:47:35.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:34 vm02 bash[17473]: audit 2026-03-10T08:47:34.262210+0000 mon.vm02 (mon.0) 1030 : audit [DBG] from='client.? 192.168.123.102:0/1163870739' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:47:36.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:35 vm02 bash[17473]: audit 2026-03-10T08:47:34.040415+0000 mgr.vm02.ttibzz (mgr.14195) 629 : audit [DBG] from='client.15678 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:47:37.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:36 vm02 bash[17473]: cluster 2026-03-10T08:47:35.723106+0000 mgr.vm02.ttibzz (mgr.14195) 630 : cluster [DBG] pgmap v374: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:47:39.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:38 vm02 bash[17473]: cluster 2026-03-10T08:47:37.723621+0000 mgr.vm02.ttibzz (mgr.14195) 631 : cluster [DBG] pgmap v375: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:47:39.440 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:47:39.597 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:47:39.597 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (2m) 119s ago 8m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:47:39.597 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (2m) 119s ago 8m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:47:39.597 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 96s ago 8m - -
2026-03-10T08:47:39.597 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (8m) 96s ago 8m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:47:39.802 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:47:39.803 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:47:39.803 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:47:40.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:39 vm02 bash[17473]: audit 2026-03-10T08:47:39.807370+0000 mon.vm02 (mon.0) 1031 : audit [DBG] from='client.? 192.168.123.102:0/1803483201' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:47:41.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:40 vm02 bash[17473]: audit 2026-03-10T08:47:39.428533+0000 mgr.vm02.ttibzz (mgr.14195) 632 : audit [DBG] from='client.25115 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:47:41.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:40 vm02 bash[17473]: audit 2026-03-10T08:47:39.597750+0000 mgr.vm02.ttibzz (mgr.14195) 633 : audit [DBG] from='client.15690 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:47:41.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:40 vm02 bash[17473]: cluster 2026-03-10T08:47:39.724061+0000 mgr.vm02.ttibzz (mgr.14195) 634 : cluster [DBG] pgmap v376: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:47:43.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:42 vm02 bash[17473]: cluster 2026-03-10T08:47:41.724452+0000 mgr.vm02.ttibzz (mgr.14195) 635 : cluster [DBG] pgmap v377: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:47:44.985 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:47:45.140 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:47:45.140 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (2m) 2m ago 8m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:47:45.140 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (2m) 2m ago 8m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:47:45.140 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 102s ago 8m - -
2026-03-10T08:47:45.140 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (8m) 102s ago 8m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:47:45.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:44 vm02 bash[17473]: cluster 2026-03-10T08:47:43.724861+0000 mgr.vm02.ttibzz (mgr.14195) 636 : cluster [DBG] pgmap v378: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:47:45.342 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:47:45.342 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:47:45.342 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:47:46.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:45 vm02 bash[17473]: audit 2026-03-10T08:47:44.972741+0000 mgr.vm02.ttibzz (mgr.14195) 637 : audit [DBG] from='client.15698 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:47:46.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:45 vm02 bash[17473]: audit 2026-03-10T08:47:45.346719+0000 mon.vm02 (mon.0) 1032 : audit [DBG] from='client.? 192.168.123.102:0/2489807177' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:47:47.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:46 vm02 bash[17473]: audit 2026-03-10T08:47:45.142174+0000 mgr.vm02.ttibzz (mgr.14195) 638 : audit [DBG] from='client.15702 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:47:47.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:46 vm02 bash[17473]: cluster 2026-03-10T08:47:45.725420+0000 mgr.vm02.ttibzz (mgr.14195) 639 : cluster [DBG] pgmap v379: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:47:49.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:48 vm02 bash[17473]: cluster 2026-03-10T08:47:47.725877+0000 mgr.vm02.ttibzz (mgr.14195) 640 : cluster [DBG] pgmap v380: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:47:50.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:49 vm02 bash[17473]: audit 2026-03-10T08:47:49.251941+0000 mon.vm02 (mon.0) 1033 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:47:50.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:49 vm02 bash[17473]: cluster 2026-03-10T08:47:49.726296+0000 mgr.vm02.ttibzz (mgr.14195) 641 : cluster [DBG] pgmap v381: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:47:50.525 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:47:50.679 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:47:50.679 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (2m) 2m ago 8m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:47:50.679 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (2m) 2m ago 8m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:47:50.679 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 107s ago 8m - -
2026-03-10T08:47:50.679 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (8m) 107s ago 8m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:47:50.868 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:47:50.868 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:47:50.868 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:47:51.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:50 vm02 bash[17473]: audit 2026-03-10T08:47:50.513641+0000 mgr.vm02.ttibzz (mgr.14195) 642 : audit [DBG] from='client.15710 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:47:51.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:50 vm02 bash[17473]: audit 2026-03-10T08:47:50.681283+0000 mgr.vm02.ttibzz (mgr.14195) 643 : audit [DBG] from='client.15714 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:47:51.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:50 vm02 bash[17473]: audit 2026-03-10T08:47:50.873010+0000 mon.vm02 (mon.0) 1034 : audit [DBG] from='client.? 192.168.123.102:0/3271213906' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:47:52.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:51 vm02 bash[17473]: cluster 2026-03-10T08:47:51.726810+0000 mgr.vm02.ttibzz (mgr.14195) 644 : cluster [DBG] pgmap v382: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:47:55.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:54 vm02 bash[17473]: cluster 2026-03-10T08:47:53.727312+0000 mgr.vm02.ttibzz (mgr.14195) 645 : cluster [DBG] pgmap v383: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:47:56.078 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:47:56.237 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:47:56.238 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (3m) 2m ago 8m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:47:56.238 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (2m) 2m ago 8m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:47:56.238 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 113s ago 8m - -
2026-03-10T08:47:56.238 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (8m) 113s ago 8m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:47:56.434 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:47:56.434 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:47:56.434 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:47:57.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:56 vm02 bash[17473]: cluster 2026-03-10T08:47:55.727732+0000 mgr.vm02.ttibzz (mgr.14195) 646 : cluster [DBG] pgmap v384: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:47:57.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:56 vm02 bash[17473]: audit 2026-03-10T08:47:56.438838+0000 mon.vm02 (mon.0) 1035 : audit [DBG] from='client.? 192.168.123.102:0/2816475815' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:47:58.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:57 vm02 bash[17473]: audit 2026-03-10T08:47:56.065341+0000 mgr.vm02.ttibzz (mgr.14195) 647 : audit [DBG] from='client.15722 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:47:58.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:57 vm02 bash[17473]: audit 2026-03-10T08:47:56.239885+0000 mgr.vm02.ttibzz (mgr.14195) 648 : audit [DBG] from='client.15726 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:47:59.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:47:58 vm02 bash[17473]: cluster 2026-03-10T08:47:57.728222+0000 mgr.vm02.ttibzz (mgr.14195) 649 : cluster [DBG] pgmap v385: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:48:01.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:00 vm02 bash[17473]: cluster 2026-03-10T08:47:59.728722+0000 mgr.vm02.ttibzz (mgr.14195) 650 : cluster [DBG] pgmap v386: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:48:01.617 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:48:01.788 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:48:01.788 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (3m) 2m ago 8m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:48:01.788 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (2m) 2m ago 8m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:48:01.788 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 118s ago 8m - -
2026-03-10T08:48:01.788 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (8m) 118s ago 8m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:48:02.004 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:48:02.005 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:48:02.005 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:48:03.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:02 vm02 bash[17473]: audit 2026-03-10T08:48:01.601650+0000 mgr.vm02.ttibzz (mgr.14195) 651 : audit [DBG] from='client.15734 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:48:03.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:02 vm02 bash[17473]: cluster 2026-03-10T08:48:01.729175+0000 mgr.vm02.ttibzz (mgr.14195) 652 : cluster [DBG] pgmap v387: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:48:03.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:02 vm02 bash[17473]: audit 2026-03-10T08:48:01.790541+0000 mgr.vm02.ttibzz (mgr.14195) 653 : audit [DBG] from='client.25145 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:48:03.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:02 vm02 bash[17473]: audit 2026-03-10T08:48:02.009543+0000 mon.vm02 (mon.0) 1036 : audit [DBG] from='client.? 192.168.123.102:0/3046387660' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:48:04.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:03 vm02 bash[17473]: audit 2026-03-10T08:48:03.762951+0000 mon.vm02 (mon.0) 1037 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:48:05.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:04 vm02 bash[17473]: cluster 2026-03-10T08:48:03.729624+0000 mgr.vm02.ttibzz (mgr.14195) 654 : cluster [DBG] pgmap v388: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:48:05.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:04 vm02 bash[17473]: audit 2026-03-10T08:48:04.096309+0000 mon.vm02 (mon.0) 1038 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config rm", "who": "osd/host:vm02", "name": "osd_memory_target"}]: dispatch
2026-03-10T08:48:05.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:04 vm02 bash[17473]: audit 2026-03-10T08:48:04.125696+0000 mon.vm02 (mon.0) 1039 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config rm", "who": "osd/host:vm07", "name": "osd_memory_target"}]: dispatch
2026-03-10T08:48:05.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:04 vm02 bash[17473]: audit 2026-03-10T08:48:04.126507+0000 mon.vm02 (mon.0) 1040 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:48:05.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:04 vm02 bash[17473]: audit 2026-03-10T08:48:04.126981+0000 mon.vm02 (mon.0) 1041 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T08:48:05.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:04 vm02 bash[17473]: audit 2026-03-10T08:48:04.135698+0000 mon.vm02 (mon.0) 1042 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:48:05.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:04 vm02 bash[17473]: audit 2026-03-10T08:48:04.137379+0000 mon.vm02 (mon.0) 1043 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T08:48:05.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:04 vm02 bash[17473]: audit 2026-03-10T08:48:04.252322+0000 mon.vm02 (mon.0) 1044 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:48:06.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:05 vm02 bash[17473]: cluster 2026-03-10T08:48:04.128103+0000 mgr.vm02.ttibzz (mgr.14195) 655 : cluster [DBG] pgmap v389: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 196 B/s wr, 0 op/s
2026-03-10T08:48:06.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:05 vm02 bash[17473]: cluster 2026-03-10T08:48:04.128492+0000 mgr.vm02.ttibzz (mgr.14195) 656 : cluster [DBG] pgmap v390: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 243 B/s wr, 0 op/s
2026-03-10T08:48:07.190 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:48:07.355 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:48:07.355 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (3m) 2m ago 9m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:48:07.355 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (2m) 2m ago 9m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:48:07.355 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 2m ago 9m - -
2026-03-10T08:48:07.355 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (9m) 2m ago 9m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:48:07.556 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:48:07.556 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:48:07.556 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:48:08.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:07 vm02 bash[17473]: cluster 2026-03-10T08:48:06.128950+0000 mgr.vm02.ttibzz (mgr.14195) 657 : cluster [DBG] pgmap v391: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 243 B/s wr, 0 op/s
2026-03-10T08:48:08.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:07 vm02 bash[17473]: audit 2026-03-10T08:48:07.561375+0000 mon.vm02 (mon.0) 1045 : audit [DBG] from='client.? 192.168.123.102:0/3689868395' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:48:09.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:09 vm02 bash[17473]: audit 2026-03-10T08:48:07.178078+0000 mgr.vm02.ttibzz (mgr.14195) 658 : audit [DBG] from='client.15746 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:48:09.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:09 vm02 bash[17473]: audit 2026-03-10T08:48:07.357590+0000 mgr.vm02.ttibzz (mgr.14195) 659 : audit [DBG] from='client.15750 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:48:10.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:10 vm02 bash[17473]: cluster 2026-03-10T08:48:08.129326+0000 mgr.vm02.ttibzz (mgr.14195) 660 : cluster [DBG] pgmap v392: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:48:11.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:11 vm02 bash[17473]: cluster 2026-03-10T08:48:10.129772+0000 mgr.vm02.ttibzz (mgr.14195) 661 : cluster [DBG] pgmap v393: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 243 B/s wr, 0 op/s
2026-03-10T08:48:12.738 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:48:12.897 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:48:12.897 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (3m) 2m ago 9m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:48:12.897 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (2m) 2m ago 9m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:48:12.897 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 2m ago 9m - -
2026-03-10T08:48:12.897 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (9m) 2m ago 9m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:48:13.102 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:48:13.102 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:48:13.102 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:48:13.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:13 vm02 bash[17473]: cluster 2026-03-10T08:48:12.130286+0000 mgr.vm02.ttibzz (mgr.14195) 662 : cluster [DBG] pgmap v394: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 243 B/s wr, 0 op/s
2026-03-10T08:48:13.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:13 vm02 bash[17473]: audit 2026-03-10T08:48:12.727274+0000 mgr.vm02.ttibzz (mgr.14195) 663 : audit [DBG] from='client.25157 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:48:13.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:13 vm02 bash[17473]: audit 2026-03-10T08:48:12.899857+0000 mgr.vm02.ttibzz (mgr.14195) 664 : audit [DBG] from='client.15762 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:48:13.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:13 vm02 bash[17473]: audit 2026-03-10T08:48:13.107338+0000 mon.vm02 (mon.0) 1046 : audit [DBG] from='client.? 192.168.123.102:0/2881942715' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:48:15.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:15 vm02 bash[17473]: cluster 2026-03-10T08:48:14.130765+0000 mgr.vm02.ttibzz (mgr.14195) 665 : cluster [DBG] pgmap v395: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 102 B/s rd, 204 B/s wr, 0 op/s
2026-03-10T08:48:17.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:17 vm02 bash[17473]: cluster 2026-03-10T08:48:16.131184+0000 mgr.vm02.ttibzz (mgr.14195) 666 : cluster [DBG] pgmap v396: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:48:18.289 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:48:18.448 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:48:18.448 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (3m) 2m ago 9m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:48:18.448 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (2m) 2m ago 9m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:48:18.448 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 2m ago 9m - -
2026-03-10T08:48:18.448 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (9m) 2m ago 9m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:48:18.648 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:48:18.648 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:48:18.648 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:48:19.688 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:19 vm02 bash[17473]: cluster 2026-03-10T08:48:18.131612+0000 mgr.vm02.ttibzz (mgr.14195) 667 : cluster [DBG] pgmap v397: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:48:19.689 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:19 vm02 bash[17473]: audit 2026-03-10T08:48:18.277990+0000 mgr.vm02.ttibzz (mgr.14195) 668 : audit [DBG] from='client.15770 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:48:19.689 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:19 vm02 bash[17473]: audit 2026-03-10T08:48:18.450311+0000 mgr.vm02.ttibzz (mgr.14195) 669 : audit [DBG] from='client.15774 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:48:19.689 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:19 vm02 bash[17473]: audit 2026-03-10T08:48:18.653272+0000 mon.vm02 (mon.0) 1047 : audit [DBG] from='client.? 192.168.123.102:0/4064109594' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:48:19.689 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:19 vm02 bash[17473]: audit 2026-03-10T08:48:19.252611+0000 mon.vm02 (mon.0) 1048 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:48:21.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:21 vm02 bash[17473]: cluster 2026-03-10T08:48:20.132066+0000 mgr.vm02.ttibzz (mgr.14195) 670 : cluster [DBG] pgmap v398: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:48:23.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:23 vm02 bash[17473]: cluster 2026-03-10T08:48:22.132513+0000 mgr.vm02.ttibzz (mgr.14195) 671 : cluster [DBG] pgmap v399: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:48:23.835 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:48:23.999 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:48:23.999 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (3m) 2m ago 9m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:48:23.999 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (2m) 2m ago 9m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:48:23.999 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 2m ago 9m - -
2026-03-10T08:48:23.999 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (9m) 2m ago 9m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:48:24.195 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:48:24.195 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:48:24.195 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:48:24.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:24 vm02 bash[17473]: audit 2026-03-10T08:48:23.823375+0000 mgr.vm02.ttibzz (mgr.14195) 672 : audit [DBG] from='client.15782 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:48:24.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:24 vm02 bash[17473]: audit 2026-03-10T08:48:24.199930+0000 mon.vm02 (mon.0) 1049 : audit [DBG] from='client.? 
192.168.123.102:0/252832453' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:48:25.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:25 vm02 bash[17473]: audit 2026-03-10T08:48:24.000836+0000 mgr.vm02.ttibzz (mgr.14195) 673 : audit [DBG] from='client.15786 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:48:25.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:25 vm02 bash[17473]: audit 2026-03-10T08:48:24.000836+0000 mgr.vm02.ttibzz (mgr.14195) 673 : audit [DBG] from='client.15786 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:48:25.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:25 vm02 bash[17473]: cluster 2026-03-10T08:48:24.132959+0000 mgr.vm02.ttibzz (mgr.14195) 674 : cluster [DBG] pgmap v400: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:48:25.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:25 vm02 bash[17473]: cluster 2026-03-10T08:48:24.132959+0000 mgr.vm02.ttibzz (mgr.14195) 674 : cluster [DBG] pgmap v400: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:48:27.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:27 vm02 bash[17473]: cluster 2026-03-10T08:48:26.133590+0000 mgr.vm02.ttibzz (mgr.14195) 675 : cluster [DBG] pgmap v401: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:48:27.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:27 vm02 bash[17473]: cluster 2026-03-10T08:48:26.133590+0000 mgr.vm02.ttibzz (mgr.14195) 675 : cluster [DBG] pgmap v401: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:48:29.392 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop 2026-03-10T08:48:29.553 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:48:29.553 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (3m) 2m ago 9m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:48:29.553 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (2m) 2m ago 9m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:48:29.553 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 2m ago 9m - - 2026-03-10T08:48:29.553 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (9m) 2m ago 9m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:48:29.751 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:48:29.751 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:48:29.751 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state 2026-03-10T08:48:29.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:29 vm02 bash[17473]: cluster 2026-03-10T08:48:28.133962+0000 mgr.vm02.ttibzz (mgr.14195) 676 : cluster [DBG] pgmap v402: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:48:29.781 
INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:29 vm02 bash[17473]: cluster 2026-03-10T08:48:28.133962+0000 mgr.vm02.ttibzz (mgr.14195) 676 : cluster [DBG] pgmap v402: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:48:30.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:30 vm02 bash[17473]: audit 2026-03-10T08:48:29.379788+0000 mgr.vm02.ttibzz (mgr.14195) 677 : audit [DBG] from='client.15794 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:48:30.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:30 vm02 bash[17473]: audit 2026-03-10T08:48:29.379788+0000 mgr.vm02.ttibzz (mgr.14195) 677 : audit [DBG] from='client.15794 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:48:30.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:30 vm02 bash[17473]: audit 2026-03-10T08:48:29.555391+0000 mgr.vm02.ttibzz (mgr.14195) 678 : audit [DBG] from='client.25175 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:48:30.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:30 vm02 bash[17473]: audit 2026-03-10T08:48:29.555391+0000 mgr.vm02.ttibzz (mgr.14195) 678 : audit [DBG] from='client.25175 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:48:30.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:30 vm02 bash[17473]: audit 2026-03-10T08:48:29.756356+0000 mon.vm02 (mon.0) 1050 : audit [DBG] from='client.? 192.168.123.102:0/2021648940' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:48:30.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:30 vm02 bash[17473]: audit 2026-03-10T08:48:29.756356+0000 mon.vm02 (mon.0) 1050 : audit [DBG] from='client.? 
2026-03-10T08:48:31.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:31 vm02 bash[17473]: cluster 2026-03-10T08:48:30.134368+0000 mgr.vm02.ttibzz (mgr.14195) 679 : cluster [DBG] pgmap v403: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:48:33.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:33 vm02 bash[17473]: cluster 2026-03-10T08:48:32.134826+0000 mgr.vm02.ttibzz (mgr.14195) 680 : cluster [DBG] pgmap v404: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:48:34.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:34 vm02 bash[17473]: audit 2026-03-10T08:48:34.252862+0000 mon.vm02 (mon.0) 1051 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:48:34.934 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:48:35.095 INFO:teuthology.orchestra.run.vm02.stdout:NAME                 HOST  PORTS   STATUS         REFRESHED  AGE  MEM USE  MEM LIM  VERSION               IMAGE ID      CONTAINER ID
2026-03-10T08:48:35.095 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf  vm02  *:8001  running (3m)   2m ago     9m   93.4M    -        19.2.3-678-ge911bdeb  654f31e6858e  b100074ef3fc
2026-03-10T08:48:35.095 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv  vm02  *:8000  running (3m)   2m ago     9m   89.1M    -        19.2.3-678-ge911bdeb  654f31e6858e  5dfbee772545
2026-03-10T08:48:35.095 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd  vm07  *:8000  error          2m ago     9m   -        -
2026-03-10T08:48:35.095 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez  vm07  *:8001  running (9m)   2m ago     9m   105M     -        19.2.3-678-ge911bdeb  654f31e6858e  5c0984cdc6b8
2026-03-10T08:48:35.297 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:48:35.297 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:48:35.297 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:48:35.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:35 vm02 bash[17473]: cluster 2026-03-10T08:48:34.135391+0000 mgr.vm02.ttibzz (mgr.14195) 681 : cluster [DBG] pgmap v405: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:48:35.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:35 vm02 bash[17473]: audit 2026-03-10T08:48:34.922956+0000 mgr.vm02.ttibzz (mgr.14195) 682 : audit [DBG] from='client.15804 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:48:35.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:35 vm02 bash[17473]: audit 2026-03-10T08:48:35.302248+0000 mon.vm02 (mon.0) 1052 : audit [DBG] from='client.? 192.168.123.102:0/3954659765' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:48:36.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:36 vm02 bash[17473]: audit 2026-03-10T08:48:35.098134+0000 mgr.vm02.ttibzz (mgr.14195) 683 : audit [DBG] from='client.25185 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:48:37.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:37 vm02 bash[17473]: cluster 2026-03-10T08:48:36.135838+0000 mgr.vm02.ttibzz (mgr.14195) 684 : cluster [DBG] pgmap v406: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:48:40.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:39 vm02 bash[17473]: cluster 2026-03-10T08:48:38.136292+0000 mgr.vm02.ttibzz (mgr.14195) 685 : cluster [DBG] pgmap v407: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:48:40.476 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:48:40.642 INFO:teuthology.orchestra.run.vm02.stdout:NAME                 HOST  PORTS   STATUS         REFRESHED  AGE  MEM USE  MEM LIM  VERSION               IMAGE ID      CONTAINER ID
2026-03-10T08:48:40.642 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf  vm02  *:8001  running (3m)   3m ago     9m   93.4M    -        19.2.3-678-ge911bdeb  654f31e6858e  b100074ef3fc
2026-03-10T08:48:40.642 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv  vm02  *:8000  running (3m)   3m ago     9m   89.1M    -        19.2.3-678-ge911bdeb  654f31e6858e  5dfbee772545
2026-03-10T08:48:40.642 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd  vm07  *:8000  error          2m ago     9m   -        -
2026-03-10T08:48:40.642 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez  vm07  *:8001  running (9m)   2m ago     9m   105M     -        19.2.3-678-ge911bdeb  654f31e6858e  5c0984cdc6b8
2026-03-10T08:48:40.842 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:48:40.842 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:48:40.842 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:48:42.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:41 vm02 bash[17473]: cluster 2026-03-10T08:48:40.136646+0000 mgr.vm02.ttibzz (mgr.14195) 686 : cluster [DBG] pgmap v408: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:48:42.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:41 vm02 bash[17473]: audit 2026-03-10T08:48:40.464740+0000 mgr.vm02.ttibzz (mgr.14195) 687 : audit [DBG] from='client.25191 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:48:42.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:41 vm02 bash[17473]: audit 2026-03-10T08:48:40.644824+0000 mgr.vm02.ttibzz (mgr.14195) 688 : audit [DBG] from='client.25195 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:48:42.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:41 vm02 bash[17473]: audit 2026-03-10T08:48:40.847025+0000 mon.vm02 (mon.0) 1053 : audit [DBG] from='client.? 192.168.123.102:0/2646280676' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:48:44.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:43 vm02 bash[17473]: cluster 2026-03-10T08:48:42.137142+0000 mgr.vm02.ttibzz (mgr.14195) 689 : cluster [DBG] pgmap v409: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:48:46.024 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:48:46.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:45 vm02 bash[17473]: cluster 2026-03-10T08:48:44.137548+0000 mgr.vm02.ttibzz (mgr.14195) 690 : cluster [DBG] pgmap v410: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:48:46.183 INFO:teuthology.orchestra.run.vm02.stdout:NAME                 HOST  PORTS   STATUS         REFRESHED  AGE  MEM USE  MEM LIM  VERSION               IMAGE ID      CONTAINER ID
2026-03-10T08:48:46.183 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf  vm02  *:8001  running (3m)   3m ago     9m   93.4M    -        19.2.3-678-ge911bdeb  654f31e6858e  b100074ef3fc
2026-03-10T08:48:46.183 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv  vm02  *:8000  running (3m)   3m ago     9m   89.1M    -        19.2.3-678-ge911bdeb  654f31e6858e  5dfbee772545
2026-03-10T08:48:46.183 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd  vm07  *:8000  error          2m ago     9m   -        -
2026-03-10T08:48:46.183 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez  vm07  *:8001  running (9m)   2m ago     9m   105M     -        19.2.3-678-ge911bdeb  654f31e6858e  5c0984cdc6b8
2026-03-10T08:48:46.382 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:48:46.382 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:48:46.382 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:48:47.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:46 vm02 bash[17473]: audit 2026-03-10T08:48:46.387423+0000 mon.vm02 (mon.0) 1054 : audit [DBG] from='client.? 192.168.123.102:0/1662546950' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:48:48.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:47 vm02 bash[17473]: audit 2026-03-10T08:48:46.013393+0000 mgr.vm02.ttibzz (mgr.14195) 691 : audit [DBG] from='client.15828 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:48:48.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:47 vm02 bash[17473]: cluster 2026-03-10T08:48:46.137920+0000 mgr.vm02.ttibzz (mgr.14195) 692 : cluster [DBG] pgmap v411: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:48:48.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:47 vm02 bash[17473]: audit 2026-03-10T08:48:46.186146+0000 mgr.vm02.ttibzz (mgr.14195) 693 : audit [DBG] from='client.15832 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:48:50.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:49 vm02 bash[17473]: cluster 2026-03-10T08:48:48.138403+0000 mgr.vm02.ttibzz (mgr.14195) 694 : cluster [DBG] pgmap v412: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:48:50.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:49 vm02 bash[17473]: audit 2026-03-10T08:48:49.252926+0000 mon.vm02 (mon.0) 1055 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:48:51.579 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:48:51.751 INFO:teuthology.orchestra.run.vm02.stdout:NAME                 HOST  PORTS   STATUS         REFRESHED  AGE  MEM USE  MEM LIM  VERSION               IMAGE ID      CONTAINER ID
2026-03-10T08:48:51.751 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf  vm02  *:8001  running (3m)   3m ago     9m   93.4M    -        19.2.3-678-ge911bdeb  654f31e6858e  b100074ef3fc
2026-03-10T08:48:51.751 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv  vm02  *:8000  running (3m)   3m ago     9m   89.1M    -        19.2.3-678-ge911bdeb  654f31e6858e  5dfbee772545
2026-03-10T08:48:51.751 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd  vm07  *:8000  error          2m ago     9m   -        -
2026-03-10T08:48:51.751 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez  vm07  *:8001  running (9m)   2m ago     9m   105M     -        19.2.3-678-ge911bdeb  654f31e6858e  5c0984cdc6b8
2026-03-10T08:48:51.948 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:48:51.948 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:48:51.948 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:48:52.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:51 vm02 bash[17473]: cluster 2026-03-10T08:48:50.138887+0000 mgr.vm02.ttibzz (mgr.14195) 695 : cluster [DBG] pgmap v413: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:48:53.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:52 vm02 bash[17473]: audit 2026-03-10T08:48:51.569967+0000 mgr.vm02.ttibzz (mgr.14195) 696 : audit [DBG] from='client.15840 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:48:53.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:52 vm02 bash[17473]: audit 2026-03-10T08:48:51.753754+0000 mgr.vm02.ttibzz (mgr.14195) 697 : audit [DBG] from='client.15844 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:48:53.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:52 vm02 bash[17473]: audit 2026-03-10T08:48:51.953219+0000 mon.vm02 (mon.0) 1056 : audit [DBG] from='client.? 192.168.123.102:0/3740992973' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:48:54.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:53 vm02 bash[17473]: cluster 2026-03-10T08:48:52.139400+0000 mgr.vm02.ttibzz (mgr.14195) 698 : cluster [DBG] pgmap v414: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:48:56.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:55 vm02 bash[17473]: cluster 2026-03-10T08:48:54.139849+0000 mgr.vm02.ttibzz (mgr.14195) 699 : cluster [DBG] pgmap v415: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:48:57.129 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:48:57.287 INFO:teuthology.orchestra.run.vm02.stdout:NAME                 HOST  PORTS   STATUS         REFRESHED  AGE  MEM USE  MEM LIM  VERSION               IMAGE ID      CONTAINER ID
2026-03-10T08:48:57.287 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf  vm02  *:8001  running (4m)   3m ago     9m   93.4M    -        19.2.3-678-ge911bdeb  654f31e6858e  b100074ef3fc
2026-03-10T08:48:57.287 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv  vm02  *:8000  running (3m)   3m ago     9m   89.1M    -        19.2.3-678-ge911bdeb  654f31e6858e  5dfbee772545
2026-03-10T08:48:57.287 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd  vm07  *:8000  error          2m ago     9m   -        -
2026-03-10T08:48:57.287 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez  vm07  *:8001  running (9m)   2m ago     9m   105M     -        19.2.3-678-ge911bdeb  654f31e6858e  5c0984cdc6b8
2026-03-10T08:48:57.484 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:48:57.484 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:48:57.484 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:48:58.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:57 vm02 bash[17473]: cluster 2026-03-10T08:48:56.140261+0000 mgr.vm02.ttibzz (mgr.14195) 700 : cluster [DBG] pgmap v416: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:48:58.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:57 vm02 bash[17473]: audit 2026-03-10T08:48:57.489636+0000 mon.vm02 (mon.0) 1057 : audit [DBG] from='client.? 192.168.123.102:0/2613677905' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:48:59.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:58 vm02 bash[17473]: audit 2026-03-10T08:48:57.117510+0000 mgr.vm02.ttibzz (mgr.14195) 701 : audit [DBG] from='client.15852 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:48:59.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:58 vm02 bash[17473]: audit 2026-03-10T08:48:57.290056+0000 mgr.vm02.ttibzz (mgr.14195) 702 : audit [DBG] from='client.15856 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:49:00.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:48:59 vm02 bash[17473]: cluster 2026-03-10T08:48:58.140617+0000 mgr.vm02.ttibzz (mgr.14195) 703 : cluster [DBG] pgmap v417: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:49:02.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:01 vm02 bash[17473]: cluster 2026-03-10T08:49:00.140980+0000 mgr.vm02.ttibzz (mgr.14195) 704 : cluster [DBG] pgmap v418: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:49:02.662 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:49:02.823 INFO:teuthology.orchestra.run.vm02.stdout:NAME                 HOST  PORTS   STATUS         REFRESHED  AGE  MEM USE  MEM LIM  VERSION               IMAGE ID      CONTAINER ID
2026-03-10T08:49:02.823 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf  vm02  *:8001  running (4m)   3m ago     9m   93.4M    -        19.2.3-678-ge911bdeb  654f31e6858e  b100074ef3fc
2026-03-10T08:49:02.823 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv  vm02  *:8000  running (3m)   3m ago     9m   89.1M    -        19.2.3-678-ge911bdeb  654f31e6858e  5dfbee772545
2026-03-10T08:49:02.823 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd  vm07  *:8000  error          2m ago     10m  -        -
2026-03-10T08:49:02.823 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez  vm07  *:8001  running (9m)   2m ago     9m   105M     -        19.2.3-678-ge911bdeb  654f31e6858e  5c0984cdc6b8
2026-03-10T08:49:03.015 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:49:03.015 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:49:03.015 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:49:04.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:03 vm02 bash[17473]: cluster 2026-03-10T08:49:02.141522+0000 mgr.vm02.ttibzz (mgr.14195) 705 : cluster [DBG] pgmap v419: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:49:04.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:03 vm02 bash[17473]: audit 2026-03-10T08:49:02.650239+0000 mgr.vm02.ttibzz (mgr.14195) 706 : audit [DBG] from='client.15864 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:49:04.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:03 vm02 bash[17473]: audit 2026-03-10T08:49:02.824442+0000 mgr.vm02.ttibzz (mgr.14195) 707 : audit [DBG] from='client.15868 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:49:04.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:03 vm02 bash[17473]: audit 2026-03-10T08:49:03.020563+0000 mon.vm02 (mon.0) 1058 : audit [DBG] from='client.? 192.168.123.102:0/2254842644' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:49:05.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:04 vm02 bash[17473]: audit 2026-03-10T08:49:04.183939+0000 mon.vm02 (mon.0) 1059 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:49:05.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:04 vm02 bash[17473]: audit 2026-03-10T08:49:04.253433+0000 mon.vm02 (mon.0) 1060 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:49:05.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:04 vm02 bash[17473]: audit 2026-03-10T08:49:04.532586+0000 mon.vm02 (mon.0) 1061 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:49:05.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:04 vm02 bash[17473]: audit 2026-03-10T08:49:04.533215+0000 mon.vm02 (mon.0) 1062 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T08:49:05.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:04 vm02 bash[17473]: audit 2026-03-10T08:49:04.538994+0000 mon.vm02 (mon.0) 1063 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:49:05.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:04 vm02 bash[17473]: audit 2026-03-10T08:49:04.541096+0000 mon.vm02 (mon.0) 1064 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T08:49:06.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:05 vm02 bash[17473]: cluster 2026-03-10T08:49:04.141933+0000 mgr.vm02.ttibzz (mgr.14195) 708 : cluster [DBG] pgmap v420: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:49:06.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:05 vm02 bash[17473]: cluster 2026-03-10T08:49:04.534143+0000 mgr.vm02.ttibzz (mgr.14195) 709 : cluster [DBG] pgmap v421: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 197 B/s wr, 0 op/s
2026-03-10T08:49:06.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:05 vm02 bash[17473]: cluster 2026-03-10T08:49:04.534322+0000 mgr.vm02.ttibzz (mgr.14195) 710 : cluster [DBG] pgmap v422: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 243 B/s wr, 0 op/s
2026-03-10T08:49:08.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:07 vm02 bash[17473]: cluster 2026-03-10T08:49:06.534667+0000 mgr.vm02.ttibzz (mgr.14195) 711 : cluster [DBG] pgmap v423: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 243 B/s wr, 0 op/s
2026-03-10T08:49:08.199 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:49:08.360 INFO:teuthology.orchestra.run.vm02.stdout:NAME                 HOST  PORTS   STATUS         REFRESHED  AGE  MEM USE  MEM LIM  VERSION               IMAGE ID      CONTAINER ID
2026-03-10T08:49:08.360 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf  vm02  *:8001  running (4m)   3m ago     10m  93.4M    -        19.2.3-678-ge911bdeb  654f31e6858e  b100074ef3fc
2026-03-10T08:49:08.361 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv  vm02  *:8000  running (3m)   3m ago     10m  89.1M    -        19.2.3-678-ge911bdeb  654f31e6858e  5dfbee772545
2026-03-10T08:49:08.361 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd  vm07  *:8000  error          3m ago     10m  -        -
2026-03-10T08:49:08.361 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez  vm07  *:8001  running (10m)  3m ago     10m  105M     -        19.2.3-678-ge911bdeb  654f31e6858e  5c0984cdc6b8
2026-03-10T08:49:08.548 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:49:08.548 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:49:08.548 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:49:09.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:08 vm02 bash[17473]: audit 2026-03-10T08:49:08.548664+0000 mon.vm07 (mon.1) 41 : audit [DBG] from='client.? 192.168.123.102:0/3130178453' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:49:10.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:09 vm02 bash[17473]: audit 2026-03-10T08:49:08.187311+0000 mgr.vm02.ttibzz (mgr.14195) 712 : audit [DBG] from='client.15876 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:49:10.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:09 vm02 bash[17473]: audit 2026-03-10T08:49:08.360817+0000 mgr.vm02.ttibzz (mgr.14195) 713 : audit [DBG] from='client.15880 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:49:10.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:09 vm02 bash[17473]: cluster 2026-03-10T08:49:08.535055+0000 mgr.vm02.ttibzz (mgr.14195) 714 : cluster [DBG] pgmap v424: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:49:12.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:11 vm02 bash[17473]: cluster 2026-03-10T08:49:10.535524+0000 mgr.vm02.ttibzz (mgr.14195) 715 : cluster [DBG] pgmap v425: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 243 B/s wr, 0 op/s
2026-03-10T08:49:13.798 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:49:13.952 INFO:teuthology.orchestra.run.vm02.stdout:NAME                 HOST  PORTS   STATUS         REFRESHED  AGE  MEM USE  MEM LIM  VERSION               IMAGE ID      CONTAINER ID
2026-03-10T08:49:13.952 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf  vm02  *:8001  running (4m)   3m ago     10m  93.4M    -        19.2.3-678-ge911bdeb  654f31e6858e  b100074ef3fc
2026-03-10T08:49:13.952 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv  vm02  *:8000  running (3m)   3m ago     10m  89.1M    -        19.2.3-678-ge911bdeb  654f31e6858e  5dfbee772545
2026-03-10T08:49:13.952 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd  vm07  *:8000  error          3m ago     10m  -        -
2026-03-10T08:49:13.952 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez  vm07  *:8001  running (10m)  3m ago     10m  105M     -        19.2.3-678-ge911bdeb  654f31e6858e  5c0984cdc6b8
2026-03-10T08:49:14.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:13 vm02 bash[17473]: cluster 2026-03-10T08:49:12.535857+0000 mgr.vm02.ttibzz (mgr.14195) 716 : cluster [DBG] pgmap v426: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 243 B/s wr, 0 op/s
2026-03-10T08:49:14.144 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:49:14.144 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:49:14.144 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:49:15.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:14 vm02 bash[17473]: audit 2026-03-10T08:49:13.787854+0000 mgr.vm02.ttibzz (mgr.14195) 717 : audit [DBG] from='client.15888 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:49:15.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:14 vm02 bash[17473]: audit 2026-03-10T08:49:13.955101+0000 mgr.vm02.ttibzz (mgr.14195) 718 : audit [DBG] from='client.15892 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:49:15.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:14 vm02 bash[17473]: audit 2026-03-10T08:49:14.150015+0000 mon.vm02 (mon.0) 1065 : audit [DBG] from='client.? 192.168.123.102:0/1478226356' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:49:16.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:15 vm02 bash[17473]: cluster 2026-03-10T08:49:14.536247+0000 mgr.vm02.ttibzz (mgr.14195) 719 : cluster [DBG] pgmap v427: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 6.8 KiB/s rd, 409 B/s wr, 11 op/s
2026-03-10T08:49:18.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:17 vm02 bash[17473]: cluster 2026-03-10T08:49:16.536607+0000 mgr.vm02.ttibzz (mgr.14195) 720 : cluster [DBG] pgmap v428: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 23 KiB/s rd, 341 B/s wr, 38 op/s
2026-03-10T08:49:19.327 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:49:19.487 INFO:teuthology.orchestra.run.vm02.stdout:NAME                 HOST  PORTS   STATUS         REFRESHED  AGE  MEM USE  MEM LIM  VERSION               IMAGE ID      CONTAINER ID
2026-03-10T08:49:19.488 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf  vm02  *:8001  running (4m)   3m ago     10m  93.4M    -        19.2.3-678-ge911bdeb  654f31e6858e  b100074ef3fc
2026-03-10T08:49:19.488 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv  vm02  *:8000  running (3m)   3m ago     10m  89.1M    -        19.2.3-678-ge911bdeb  654f31e6858e  5dfbee772545
2026-03-10T08:49:19.488 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd  vm07  *:8000  error          3m ago     10m  -        -
2026-03-10T08:49:19.488 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez  vm07  *:8001  running (10m)  3m ago     10m  105M     -        19.2.3-678-ge911bdeb  654f31e6858e  5c0984cdc6b8
2026-03-10T08:49:19.676 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:49:19.676 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:49:19.676 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:49:20.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:19 vm02 bash[17473]: cluster 2026-03-10T08:49:18.536934+0000 mgr.vm02.ttibzz (mgr.14195) 721 : cluster [DBG] pgmap v429: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s
2026-03-10T08:49:20.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:19 vm02 bash[17473]: audit 2026-03-10T08:49:19.253489+0000 mon.vm02 (mon.0) 1066 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:49:20.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:19 vm02 bash[17473]: audit 2026-03-10T08:49:19.681631+0000 mon.vm02 (mon.0) 1067 : audit [DBG] from='client.? 192.168.123.102:0/3437575292' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:49:21.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:20 vm02 bash[17473]: audit 2026-03-10T08:49:19.316408+0000 mgr.vm02.ttibzz (mgr.14195) 722 : audit [DBG] from='client.15900 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:49:21.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:20 vm02 bash[17473]: audit 2026-03-10T08:49:19.490448+0000 mgr.vm02.ttibzz (mgr.14195) 723 : audit [DBG] from='client.15904 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:49:22.093 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:21 vm02 bash[17473]: cluster 2026-03-10T08:49:20.537381+0000 mgr.vm02.ttibzz (mgr.14195) 724 : cluster [DBG] pgmap v430: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s
2026-03-10T08:49:24.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:23 vm02 bash[17473]: cluster 2026-03-10T08:49:22.537760+0000 mgr.vm02.ttibzz (mgr.14195) 725 : cluster [DBG] pgmap v431: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s
2026-03-10T08:49:24.847 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:49:25.000 INFO:teuthology.orchestra.run.vm02.stdout:NAME                 HOST  PORTS   STATUS         REFRESHED  AGE  MEM USE  MEM LIM  VERSION               IMAGE ID      CONTAINER ID
2026-03-10T08:49:25.000 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf  vm02  *:8001  running (4m)   3m ago     10m  93.4M    -        19.2.3-678-ge911bdeb  654f31e6858e  b100074ef3fc
2026-03-10T08:49:25.000 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv  vm02  *:8000  running (3m)   3m ago     10m  89.1M    -        19.2.3-678-ge911bdeb  654f31e6858e  5dfbee772545
2026-03-10T08:49:25.000 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd  vm07  *:8000  error          3m ago     10m  -        -
2026-03-10T08:49:25.000 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez  vm07  *:8001  running (10m)  3m ago     10m  105M     -        19.2.3-678-ge911bdeb  654f31e6858e  5c0984cdc6b8
2026-03-10T08:49:25.194 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:49:25.194 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:49:25.194 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:49:26.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:25 vm02 bash[17473]: cluster 2026-03-10T08:49:24.538140+0000 mgr.vm02.ttibzz (mgr.14195) 726 : cluster [DBG] pgmap v432: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s
2026-03-10T08:49:26.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:25 vm02 bash[17473]: audit 2026-03-10T08:49:24.838496+0000 mgr.vm02.ttibzz (mgr.14195) 727 : audit [DBG] from='client.15912 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:49:26.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:25 vm02 bash[17473]: audit 2026-03-10T08:49:25.199468+0000 mon.vm02 (mon.0) 1068 : audit [DBG] from='client.? 192.168.123.102:0/726113167' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:49:26.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:25 vm02 bash[17473]: audit 2026-03-10T08:49:25.199468+0000 mon.vm02 (mon.0) 1068 : audit [DBG] from='client.?
192.168.123.102:0/726113167' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:49:27.150 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:26 vm02 bash[17473]: audit 2026-03-10T08:49:25.003158+0000 mgr.vm02.ttibzz (mgr.14195) 728 : audit [DBG] from='client.15916 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:49:27.150 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:26 vm02 bash[17473]: audit 2026-03-10T08:49:25.003158+0000 mgr.vm02.ttibzz (mgr.14195) 728 : audit [DBG] from='client.15916 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:49:28.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:27 vm02 bash[17473]: cluster 2026-03-10T08:49:26.538564+0000 mgr.vm02.ttibzz (mgr.14195) 729 : cluster [DBG] pgmap v433: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 30 KiB/s rd, 0 B/s wr, 50 op/s 2026-03-10T08:49:28.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:27 vm02 bash[17473]: cluster 2026-03-10T08:49:26.538564+0000 mgr.vm02.ttibzz (mgr.14195) 729 : cluster [DBG] pgmap v433: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 30 KiB/s rd, 0 B/s wr, 50 op/s 2026-03-10T08:49:30.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:29 vm02 bash[17473]: cluster 2026-03-10T08:49:28.539125+0000 mgr.vm02.ttibzz (mgr.14195) 730 : cluster [DBG] pgmap v434: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 13 KiB/s rd, 0 B/s wr, 21 op/s 2026-03-10T08:49:30.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:29 vm02 bash[17473]: cluster 2026-03-10T08:49:28.539125+0000 mgr.vm02.ttibzz (mgr.14195) 730 : cluster [DBG] pgmap v434: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 13 KiB/s rd, 0 B/s wr, 21 op/s 2026-03-10T08:49:30.373 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop 2026-03-10T08:49:30.529 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:49:30.529 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (4m) 3m ago 10m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:49:30.529 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (3m) 3m ago 10m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:49:30.529 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 3m ago 10m - - 2026-03-10T08:49:30.529 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (10m) 3m ago 10m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:49:30.726 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:49:30.726 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:49:30.726 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state 2026-03-10T08:49:31.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:30 vm02 bash[17473]: audit 2026-03-10T08:49:30.731529+0000 mon.vm02 (mon.0) 1069 : audit [DBG] from='client.? 
192.168.123.102:0/3043444993' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:49:31.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:30 vm02 bash[17473]: audit 2026-03-10T08:49:30.731529+0000 mon.vm02 (mon.0) 1069 : audit [DBG] from='client.? 192.168.123.102:0/3043444993' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:49:32.208 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:31 vm02 bash[17473]: audit 2026-03-10T08:49:30.362459+0000 mgr.vm02.ttibzz (mgr.14195) 731 : audit [DBG] from='client.15924 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:49:32.208 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:31 vm02 bash[17473]: audit 2026-03-10T08:49:30.362459+0000 mgr.vm02.ttibzz (mgr.14195) 731 : audit [DBG] from='client.15924 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:49:32.208 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:31 vm02 bash[17473]: audit 2026-03-10T08:49:30.531419+0000 mgr.vm02.ttibzz (mgr.14195) 732 : audit [DBG] from='client.15928 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:49:32.208 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:31 vm02 bash[17473]: audit 2026-03-10T08:49:30.531419+0000 mgr.vm02.ttibzz (mgr.14195) 732 : audit [DBG] from='client.15928 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:49:32.208 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:31 vm02 bash[17473]: cluster 2026-03-10T08:49:30.539649+0000 mgr.vm02.ttibzz (mgr.14195) 733 : cluster [DBG] pgmap v435: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:49:32.208 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:31 vm02 bash[17473]: cluster 2026-03-10T08:49:30.539649+0000 mgr.vm02.ttibzz (mgr.14195) 733 : cluster [DBG] pgmap v435: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:49:34.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:33 vm02 bash[17473]: cluster 2026-03-10T08:49:32.540125+0000 mgr.vm02.ttibzz (mgr.14195) 734 : cluster [DBG] pgmap v436: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:49:34.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:33 vm02 bash[17473]: cluster 2026-03-10T08:49:32.540125+0000 mgr.vm02.ttibzz (mgr.14195) 734 : cluster [DBG] pgmap v436: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:49:35.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:34 vm02 bash[17473]: audit 2026-03-10T08:49:34.253792+0000 mon.vm02 (mon.0) 1070 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:49:35.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:34 vm02 bash[17473]: audit 2026-03-10T08:49:34.253792+0000 mon.vm02 (mon.0) 1070 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:49:35.909 
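Note: the repeated "Waiting for rgw.foo.vm07.wecerd to stop" lines above come from a poll that is looking for a "stopped" status, while `ceph orch ps` keeps reporting the daemon as "error"; nothing can match until the daemon changes state or the surrounding timeout expires. A minimal sketch of a poll that treats "error" as terminal instead of spinning (illustrative only; jq is assumed available, and the daemon_name/status_desc keys are assumed field names in the `ceph orch ps --format json` dump):

    # Poll one cephadm daemon until it reports "stopped"; bail out early on "error".
    daemon=rgw.foo.vm07.wecerd
    for _ in $(seq 1 60); do
        status=$(ceph orch ps --daemon-type rgw --format json |
                 jq -r --arg d "$daemon" '.[] | select(.daemon_name == $d) | .status_desc')
        case "$status" in
            stopped*) echo "$daemon stopped"; exit 0 ;;
            error*)   echo "$daemon entered error state instead of stopping"; exit 1 ;;
            *)        sleep 5 ;;
        esac
    done
    echo "timed out waiting for $daemon to stop"; exit 1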
2026-03-10T08:49:35.909 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:49:36.172 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:49:36.172 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (4m) 3m ago 10m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:49:36.172 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (4m) 3m ago 10m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:49:36.172 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 3m ago 10m - -
2026-03-10T08:49:36.172 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (10m) 3m ago 10m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:49:36.414 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:49:36.415 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:49:36.415 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:49:36.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:36 vm02 bash[17473]: cluster 2026-03-10T08:49:34.540599+0000 mgr.vm02.ttibzz (mgr.14195) 735 : cluster [DBG] pgmap v437: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:49:37.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:37 vm02 bash[17473]: audit 2026-03-10T08:49:35.899088+0000 mgr.vm02.ttibzz (mgr.14195) 736 : audit [DBG] from='client.15936 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:49:37.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:37 vm02 bash[17473]: audit 2026-03-10T08:49:36.174960+0000 mgr.vm02.ttibzz (mgr.14195) 737 : audit [DBG] from='client.15940 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:49:37.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:37 vm02 bash[17473]: audit 2026-03-10T08:49:36.420212+0000 mon.vm02 (mon.0) 1071 : audit [DBG] from='client.? 192.168.123.102:0/3123756525' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:49:37.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:37 vm02 bash[17473]: cluster 2026-03-10T08:49:36.541031+0000 mgr.vm02.ttibzz (mgr.14195) 738 : cluster [DBG] pgmap v438: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:49:40.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:39 vm02 bash[17473]: cluster 2026-03-10T08:49:38.541409+0000 mgr.vm02.ttibzz (mgr.14195) 739 : cluster [DBG] pgmap v439: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:49:41.587 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:49:41.735 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:49:41.735 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (4m) 4m ago 10m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:49:41.735 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (4m) 4m ago 10m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:49:41.735 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 3m ago 10m - -
2026-03-10T08:49:41.735 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (10m) 3m ago 10m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:49:41.928 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:49:41.928 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:49:41.928 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:49:42.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:41 vm02 bash[17473]: cluster 2026-03-10T08:49:40.541796+0000 mgr.vm02.ttibzz (mgr.14195) 740 : cluster [DBG] pgmap v440: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:49:43.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:42 vm02 bash[17473]: audit 2026-03-10T08:49:41.577016+0000 mgr.vm02.ttibzz (mgr.14195) 741 : audit [DBG] from='client.15948 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:49:43.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:42 vm02 bash[17473]: audit 2026-03-10T08:49:41.737905+0000 mgr.vm02.ttibzz (mgr.14195) 742 : audit [DBG] from='client.25265 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:49:43.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:42 vm02 bash[17473]: audit 2026-03-10T08:49:41.933091+0000 mon.vm02 (mon.0) 1072 : audit [DBG] from='client.? 192.168.123.102:0/45660856' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:49:44.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:43 vm02 bash[17473]: cluster 2026-03-10T08:49:42.542137+0000 mgr.vm02.ttibzz (mgr.14195) 743 : cluster [DBG] pgmap v441: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:49:46.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:45 vm02 bash[17473]: cluster 2026-03-10T08:49:44.542445+0000 mgr.vm02.ttibzz (mgr.14195) 744 : cluster [DBG] pgmap v442: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:49:47.099 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:49:47.253 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:49:47.253 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (4m) 4m ago 10m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:49:47.253 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (4m) 4m ago 10m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:49:47.253 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 3m ago 10m - -
2026-03-10T08:49:47.253 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (10m) 3m ago 10m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:49:47.459 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:49:47.459 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:49:47.459 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:49:48.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:47 vm02 bash[17473]: cluster 2026-03-10T08:49:46.542767+0000 mgr.vm02.ttibzz (mgr.14195) 745 : cluster [DBG] pgmap v443: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:49:48.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:47 vm02 bash[17473]: audit 2026-03-10T08:49:47.464658+0000 mon.vm02 (mon.0) 1073 : audit [DBG] from='client.? 192.168.123.102:0/3697539960' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:49:49.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:48 vm02 bash[17473]: audit 2026-03-10T08:49:47.090050+0000 mgr.vm02.ttibzz (mgr.14195) 746 : audit [DBG] from='client.15960 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:49:49.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:48 vm02 bash[17473]: audit 2026-03-10T08:49:47.255822+0000 mgr.vm02.ttibzz (mgr.14195) 747 : audit [DBG] from='client.25275 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:49:50.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:50 vm02 bash[17473]: cluster 2026-03-10T08:49:48.543202+0000 mgr.vm02.ttibzz (mgr.14195) 748 : cluster [DBG] pgmap v444: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:49:50.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:50 vm02 bash[17473]: audit 2026-03-10T08:49:49.254051+0000 mon.vm02 (mon.0) 1074 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:49:51.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:51 vm02 bash[17473]: cluster 2026-03-10T08:49:50.543678+0000 mgr.vm02.ttibzz (mgr.14195) 749 : cluster [DBG] pgmap v445: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:49:52.663 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:49:52.830 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:49:52.830 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (4m) 4m ago 10m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:49:52.830 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (4m) 4m ago 10m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:49:52.830 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 3m ago 10m - -
2026-03-10T08:49:52.830 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (10m) 3m ago 10m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:49:53.021 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:49:53.021 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:49:53.021 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:49:54.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:53 vm02 bash[17473]: cluster 2026-03-10T08:49:52.544028+0000 mgr.vm02.ttibzz (mgr.14195) 750 : cluster [DBG] pgmap v446: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:49:54.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:53 vm02 bash[17473]: audit 2026-03-10T08:49:52.651601+0000 mgr.vm02.ttibzz (mgr.14195) 751 : audit [DBG] from='client.15972 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:49:54.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:53 vm02 bash[17473]: audit 2026-03-10T08:49:52.833239+0000 mgr.vm02.ttibzz (mgr.14195) 752 : audit [DBG] from='client.15976 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:49:54.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:53 vm02 bash[17473]: audit 2026-03-10T08:49:53.026498+0000 mon.vm02 (mon.0) 1075 : audit [DBG] from='client.? 192.168.123.102:0/2546733545' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:49:56.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:55 vm02 bash[17473]: cluster 2026-03-10T08:49:54.544429+0000 mgr.vm02.ttibzz (mgr.14195) 753 : cluster [DBG] pgmap v447: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:49:57.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:57 vm02 bash[17473]: cluster 2026-03-10T08:49:56.544898+0000 mgr.vm02.ttibzz (mgr.14195) 754 : cluster [DBG] pgmap v448: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail
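Note: every health poll in this stretch returns the same CEPHADM_FAILED_DAEMON warning for rgw.foo.vm07.wecerd. Outside an automated run like this one, the usual manual follow-up would be to ask the orchestrator to restart or fully redeploy the failed daemon; whether either helps depends on why it failed. A sketch using standard `ceph orch` subcommands:

    # Recreate the failed daemon's container from scratch:
    ceph orch daemon redeploy rgw.foo.vm07.wecerd
    # ...or first try a plain restart of the existing unit:
    ceph orch daemon restart rgw.foo.vm07.wecerd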
2026-03-10T08:49:58.203 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:49:58.361 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:49:58.361 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (5m) 4m ago 10m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:49:58.361 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (4m) 4m ago 10m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:49:58.361 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 3m ago 10m - -
2026-03-10T08:49:58.361 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (10m) 3m ago 10m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:49:58.554 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:49:58.554 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:49:58.554 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:49:59.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:49:59 vm02 bash[17473]: audit 2026-03-10T08:49:58.560017+0000 mon.vm02 (mon.0) 1076 : audit [DBG] from='client.? 192.168.123.102:0/2502083829' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:50:00.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:00 vm02 bash[17473]: audit 2026-03-10T08:49:58.193940+0000 mgr.vm02.ttibzz (mgr.14195) 755 : audit [DBG] from='client.15984 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:50:00.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:00 vm02 bash[17473]: audit 2026-03-10T08:49:58.363904+0000 mgr.vm02.ttibzz (mgr.14195) 756 : audit [DBG] from='client.15988 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:50:00.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:00 vm02 bash[17473]: cluster 2026-03-10T08:49:58.545314+0000 mgr.vm02.ttibzz (mgr.14195) 757 : cluster [DBG] pgmap v449: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:50:00.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:00 vm02 bash[17473]: cluster 2026-03-10T08:50:00.000093+0000 mon.vm02 (mon.0) 1077 : cluster [WRN] Health detail: HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:50:00.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:00 vm02 bash[17473]: cluster 2026-03-10T08:50:00.000125+0000 mon.vm02 (mon.0) 1078 : cluster [WRN] [WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:50:00.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:00 vm02 bash[17473]: cluster 2026-03-10T08:50:00.000138+0000 mon.vm02 (mon.0) 1079 : cluster [WRN] daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:50:01.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:01 vm02 bash[17473]: cluster 2026-03-10T08:50:00.545713+0000 mgr.vm02.ttibzz (mgr.14195) 758 : cluster [DBG] pgmap v450: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:50:03.728 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:50:03.883 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:50:03.883 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (5m) 4m ago 10m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:50:03.883 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (4m) 4m ago 11m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:50:03.883 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 4m ago 11m - -
2026-03-10T08:50:03.883 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (10m) 4m ago 10m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:50:04.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:03 vm02 bash[17473]: cluster 2026-03-10T08:50:02.546105+0000 mgr.vm02.ttibzz (mgr.14195) 759 : cluster [DBG] pgmap v451: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:50:04.086 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:50:04.086 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:50:04.086 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:50:05.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:04 vm02 bash[17473]: audit 2026-03-10T08:50:03.716798+0000 mgr.vm02.ttibzz (mgr.14195) 760 : audit [DBG] from='client.15996 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:50:05.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:04 vm02 bash[17473]: audit 2026-03-10T08:50:03.885945+0000 mgr.vm02.ttibzz (mgr.14195) 761 : audit [DBG] from='client.16000 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:50:05.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:04 vm02 bash[17473]: audit 2026-03-10T08:50:04.091631+0000 mon.vm02 (mon.0) 1080 : audit [DBG] from='client.? 192.168.123.102:0/1893520333' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:50:05.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:04 vm02 bash[17473]: audit 2026-03-10T08:50:04.254315+0000 mon.vm02 (mon.0) 1081 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:50:05.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:04 vm02 bash[17473]: audit 2026-03-10T08:50:04.582974+0000 mon.vm02 (mon.0) 1082 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:50:06.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:06 vm02 bash[17473]: cluster 2026-03-10T08:50:04.546513+0000 mgr.vm02.ttibzz (mgr.14195) 762 : cluster [DBG] pgmap v452: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:50:06.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:06 vm02 bash[17473]: audit 2026-03-10T08:50:04.924678+0000 mon.vm02 (mon.0) 1083 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:50:06.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:06 vm02 bash[17473]: audit 2026-03-10T08:50:04.925443+0000 mon.vm02 (mon.0) 1084 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T08:50:06.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:06 vm02 bash[17473]: cluster 2026-03-10T08:50:04.926408+0000 mgr.vm02.ttibzz (mgr.14195) 763 : cluster [DBG] pgmap v453: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 197 B/s wr, 0 op/s
2026-03-10T08:50:06.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:06 vm02 bash[17473]: cluster 2026-03-10T08:50:04.926553+0000 mgr.vm02.ttibzz (mgr.14195) 764 : cluster [DBG] pgmap v454: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 244 B/s wr, 0 op/s
2026-03-10T08:50:06.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:06 vm02 bash[17473]: audit 2026-03-10T08:50:04.931385+0000 mon.vm02 (mon.0) 1085 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:50:06.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:06 vm02 bash[17473]: audit 2026-03-10T08:50:04.935362+0000 mon.vm02 (mon.0) 1086 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T08:50:07.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:07 vm02 bash[17473]: cluster 2026-03-10T08:50:06.926939+0000 mgr.vm02.ttibzz (mgr.14195) 765 : cluster [DBG] pgmap v455: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 244 B/s wr, 0 op/s
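Note: the orchestrator output only says the daemon is in "error" state; the reason has to come from the daemon's own logs on the affected host. A sketch of how one might dig further on vm07 (the <fsid> placeholder stands in for the cluster fsid, which this excerpt does not show):

    # Tail the failed daemon's journal via the cephadm wrapper; arguments
    # after -- are passed through to journalctl:
    cephadm logs --name rgw.foo.vm07.wecerd -- -n 100
    # Equivalently, query the systemd unit directly:
    journalctl -u ceph-<fsid>@rgw.foo.vm07.wecerd.service -n 100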
2026-03-10T08:50:06.926939+0000 mgr.vm02.ttibzz (mgr.14195) 765 : cluster [DBG] pgmap v455: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 244 B/s wr, 0 op/s 2026-03-10T08:50:09.268 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop 2026-03-10T08:50:09.438 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:50:09.438 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (5m) 4m ago 11m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:50:09.438 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (4m) 4m ago 11m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:50:09.438 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 4m ago 11m - - 2026-03-10T08:50:09.438 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (11m) 4m ago 11m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:50:09.632 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:50:09.632 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:50:09.632 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state 2026-03-10T08:50:10.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:09 vm02 bash[17473]: cluster 2026-03-10T08:50:08.927351+0000 mgr.vm02.ttibzz (mgr.14195) 766 : cluster [DBG] pgmap v456: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:50:10.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:09 vm02 bash[17473]: cluster 2026-03-10T08:50:08.927351+0000 mgr.vm02.ttibzz (mgr.14195) 766 : cluster [DBG] pgmap v456: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:50:10.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:09 vm02 bash[17473]: audit 2026-03-10T08:50:09.637961+0000 mon.vm02 (mon.0) 1087 : audit [DBG] from='client.? 192.168.123.102:0/1989101173' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:50:10.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:09 vm02 bash[17473]: audit 2026-03-10T08:50:09.637961+0000 mon.vm02 (mon.0) 1087 : audit [DBG] from='client.? 
192.168.123.102:0/1989101173' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:50:11.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:10 vm02 bash[17473]: audit 2026-03-10T08:50:09.259379+0000 mgr.vm02.ttibzz (mgr.14195) 767 : audit [DBG] from='client.16008 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:50:11.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:10 vm02 bash[17473]: audit 2026-03-10T08:50:09.259379+0000 mgr.vm02.ttibzz (mgr.14195) 767 : audit [DBG] from='client.16008 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:50:11.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:10 vm02 bash[17473]: audit 2026-03-10T08:50:09.440792+0000 mgr.vm02.ttibzz (mgr.14195) 768 : audit [DBG] from='client.16012 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:50:11.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:10 vm02 bash[17473]: audit 2026-03-10T08:50:09.440792+0000 mgr.vm02.ttibzz (mgr.14195) 768 : audit [DBG] from='client.16012 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:50:12.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:11 vm02 bash[17473]: cluster 2026-03-10T08:50:10.927843+0000 mgr.vm02.ttibzz (mgr.14195) 769 : cluster [DBG] pgmap v457: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 244 B/s wr, 0 op/s 2026-03-10T08:50:12.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:11 vm02 bash[17473]: cluster 2026-03-10T08:50:10.927843+0000 mgr.vm02.ttibzz (mgr.14195) 769 : cluster [DBG] pgmap v457: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 244 B/s wr, 0 op/s 2026-03-10T08:50:13.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:13 vm02 bash[17473]: cluster 2026-03-10T08:50:12.928249+0000 mgr.vm02.ttibzz (mgr.14195) 770 : cluster [DBG] pgmap v458: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 244 B/s wr, 0 op/s 2026-03-10T08:50:13.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:13 vm02 bash[17473]: cluster 2026-03-10T08:50:12.928249+0000 mgr.vm02.ttibzz (mgr.14195) 770 : cluster [DBG] pgmap v458: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 244 B/s wr, 0 op/s 2026-03-10T08:50:14.804 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop 2026-03-10T08:50:14.961 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:50:14.961 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (5m) 4m ago 11m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:50:14.961 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (4m) 4m ago 11m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:50:14.961 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 4m ago 11m - - 2026-03-10T08:50:14.961 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (11m) 4m ago 11m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:50:15.154 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 
failed cephadm daemon(s) 2026-03-10T08:50:15.154 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:50:15.154 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state 2026-03-10T08:50:16.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:15 vm02 bash[17473]: audit 2026-03-10T08:50:14.794964+0000 mgr.vm02.ttibzz (mgr.14195) 771 : audit [DBG] from='client.16020 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:50:16.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:15 vm02 bash[17473]: audit 2026-03-10T08:50:14.794964+0000 mgr.vm02.ttibzz (mgr.14195) 771 : audit [DBG] from='client.16020 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:50:16.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:15 vm02 bash[17473]: cluster 2026-03-10T08:50:14.928653+0000 mgr.vm02.ttibzz (mgr.14195) 772 : cluster [DBG] pgmap v459: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s 2026-03-10T08:50:16.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:15 vm02 bash[17473]: cluster 2026-03-10T08:50:14.928653+0000 mgr.vm02.ttibzz (mgr.14195) 772 : cluster [DBG] pgmap v459: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s 2026-03-10T08:50:16.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:15 vm02 bash[17473]: audit 2026-03-10T08:50:14.964189+0000 mgr.vm02.ttibzz (mgr.14195) 773 : audit [DBG] from='client.16024 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:50:16.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:15 vm02 bash[17473]: audit 2026-03-10T08:50:14.964189+0000 mgr.vm02.ttibzz (mgr.14195) 773 : audit [DBG] from='client.16024 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:50:16.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:15 vm02 bash[17473]: audit 2026-03-10T08:50:15.159979+0000 mon.vm02 (mon.0) 1088 : audit [DBG] from='client.? 192.168.123.102:0/3543601625' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:50:16.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:15 vm02 bash[17473]: audit 2026-03-10T08:50:15.159979+0000 mon.vm02 (mon.0) 1088 : audit [DBG] from='client.? 
2026-03-10T08:50:18.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:17 vm02 bash[17473]: cluster 2026-03-10T08:50:16.929178+0000 mgr.vm02.ttibzz (mgr.14195) 774 : cluster [DBG] pgmap v460: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:50:20.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:19 vm02 bash[17473]: cluster 2026-03-10T08:50:18.929609+0000 mgr.vm02.ttibzz (mgr.14195) 775 : cluster [DBG] pgmap v461: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:50:20.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:19 vm02 bash[17473]: audit 2026-03-10T08:50:19.254387+0000 mon.vm02 (mon.0) 1089 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:50:20.327 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:50:20.485 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:50:20.485 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (5m) 4m ago 11m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:50:20.485 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (4m) 4m ago 11m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:50:20.485 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 4m ago 11m - -
2026-03-10T08:50:20.485 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (11m) 4m ago 11m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:50:20.689 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:50:20.689 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:50:20.689 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:50:21.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:21 vm02 bash[17473]: audit 2026-03-10T08:50:20.694715+0000 mon.vm02 (mon.0) 1090 : audit [DBG] from='client.? 192.168.123.102:0/3293295284' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:50:22.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:22 vm02 bash[17473]: audit 2026-03-10T08:50:20.318872+0000 mgr.vm02.ttibzz (mgr.14195) 776 : audit [DBG] from='client.16032 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:50:22.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:22 vm02 bash[17473]: audit 2026-03-10T08:50:20.488310+0000 mgr.vm02.ttibzz (mgr.14195) 777 : audit [DBG] from='client.16036 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:50:22.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:22 vm02 bash[17473]: cluster 2026-03-10T08:50:20.930117+0000 mgr.vm02.ttibzz (mgr.14195) 778 : cluster [DBG] pgmap v462: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:50:23.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:23 vm02 bash[17473]: cluster 2026-03-10T08:50:22.930487+0000 mgr.vm02.ttibzz (mgr.14195) 779 : cluster [DBG] pgmap v463: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:50:25.867 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:50:26.027 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:50:26.027 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (5m) 4m ago 11m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:50:26.027 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (4m) 4m ago 11m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:50:26.027 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 4m ago 11m - -
2026-03-10T08:50:26.027 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (11m) 4m ago 11m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:50:26.207 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:50:26.207 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:50:26.207 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:50:26.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:25 vm02 bash[17473]: cluster 2026-03-10T08:50:24.930856+0000 mgr.vm02.ttibzz (mgr.14195) 780 : cluster [DBG] pgmap v464: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:50:27.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:26 vm02 bash[17473]: audit 2026-03-10T08:50:25.857674+0000 mgr.vm02.ttibzz (mgr.14195) 781 : audit [DBG] from='client.16044 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:50:27.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:26 vm02 bash[17473]: audit 2026-03-10T08:50:26.212999+0000 mon.vm02 (mon.0) 1091 : audit [DBG] from='client.? 192.168.123.102:0/850535994' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
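
[editorial note: in every iteration above, rgw.foo.vm07.wecerd reports STATUS "error" rather than "stopped", so a poll that greps only for the literal string "stopped" never matches even though the daemon is down, and the wait keeps cycling until its timeout expires. A more tolerant check would accept either terminal state (a hypothetical variant, not what this suite runs):

    ceph orch ps --daemon-type rgw | grep rgw.foo.vm07.wecerd | grep -E 'stopped|error'
]
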
2026-03-10T08:50:28.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:27 vm02 bash[17473]: audit 2026-03-10T08:50:26.030287+0000 mgr.vm02.ttibzz (mgr.14195) 782 : audit [DBG] from='client.16048 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:50:28.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:27 vm02 bash[17473]: cluster 2026-03-10T08:50:26.931311+0000 mgr.vm02.ttibzz (mgr.14195) 783 : cluster [DBG] pgmap v465: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:50:29.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:29 vm02 bash[17473]: cluster 2026-03-10T08:50:28.931827+0000 mgr.vm02.ttibzz (mgr.14195) 784 : cluster [DBG] pgmap v466: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:50:31.387 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:50:31.545 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:50:31.545 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (5m) 4m ago 11m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:50:31.545 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (4m) 4m ago 11m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:50:31.545 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 4m ago 11m - -
2026-03-10T08:50:31.545 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (11m) 4m ago 11m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:50:31.733 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:50:31.733 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:50:31.733 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:50:32.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:31 vm02 bash[17473]: cluster 2026-03-10T08:50:30.932237+0000 mgr.vm02.ttibzz (mgr.14195) 785 : cluster [DBG] pgmap v467: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:50:32.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:31 vm02 bash[17473]: audit 2026-03-10T08:50:31.738625+0000 mon.vm02 (mon.0) 1092 : audit [DBG] from='client.? 192.168.123.102:0/1803085809' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:50:33.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:32 vm02 bash[17473]: audit 2026-03-10T08:50:31.376793+0000 mgr.vm02.ttibzz (mgr.14195) 786 : audit [DBG] from='client.16056 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:50:33.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:32 vm02 bash[17473]: audit 2026-03-10T08:50:31.547185+0000 mgr.vm02.ttibzz (mgr.14195) 787 : audit [DBG] from='client.16060 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:50:34.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:34 vm02 bash[17473]: cluster 2026-03-10T08:50:32.932683+0000 mgr.vm02.ttibzz (mgr.14195) 788 : cluster [DBG] pgmap v468: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:50:35.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:35 vm02 bash[17473]: audit 2026-03-10T08:50:34.254705+0000 mon.vm02 (mon.0) 1093 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:50:36.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:36 vm02 bash[17473]: cluster 2026-03-10T08:50:34.933154+0000 mgr.vm02.ttibzz (mgr.14195) 789 : cluster [DBG] pgmap v469: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:50:36.903 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:50:37.052 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:50:37.052 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (5m) 4m ago 11m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:50:37.052 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (5m) 4m ago 11m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:50:37.052 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 4m ago 11m - -
2026-03-10T08:50:37.052 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (11m) 4m ago 11m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:50:37.241 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:50:37.241 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:50:37.241 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:50:37.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:37 vm02 bash[17473]: audit 2026-03-10T08:50:36.893025+0000 mgr.vm02.ttibzz (mgr.14195) 790 : audit [DBG] from='client.16068 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:50:37.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:37 vm02 bash[17473]: cluster 2026-03-10T08:50:36.933558+0000 mgr.vm02.ttibzz (mgr.14195) 791 : cluster [DBG] pgmap v470: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:50:38.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:38 vm02 bash[17473]: audit 2026-03-10T08:50:37.055236+0000 mgr.vm02.ttibzz (mgr.14195) 792 : audit [DBG] from='client.16072 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:50:38.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:38 vm02 bash[17473]: audit 2026-03-10T08:50:37.247062+0000 mon.vm02 (mon.0) 1094 : audit [DBG] from='client.? 192.168.123.102:0/2190656771' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:50:39.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:39 vm02 bash[17473]: cluster 2026-03-10T08:50:38.933974+0000 mgr.vm02.ttibzz (mgr.14195) 793 : cluster [DBG] pgmap v471: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:50:42.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:41 vm02 bash[17473]: cluster 2026-03-10T08:50:40.934355+0000 mgr.vm02.ttibzz (mgr.14195) 794 : cluster [DBG] pgmap v472: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:50:42.414 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to stop
2026-03-10T08:50:42.563 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:50:42.563 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (5m) 5m ago 11m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:50:42.563 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (5m) 5m ago 11m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:50:42.563 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 4m ago 11m - -
2026-03-10T08:50:42.563 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (11m) 4m ago 11m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:50:42.713 INFO:teuthology.orchestra.run.vm02.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-03-10T08:50:42.713 INFO:teuthology.orchestra.run.vm02.stderr: Dload Upload Total Spent Left Speed
2026-03-10T08:50:42.715 INFO:teuthology.orchestra.run.vm02.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k
2026-03-10T08:50:42.882 INFO:teuthology.orchestra.run.vm02.stdout:anonymousScheduled to start rgw.foo.vm07.wecerd on host 'vm07'
2026-03-10T08:50:43.066 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to start
2026-03-10T08:50:43.238 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:50:43.238 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (5m) 5m ago 11m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:50:43.238 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (5m) 5m ago 11m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:50:43.238 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 4m ago 11m - -
2026-03-10T08:50:43.238 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (11m) 4m ago 11m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:50:43.422 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:50:43.422 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:50:43.422 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:50:44.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:44 vm02 bash[17473]: audit 2026-03-10T08:50:42.405354+0000 mgr.vm02.ttibzz (mgr.14195) 795 : audit [DBG] from='client.16080 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:50:44.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:44 vm02 bash[17473]: audit 2026-03-10T08:50:42.566417+0000 mgr.vm02.ttibzz (mgr.14195) 796 : audit [DBG] from='client.16084 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:50:44.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:44 vm02 bash[17473]: audit 2026-03-10T08:50:42.872929+0000 mgr.vm02.ttibzz (mgr.14195) 797 : audit [DBG] from='client.16092 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm07.wecerd", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:50:44.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:44 vm02 bash[17473]: cephadm 2026-03-10T08:50:42.873257+0000 mgr.vm02.ttibzz (mgr.14195) 798 : cephadm [INF] Schedule start daemon rgw.foo.vm07.wecerd
2026-03-10T08:50:44.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:44 vm02 bash[17473]: audit 2026-03-10T08:50:42.880946+0000 mon.vm02 (mon.0) 1095 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
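
[editorial note: once the endpoint check succeeds (the 187-byte curl response above, served through the ingress while one backend was down; the leading "anonymous" is the tail of the curl body, printed without a trailing newline), the script restarts the daemon through the orchestrator; the mgr audit trail records the "orch daemon" start dispatch and cephadm schedules the start. The same operation by hand, using the daemon name from this log (both are standard orchestrator commands):

    ceph orch daemon start rgw.foo.vm07.wecerd
    ceph orch ps --daemon-type rgw   # watch for STATUS 'running'
]
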
2026-03-10T08:50:44.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:44 vm02 bash[17473]: audit 2026-03-10T08:50:42.886279+0000 mon.vm02 (mon.0) 1096 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:50:44.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:44 vm02 bash[17473]: audit 2026-03-10T08:50:42.887269+0000 mon.vm02 (mon.0) 1097 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:50:44.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:44 vm02 bash[17473]: audit 2026-03-10T08:50:42.888503+0000 mon.vm02 (mon.0) 1098 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:50:44.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:44 vm02 bash[17473]: audit 2026-03-10T08:50:42.890339+0000 mon.vm02 (mon.0) 1099 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T08:50:44.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:44 vm02 bash[17473]: cluster 2026-03-10T08:50:42.891246+0000 mgr.vm02.ttibzz (mgr.14195) 799 : cluster [DBG] pgmap v473: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 171 B/s wr, 0 op/s
2026-03-10T08:50:44.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:44 vm02 bash[17473]: cluster 2026-03-10T08:50:42.891664+0000 mgr.vm02.ttibzz (mgr.14195) 800 : cluster [DBG] pgmap v474: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 102 B/s rd, 205 B/s wr, 0 op/s
2026-03-10T08:50:44.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:44 vm02 bash[17473]: audit 2026-03-10T08:50:42.895269+0000 mon.vm02 (mon.0) 1100 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:50:44.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:44 vm02 bash[17473]: audit 2026-03-10T08:50:42.896617+0000 mon.vm02 (mon.0) 1101 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T08:50:44.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:44 vm02 bash[17473]: audit 2026-03-10T08:50:43.427749+0000 mon.vm02 (mon.0) 1102 : audit [DBG] from='client.? 192.168.123.102:0/1869243257' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:50:45.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:45 vm02 bash[17473]: audit 2026-03-10T08:50:43.057446+0000 mgr.vm02.ttibzz (mgr.14195) 801 : audit [DBG] from='client.16096 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:50:45.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:45 vm02 bash[17473]: audit 2026-03-10T08:50:43.240790+0000 mgr.vm02.ttibzz (mgr.14195) 802 : audit [DBG] from='client.25355 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:50:45.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:45 vm02 bash[17473]: cluster 2026-03-10T08:50:44.891976+0000 mgr.vm02.ttibzz (mgr.14195) 803 : cluster [DBG] pgmap v475: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 205 B/s rd, 411 B/s wr, 0 op/s
2026-03-10T08:50:48.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:47 vm02 bash[17473]: cluster 2026-03-10T08:50:46.892460+0000 mgr.vm02.ttibzz (mgr.14195) 804 : cluster [DBG] pgmap v476: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 205 B/s rd, 411 B/s wr, 0 op/s
2026-03-10T08:50:48.594 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to start
2026-03-10T08:50:48.748 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:50:48.748 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (5m) 5m ago 11m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:50:48.748 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (5m) 5m ago 11m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:50:48.748 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 4m ago 11m - -
2026-03-10T08:50:48.748 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (11m) 4m ago 11m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:50:48.935 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:50:48.935 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:50:48.935 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:50:49.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:48 vm02 bash[17473]: audit 2026-03-10T08:50:48.941123+0000 mon.vm02 (mon.0) 1103 : audit [DBG] from='client.? 192.168.123.102:0/1623805292' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:50:50.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:49 vm02 bash[17473]: audit 2026-03-10T08:50:48.584818+0000 mgr.vm02.ttibzz (mgr.14195) 805 : audit [DBG] from='client.16106 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:50:50.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:49 vm02 bash[17473]: audit 2026-03-10T08:50:48.750942+0000 mgr.vm02.ttibzz (mgr.14195) 806 : audit [DBG] from='client.16110 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:50:50.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:49 vm02 bash[17473]: cluster 2026-03-10T08:50:48.892869+0000 mgr.vm02.ttibzz (mgr.14195) 807 : cluster [DBG] pgmap v477: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 205 B/s rd, 411 B/s wr, 0 op/s
2026-03-10T08:50:50.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:49 vm02 bash[17473]: audit 2026-03-10T08:50:49.254673+0000 mon.vm02 (mon.0) 1104 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:50:52.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:51 vm02 bash[17473]: cluster 2026-03-10T08:50:50.893286+0000 mgr.vm02.ttibzz (mgr.14195) 808 : cluster [DBG] pgmap v478: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 102 B/s rd, 205 B/s wr, 0 op/s
2026-03-10T08:50:52.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:51 vm02 bash[17473]: audit 2026-03-10T08:50:51.770136+0000 mon.vm02 (mon.0) 1105 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:50:52.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:51 vm02 bash[17473]: audit 2026-03-10T08:50:51.775771+0000 mon.vm02 (mon.0) 1106 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:50:52.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:51 vm02 bash[17473]: audit 2026-03-10T08:50:51.804109+0000 mon.vm02 (mon.0) 1107 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:50:53.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:53 vm02 bash[17473]: cluster 2026-03-10T08:50:52.893729+0000 mgr.vm02.ttibzz (mgr.14195) 809 : cluster [DBG] pgmap v479: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 6.7 KiB/s rd, 204 B/s wr, 10 op/s
2026-03-10T08:50:54.117 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.wecerd to start
2026-03-10T08:50:54.263 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:50:54.263 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (5m) 5m ago 11m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:50:54.263 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (5m) 5m ago 11m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:50:54.263 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 error 4m ago 11m - -
2026-03-10T08:50:54.263 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (11m) 4m ago 11m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:50:54.445 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:50:54.445 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:50:54.445 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.wecerd on vm07 is in error state
2026-03-10T08:50:54.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:54 vm02 bash[17473]: audit 2026-03-10T08:50:54.450520+0000 mon.vm02 (mon.0) 1108 : audit [DBG] from='client.? 192.168.123.102:0/127634219' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:50:55.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:55 vm02 bash[17473]: audit 2026-03-10T08:50:54.106467+0000 mgr.vm02.ttibzz (mgr.14195) 810 : audit [DBG] from='client.16122 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:50:55.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:55 vm02 bash[17473]: audit 2026-03-10T08:50:54.266114+0000 mgr.vm02.ttibzz (mgr.14195) 811 : audit [DBG] from='client.16126 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:50:55.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:55 vm02 bash[17473]: cluster 2026-03-10T08:50:54.894150+0000 mgr.vm02.ttibzz (mgr.14195) 812 : cluster [DBG] pgmap v480: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 24 KiB/s rd, 170 B/s wr, 36 op/s
2026-03-10T08:50:57.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:57 vm02 bash[17473]: audit 2026-03-10T08:50:56.173980+0000 mon.vm02 (mon.0) 1109 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:50:57.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:57 vm02 bash[17473]: audit 2026-03-10T08:50:56.178598+0000 mon.vm02 (mon.0) 1110 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:50:57.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:57 vm02 bash[17473]: audit 2026-03-10T08:50:56.179312+0000 mon.vm02 (mon.0) 1111 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:50:57.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:57 vm02 bash[17473]: audit 2026-03-10T08:50:56.179756+0000 mon.vm02 (mon.0) 1112 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T08:50:57.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:57 vm02 bash[17473]: cluster 2026-03-10T08:50:56.180965+0000 mgr.vm02.ttibzz (mgr.14195) 813 : cluster [DBG] pgmap v481: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 51 KiB/s rd, 0 B/s wr, 78 op/s
2026-03-10T08:50:57.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:57 vm02 bash[17473]: audit 2026-03-10T08:50:56.183304+0000 mon.vm02 (mon.0) 1113 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:50:57.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:57 vm02 bash[17473]: audit 2026-03-10T08:50:56.184781+0000 mon.vm02 (mon.0) 1114 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T08:50:58.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:58 vm02 bash[17473]: cluster 2026-03-10T08:50:57.187512+0000 mon.vm02 (mon.0) 1115 : cluster [INF] Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s))
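
[editorial note: the mon audit bursts above (config dump, config generate-minimal-conf, auth get client.admin, osd tree) are the mgr regathering deploy inputs while it restarts the daemon; once the new container reports in, the CEPHADM_FAILED_DAEMON check clears. The conf/keyring steps can be reproduced by hand with standard commands:

    ceph config generate-minimal-conf   # the minimal ceph.conf handed to a managed daemon
    ceph auth get client.admin          # the keyring cephadm distributes to managed hosts
]
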
2026-03-10T08:50:58.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:58 vm02 bash[17473]: cluster 2026-03-10T08:50:57.187529+0000 mon.vm02 (mon.0) 1116 : cluster [INF] Cluster is now healthy
2026-03-10T08:50:59.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:50:59 vm02 bash[17473]: cluster 2026-03-10T08:50:58.181805+0000 mgr.vm02.ttibzz (mgr.14195) 814 : cluster [DBG] pgmap v482: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 56 KiB/s rd, 0 B/s wr, 87 op/s
2026-03-10T08:50:59.617 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (7s) 3s ago 11m 88.9M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:50:59.797 INFO:teuthology.orchestra.run.vm02.stdout:Scheduled to stop rgw.foo.vm07.zylyez on host 'vm07'
2026-03-10T08:50:59.991 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:51:00.157 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:51:00.157 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (6m) 5m ago 11m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:51:00.157 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (5m) 5m ago 11m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:51:00.157 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (8s) 3s ago 11m 88.9M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:51:00.157 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (11m) 3s ago 11m 108M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:51:00.347 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK
2026-03-10T08:51:01.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:00 vm02 bash[17473]: audit 2026-03-10T08:50:59.604687+0000 mgr.vm02.ttibzz (mgr.14195) 815 : audit [DBG] from='client.16134 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:51:01.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:00 vm02 bash[17473]: audit 2026-03-10T08:50:59.777838+0000 mgr.vm02.ttibzz (mgr.14195) 816 : audit [DBG] from='client.16138 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm07.zylyez", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:51:01.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:00 vm02 bash[17473]: cephadm 2026-03-10T08:50:59.778194+0000 mgr.vm02.ttibzz (mgr.14195) 817 : cephadm [INF] Schedule stop daemon rgw.foo.vm07.zylyez
2026-03-10T08:51:01.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:00 vm02 bash[17473]: audit 2026-03-10T08:50:59.792894+0000 mon.vm02 (mon.0) 1117 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:51:01.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:00 vm02 bash[17473]: audit 2026-03-10T08:50:59.801192+0000 mon.vm02 (mon.0) 1118 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:51:01.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:00 vm02 bash[17473]: audit 2026-03-10T08:50:59.802340+0000 mon.vm02 (mon.0) 1119 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:51:01.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:00 vm02 bash[17473]: audit 2026-03-10T08:50:59.803962+0000 mon.vm02 (mon.0) 1120 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:51:01.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:00 vm02 bash[17473]: audit 2026-03-10T08:50:59.804856+0000 mon.vm02 (mon.0) 1121 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T08:51:01.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:00 vm02 bash[17473]: audit 2026-03-10T08:50:59.810173+0000 mon.vm02 (mon.0) 1122 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:51:01.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:00 vm02 bash[17473]: audit 2026-03-10T08:50:59.812103+0000 mon.vm02 (mon.0) 1123 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T08:51:01.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:00 vm02 bash[17473]: audit 2026-03-10T08:50:59.981312+0000 mgr.vm02.ttibzz (mgr.14195) 818 : audit [DBG] from='client.16142 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:51:01.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:00 vm02 bash[17473]: audit 2026-03-10T08:51:00.353381+0000 mon.vm02 (mon.0) 1124 : audit [DBG] from='client.? 192.168.123.102:0/2398374546' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:51:02.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:01 vm02 bash[17473]: audit 2026-03-10T08:51:00.159782+0000 mgr.vm02.ttibzz (mgr.14195) 819 : audit [DBG] from='client.16146 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:51:02.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:01 vm02 bash[17473]: cluster 2026-03-10T08:51:00.182166+0000 mgr.vm02.ttibzz (mgr.14195) 820 : cluster [DBG] pgmap v483: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 56 KiB/s rd, 181 B/s wr, 87 op/s
2026-03-10T08:51:04.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:03 vm02 bash[17473]: cluster 2026-03-10T08:51:02.182585+0000 mgr.vm02.ttibzz (mgr.14195) 821 : cluster [DBG] pgmap v484: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 56 KiB/s rd, 181 B/s wr, 87 op/s
2026-03-10T08:51:05.521 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:51:05.665 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:51:05.666 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (6m) 5m ago 12m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:51:05.666 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (5m) 5m ago 12m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:51:05.666 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (13s) 9s ago 12m 88.9M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:51:05.666 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (12m) 9s ago 12m 108M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8
2026-03-10T08:51:05.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:05 vm02 bash[17473]: cluster 2026-03-10T08:51:04.182967+0000 mgr.vm02.ttibzz (mgr.14195) 822 : cluster [DBG] pgmap v485: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 51 KiB/s rd, 181 B/s wr, 78 op/s
2026-03-10T08:51:05.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:05 vm02 bash[17473]: cluster 2026-03-10T08:51:04.182967+0000 mgr.vm02.ttibzz (mgr.14195) 822 : cluster [DBG] pgmap v485: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 51 
KiB/s rd, 181 B/s wr, 78 op/s 2026-03-10T08:51:05.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:05 vm02 bash[17473]: audit 2026-03-10T08:51:04.259292+0000 mon.vm02 (mon.0) 1125 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:51:05.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:05 vm02 bash[17473]: audit 2026-03-10T08:51:04.259292+0000 mon.vm02 (mon.0) 1125 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:51:05.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:05 vm02 bash[17473]: audit 2026-03-10T08:51:04.260023+0000 mon.vm02 (mon.0) 1126 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:51:05.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:05 vm02 bash[17473]: audit 2026-03-10T08:51:04.260023+0000 mon.vm02 (mon.0) 1126 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:51:05.852 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK 2026-03-10T08:51:06.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:06 vm02 bash[17473]: audit 2026-03-10T08:51:05.511000+0000 mgr.vm02.ttibzz (mgr.14195) 823 : audit [DBG] from='client.16154 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:06.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:06 vm02 bash[17473]: audit 2026-03-10T08:51:05.511000+0000 mgr.vm02.ttibzz (mgr.14195) 823 : audit [DBG] from='client.16154 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:06.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:06 vm02 bash[17473]: audit 2026-03-10T08:51:05.669026+0000 mgr.vm02.ttibzz (mgr.14195) 824 : audit [DBG] from='client.16158 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:06.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:06 vm02 bash[17473]: audit 2026-03-10T08:51:05.669026+0000 mgr.vm02.ttibzz (mgr.14195) 824 : audit [DBG] from='client.16158 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:06.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:06 vm02 bash[17473]: audit 2026-03-10T08:51:05.858328+0000 mon.vm02 (mon.0) 1127 : audit [DBG] from='client.? 192.168.123.102:0/3875249175' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:51:06.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:06 vm02 bash[17473]: audit 2026-03-10T08:51:05.858328+0000 mon.vm02 (mon.0) 1127 : audit [DBG] from='client.? 
192.168.123.102:0/3875249175' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:51:07.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:07 vm02 bash[17473]: cluster 2026-03-10T08:51:06.183377+0000 mgr.vm02.ttibzz (mgr.14195) 825 : cluster [DBG] pgmap v486: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 31 KiB/s rd, 181 B/s wr, 48 op/s 2026-03-10T08:51:07.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:07 vm02 bash[17473]: cluster 2026-03-10T08:51:06.183377+0000 mgr.vm02.ttibzz (mgr.14195) 825 : cluster [DBG] pgmap v486: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 31 KiB/s rd, 181 B/s wr, 48 op/s 2026-03-10T08:51:09.688 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:09 vm02 bash[17473]: cluster 2026-03-10T08:51:08.183754+0000 mgr.vm02.ttibzz (mgr.14195) 826 : cluster [DBG] pgmap v487: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 5.4 KiB/s rd, 170 B/s wr, 8 op/s 2026-03-10T08:51:09.688 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:09 vm02 bash[17473]: cluster 2026-03-10T08:51:08.183754+0000 mgr.vm02.ttibzz (mgr.14195) 826 : cluster [DBG] pgmap v487: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 5.4 KiB/s rd, 170 B/s wr, 8 op/s 2026-03-10T08:51:11.028 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop 2026-03-10T08:51:11.179 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:51:11.179 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (6m) 5m ago 12m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:51:11.179 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (5m) 5m ago 12m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:51:11.179 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (19s) 15s ago 12m 88.9M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85 2026-03-10T08:51:11.179 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (12m) 15s ago 12m 108M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:51:11.358 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK 2026-03-10T08:51:12.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:11 vm02 bash[17473]: cluster 2026-03-10T08:51:10.184201+0000 mgr.vm02.ttibzz (mgr.14195) 827 : cluster [DBG] pgmap v488: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:51:12.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:11 vm02 bash[17473]: cluster 2026-03-10T08:51:10.184201+0000 mgr.vm02.ttibzz (mgr.14195) 827 : cluster [DBG] pgmap v488: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:51:12.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:11 vm02 bash[17473]: audit 2026-03-10T08:51:11.358860+0000 mon.vm07 (mon.1) 42 : audit [DBG] from='client.? 192.168.123.102:0/3627621198' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:51:12.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:11 vm02 bash[17473]: audit 2026-03-10T08:51:11.358860+0000 mon.vm07 (mon.1) 42 : audit [DBG] from='client.? 
192.168.123.102:0/3627621198' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:51:13.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:12 vm02 bash[17473]: audit 2026-03-10T08:51:11.018072+0000 mgr.vm02.ttibzz (mgr.14195) 828 : audit [DBG] from='client.16166 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:13.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:12 vm02 bash[17473]: audit 2026-03-10T08:51:11.018072+0000 mgr.vm02.ttibzz (mgr.14195) 828 : audit [DBG] from='client.16166 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:13.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:12 vm02 bash[17473]: audit 2026-03-10T08:51:11.182814+0000 mgr.vm02.ttibzz (mgr.14195) 829 : audit [DBG] from='client.16170 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:13.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:12 vm02 bash[17473]: audit 2026-03-10T08:51:11.182814+0000 mgr.vm02.ttibzz (mgr.14195) 829 : audit [DBG] from='client.16170 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:14.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:13 vm02 bash[17473]: cluster 2026-03-10T08:51:12.184655+0000 mgr.vm02.ttibzz (mgr.14195) 830 : cluster [DBG] pgmap v489: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:51:14.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:13 vm02 bash[17473]: cluster 2026-03-10T08:51:12.184655+0000 mgr.vm02.ttibzz (mgr.14195) 830 : cluster [DBG] pgmap v489: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:51:16.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:15 vm02 bash[17473]: cluster 2026-03-10T08:51:14.185052+0000 mgr.vm02.ttibzz (mgr.14195) 831 : cluster [DBG] pgmap v490: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:51:16.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:15 vm02 bash[17473]: cluster 2026-03-10T08:51:14.185052+0000 mgr.vm02.ttibzz (mgr.14195) 831 : cluster [DBG] pgmap v490: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:51:16.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:15 vm02 bash[17473]: audit 2026-03-10T08:51:14.825477+0000 mon.vm02 (mon.0) 1128 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:51:16.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:15 vm02 bash[17473]: audit 2026-03-10T08:51:14.825477+0000 mon.vm02 (mon.0) 1128 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:51:16.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:15 vm02 bash[17473]: audit 2026-03-10T08:51:14.832023+0000 mon.vm02 (mon.0) 1129 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:51:16.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:15 vm02 bash[17473]: audit 2026-03-10T08:51:14.832023+0000 mon.vm02 (mon.0) 1129 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 
2026-03-10T08:51:16.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:15 vm02 bash[17473]: audit 2026-03-10T08:51:14.860741+0000 mon.vm02 (mon.0) 1130 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:51:16.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:15 vm02 bash[17473]: audit 2026-03-10T08:51:14.860741+0000 mon.vm02 (mon.0) 1130 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:51:16.534 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop 2026-03-10T08:51:16.685 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:51:16.685 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (6m) 5m ago 12m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:51:16.685 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (5m) 5m ago 12m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:51:16.685 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (24s) 20s ago 12m 88.9M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85 2026-03-10T08:51:16.685 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (12m) 20s ago 12m 108M - 19.2.3-678-ge911bdeb 654f31e6858e 5c0984cdc6b8 2026-03-10T08:51:16.872 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK 2026-03-10T08:51:18.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:17 vm02 bash[17473]: cluster 2026-03-10T08:51:16.185442+0000 mgr.vm02.ttibzz (mgr.14195) 832 : cluster [DBG] pgmap v491: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:51:18.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:17 vm02 bash[17473]: cluster 2026-03-10T08:51:16.185442+0000 mgr.vm02.ttibzz (mgr.14195) 832 : cluster [DBG] pgmap v491: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:51:18.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:17 vm02 bash[17473]: audit 2026-03-10T08:51:16.524659+0000 mgr.vm02.ttibzz (mgr.14195) 833 : audit [DBG] from='client.25407 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:18.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:17 vm02 bash[17473]: audit 2026-03-10T08:51:16.524659+0000 mgr.vm02.ttibzz (mgr.14195) 833 : audit [DBG] from='client.25407 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:18.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:17 vm02 bash[17473]: audit 2026-03-10T08:51:16.689152+0000 mgr.vm02.ttibzz (mgr.14195) 834 : audit [DBG] from='client.16182 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:18.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:17 vm02 bash[17473]: audit 2026-03-10T08:51:16.689152+0000 mgr.vm02.ttibzz (mgr.14195) 834 : audit [DBG] from='client.16182 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:18.280 
INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:17 vm02 bash[17473]: audit 2026-03-10T08:51:16.877889+0000 mon.vm02 (mon.0) 1131 : audit [DBG] from='client.? 192.168.123.102:0/4079454469' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:51:18.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:17 vm02 bash[17473]: audit 2026-03-10T08:51:16.877889+0000 mon.vm02 (mon.0) 1131 : audit [DBG] from='client.? 192.168.123.102:0/4079454469' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:51:20.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:19 vm02 bash[17473]: cluster 2026-03-10T08:51:18.185781+0000 mgr.vm02.ttibzz (mgr.14195) 835 : cluster [DBG] pgmap v492: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:51:20.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:19 vm02 bash[17473]: cluster 2026-03-10T08:51:18.185781+0000 mgr.vm02.ttibzz (mgr.14195) 835 : cluster [DBG] pgmap v492: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:51:20.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:19 vm02 bash[17473]: audit 2026-03-10T08:51:19.255154+0000 mon.vm02 (mon.0) 1132 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:51:20.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:19 vm02 bash[17473]: audit 2026-03-10T08:51:19.255154+0000 mon.vm02 (mon.0) 1132 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:51:21.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:21 vm02 bash[17473]: audit 2026-03-10T08:51:20.090380+0000 mon.vm02 (mon.0) 1133 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:51:21.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:21 vm02 bash[17473]: audit 2026-03-10T08:51:20.090380+0000 mon.vm02 (mon.0) 1133 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:51:21.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:21 vm02 bash[17473]: audit 2026-03-10T08:51:20.094611+0000 mon.vm02 (mon.0) 1134 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:51:21.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:21 vm02 bash[17473]: audit 2026-03-10T08:51:20.094611+0000 mon.vm02 (mon.0) 1134 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:51:21.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:21 vm02 bash[17473]: cluster 2026-03-10T08:51:20.186098+0000 mgr.vm02.ttibzz (mgr.14195) 836 : cluster [DBG] pgmap v493: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:51:21.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:21 vm02 bash[17473]: cluster 2026-03-10T08:51:20.186098+0000 mgr.vm02.ttibzz (mgr.14195) 836 : cluster [DBG] pgmap v493: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:51:21.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:21 vm02 bash[17473]: audit 2026-03-10T08:51:20.387244+0000 
mon.vm02 (mon.0) 1135 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:51:21.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:21 vm02 bash[17473]: audit 2026-03-10T08:51:20.387244+0000 mon.vm02 (mon.0) 1135 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:51:21.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:21 vm02 bash[17473]: audit 2026-03-10T08:51:20.387759+0000 mon.vm02 (mon.0) 1136 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T08:51:21.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:21 vm02 bash[17473]: audit 2026-03-10T08:51:20.387759+0000 mon.vm02 (mon.0) 1136 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T08:51:21.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:21 vm02 bash[17473]: cluster 2026-03-10T08:51:20.388941+0000 mgr.vm02.ttibzz (mgr.14195) 837 : cluster [DBG] pgmap v494: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:51:21.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:21 vm02 bash[17473]: cluster 2026-03-10T08:51:20.388941+0000 mgr.vm02.ttibzz (mgr.14195) 837 : cluster [DBG] pgmap v494: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:51:21.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:21 vm02 bash[17473]: audit 2026-03-10T08:51:20.391997+0000 mon.vm02 (mon.0) 1137 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:51:21.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:21 vm02 bash[17473]: audit 2026-03-10T08:51:20.391997+0000 mon.vm02 (mon.0) 1137 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:51:21.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:21 vm02 bash[17473]: audit 2026-03-10T08:51:20.393249+0000 mon.vm02 (mon.0) 1138 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T08:51:21.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:21 vm02 bash[17473]: audit 2026-03-10T08:51:20.393249+0000 mon.vm02 (mon.0) 1138 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T08:51:22.045 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop 2026-03-10T08:51:22.197 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:51:22.198 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (6m) 5m ago 12m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:51:22.198 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (5m) 5m ago 12m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:51:22.198 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (30s) 2s ago 12m 90.2M - 19.2.3-678-ge911bdeb 
654f31e6858e 6dc79842fd85 2026-03-10T08:51:22.198 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 2s ago 12m - - 2026-03-10T08:51:22.383 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:51:22.383 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:51:22.383 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state 2026-03-10T08:51:22.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:22 vm02 bash[17473]: cluster 2026-03-10T08:51:21.096516+0000 mon.vm02 (mon.0) 1139 : cluster [WRN] Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-03-10T08:51:22.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:22 vm02 bash[17473]: cluster 2026-03-10T08:51:21.096516+0000 mon.vm02 (mon.0) 1139 : cluster [WRN] Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-03-10T08:51:23.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:23 vm02 bash[17473]: audit 2026-03-10T08:51:22.037842+0000 mgr.vm02.ttibzz (mgr.14195) 838 : audit [DBG] from='client.16190 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:23.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:23 vm02 bash[17473]: audit 2026-03-10T08:51:22.037842+0000 mgr.vm02.ttibzz (mgr.14195) 838 : audit [DBG] from='client.16190 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:23.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:23 vm02 bash[17473]: audit 2026-03-10T08:51:22.201403+0000 mgr.vm02.ttibzz (mgr.14195) 839 : audit [DBG] from='client.16194 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:23.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:23 vm02 bash[17473]: audit 2026-03-10T08:51:22.201403+0000 mgr.vm02.ttibzz (mgr.14195) 839 : audit [DBG] from='client.16194 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:23.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:23 vm02 bash[17473]: audit 2026-03-10T08:51:22.389028+0000 mon.vm02 (mon.0) 1140 : audit [DBG] from='client.? 192.168.123.102:0/2014682058' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:51:23.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:23 vm02 bash[17473]: audit 2026-03-10T08:51:22.389028+0000 mon.vm02 (mon.0) 1140 : audit [DBG] from='client.? 
192.168.123.102:0/2014682058' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:51:23.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:23 vm02 bash[17473]: cluster 2026-03-10T08:51:22.389274+0000 mgr.vm02.ttibzz (mgr.14195) 840 : cluster [DBG] pgmap v495: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:51:23.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:23 vm02 bash[17473]: cluster 2026-03-10T08:51:22.389274+0000 mgr.vm02.ttibzz (mgr.14195) 840 : cluster [DBG] pgmap v495: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:51:25.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:25 vm02 bash[17473]: cluster 2026-03-10T08:51:24.389738+0000 mgr.vm02.ttibzz (mgr.14195) 841 : cluster [DBG] pgmap v496: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 100 B/s rd, 200 B/s wr, 0 op/s 2026-03-10T08:51:25.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:25 vm02 bash[17473]: cluster 2026-03-10T08:51:24.389738+0000 mgr.vm02.ttibzz (mgr.14195) 841 : cluster [DBG] pgmap v496: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 100 B/s rd, 200 B/s wr, 0 op/s 2026-03-10T08:51:27.557 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop 2026-03-10T08:51:27.703 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:51:27.703 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (6m) 5m ago 12m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:51:27.703 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (5m) 5m ago 12m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:51:27.703 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (35s) 7s ago 12m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85 2026-03-10T08:51:27.703 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 7s ago 12m - - 2026-03-10T08:51:27.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:27 vm02 bash[17473]: cluster 2026-03-10T08:51:26.390260+0000 mgr.vm02.ttibzz (mgr.14195) 842 : cluster [DBG] pgmap v497: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 100 B/s rd, 200 B/s wr, 0 op/s 2026-03-10T08:51:27.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:27 vm02 bash[17473]: cluster 2026-03-10T08:51:26.390260+0000 mgr.vm02.ttibzz (mgr.14195) 842 : cluster [DBG] pgmap v497: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 100 B/s rd, 200 B/s wr, 0 op/s 2026-03-10T08:51:27.886 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:51:27.886 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:51:27.886 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state 2026-03-10T08:51:28.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:28 vm02 bash[17473]: audit 2026-03-10T08:51:27.546254+0000 mgr.vm02.ttibzz (mgr.14195) 843 : audit [DBG] from='client.16202 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:28.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:28 vm02 
bash[17473]: audit 2026-03-10T08:51:27.546254+0000 mgr.vm02.ttibzz (mgr.14195) 843 : audit [DBG] from='client.16202 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:28.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:28 vm02 bash[17473]: audit 2026-03-10T08:51:27.706580+0000 mgr.vm02.ttibzz (mgr.14195) 844 : audit [DBG] from='client.16206 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:28.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:28 vm02 bash[17473]: audit 2026-03-10T08:51:27.706580+0000 mgr.vm02.ttibzz (mgr.14195) 844 : audit [DBG] from='client.16206 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:28.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:28 vm02 bash[17473]: audit 2026-03-10T08:51:27.892652+0000 mon.vm02 (mon.0) 1141 : audit [DBG] from='client.? 192.168.123.102:0/2650209331' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:51:28.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:28 vm02 bash[17473]: audit 2026-03-10T08:51:27.892652+0000 mon.vm02 (mon.0) 1141 : audit [DBG] from='client.? 192.168.123.102:0/2650209331' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:51:29.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:29 vm02 bash[17473]: cluster 2026-03-10T08:51:28.390640+0000 mgr.vm02.ttibzz (mgr.14195) 845 : cluster [DBG] pgmap v498: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 100 B/s rd, 200 B/s wr, 0 op/s 2026-03-10T08:51:29.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:29 vm02 bash[17473]: cluster 2026-03-10T08:51:28.390640+0000 mgr.vm02.ttibzz (mgr.14195) 845 : cluster [DBG] pgmap v498: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 100 B/s rd, 200 B/s wr, 0 op/s 2026-03-10T08:51:31.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:31 vm02 bash[17473]: cluster 2026-03-10T08:51:30.391056+0000 mgr.vm02.ttibzz (mgr.14195) 846 : cluster [DBG] pgmap v499: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 200 B/s rd, 401 B/s wr, 0 op/s 2026-03-10T08:51:31.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:31 vm02 bash[17473]: cluster 2026-03-10T08:51:30.391056+0000 mgr.vm02.ttibzz (mgr.14195) 846 : cluster [DBG] pgmap v499: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 200 B/s rd, 401 B/s wr, 0 op/s 2026-03-10T08:51:33.062 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop 2026-03-10T08:51:33.225 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:51:33.225 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (6m) 5m ago 12m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:51:33.225 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (5m) 5m ago 12m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:51:33.225 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (41s) 13s ago 12m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85 2026-03-10T08:51:33.225 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 
*:8001 error 13s ago 12m - - 2026-03-10T08:51:33.405 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:51:33.406 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:51:33.406 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state 2026-03-10T08:51:33.738 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:33 vm02 bash[17473]: cluster 2026-03-10T08:51:32.391498+0000 mgr.vm02.ttibzz (mgr.14195) 847 : cluster [DBG] pgmap v500: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:51:33.738 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:33 vm02 bash[17473]: cluster 2026-03-10T08:51:32.391498+0000 mgr.vm02.ttibzz (mgr.14195) 847 : cluster [DBG] pgmap v500: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:51:33.738 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:33 vm02 bash[17473]: audit 2026-03-10T08:51:33.411619+0000 mon.vm02 (mon.0) 1142 : audit [DBG] from='client.? 192.168.123.102:0/2342274588' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:51:33.738 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:33 vm02 bash[17473]: audit 2026-03-10T08:51:33.411619+0000 mon.vm02 (mon.0) 1142 : audit [DBG] from='client.? 192.168.123.102:0/2342274588' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:51:34.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:34 vm02 bash[17473]: audit 2026-03-10T08:51:33.052712+0000 mgr.vm02.ttibzz (mgr.14195) 848 : audit [DBG] from='client.25433 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:34.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:34 vm02 bash[17473]: audit 2026-03-10T08:51:33.052712+0000 mgr.vm02.ttibzz (mgr.14195) 848 : audit [DBG] from='client.25433 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:34.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:34 vm02 bash[17473]: audit 2026-03-10T08:51:33.228552+0000 mgr.vm02.ttibzz (mgr.14195) 849 : audit [DBG] from='client.16218 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:34.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:34 vm02 bash[17473]: audit 2026-03-10T08:51:33.228552+0000 mgr.vm02.ttibzz (mgr.14195) 849 : audit [DBG] from='client.16218 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:34.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:34 vm02 bash[17473]: audit 2026-03-10T08:51:34.259991+0000 mon.vm02 (mon.0) 1143 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:51:34.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:34 vm02 bash[17473]: audit 2026-03-10T08:51:34.259991+0000 mon.vm02 (mon.0) 1143 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:51:34.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:34 vm02 bash[17473]: audit 2026-03-10T08:51:34.260888+0000 mon.vm02 (mon.0) 1144 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 
cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:51:34.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:34 vm02 bash[17473]: audit 2026-03-10T08:51:34.260888+0000 mon.vm02 (mon.0) 1144 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:51:35.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:35 vm02 bash[17473]: cluster 2026-03-10T08:51:34.391930+0000 mgr.vm02.ttibzz (mgr.14195) 850 : cluster [DBG] pgmap v501: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:51:35.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:35 vm02 bash[17473]: cluster 2026-03-10T08:51:34.391930+0000 mgr.vm02.ttibzz (mgr.14195) 850 : cluster [DBG] pgmap v501: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:51:37.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:37 vm02 bash[17473]: cluster 2026-03-10T08:51:36.392440+0000 mgr.vm02.ttibzz (mgr.14195) 851 : cluster [DBG] pgmap v502: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:51:37.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:37 vm02 bash[17473]: cluster 2026-03-10T08:51:36.392440+0000 mgr.vm02.ttibzz (mgr.14195) 851 : cluster [DBG] pgmap v502: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:51:38.581 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop 2026-03-10T08:51:38.726 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:51:38.726 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (6m) 5m ago 12m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:51:38.726 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (6m) 5m ago 12m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:51:38.726 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (46s) 18s ago 12m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85 2026-03-10T08:51:38.726 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 18s ago 12m - - 2026-03-10T08:51:38.918 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:51:38.918 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:51:38.918 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state 2026-03-10T08:51:39.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:39 vm02 bash[17473]: cluster 2026-03-10T08:51:38.392857+0000 mgr.vm02.ttibzz (mgr.14195) 852 : cluster [DBG] pgmap v503: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:51:39.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:39 vm02 bash[17473]: cluster 2026-03-10T08:51:38.392857+0000 mgr.vm02.ttibzz (mgr.14195) 852 : cluster [DBG] pgmap v503: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:51:39.780 
INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:39 vm02 bash[17473]: audit 2026-03-10T08:51:38.571100+0000 mgr.vm02.ttibzz (mgr.14195) 853 : audit [DBG] from='client.16226 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:39.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:39 vm02 bash[17473]: audit 2026-03-10T08:51:38.571100+0000 mgr.vm02.ttibzz (mgr.14195) 853 : audit [DBG] from='client.16226 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:39.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:39 vm02 bash[17473]: audit 2026-03-10T08:51:38.729017+0000 mgr.vm02.ttibzz (mgr.14195) 854 : audit [DBG] from='client.16230 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:39.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:39 vm02 bash[17473]: audit 2026-03-10T08:51:38.729017+0000 mgr.vm02.ttibzz (mgr.14195) 854 : audit [DBG] from='client.16230 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:39.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:39 vm02 bash[17473]: audit 2026-03-10T08:51:38.924411+0000 mon.vm02 (mon.0) 1145 : audit [DBG] from='client.? 192.168.123.102:0/2695365579' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:51:39.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:39 vm02 bash[17473]: audit 2026-03-10T08:51:38.924411+0000 mon.vm02 (mon.0) 1145 : audit [DBG] from='client.? 192.168.123.102:0/2695365579' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:51:41.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:41 vm02 bash[17473]: cluster 2026-03-10T08:51:40.393253+0000 mgr.vm02.ttibzz (mgr.14195) 855 : cluster [DBG] pgmap v504: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:51:41.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:41 vm02 bash[17473]: cluster 2026-03-10T08:51:40.393253+0000 mgr.vm02.ttibzz (mgr.14195) 855 : cluster [DBG] pgmap v504: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:51:43.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:43 vm02 bash[17473]: cluster 2026-03-10T08:51:42.393641+0000 mgr.vm02.ttibzz (mgr.14195) 856 : cluster [DBG] pgmap v505: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:51:43.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:43 vm02 bash[17473]: cluster 2026-03-10T08:51:42.393641+0000 mgr.vm02.ttibzz (mgr.14195) 856 : cluster [DBG] pgmap v505: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:51:44.096 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop 2026-03-10T08:51:44.252 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:51:44.252 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (6m) 6m ago 12m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:51:44.252 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 
*:8000 running (6m) 6m ago 12m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:51:44.252 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (52s) 24s ago 12m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85 2026-03-10T08:51:44.252 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 24s ago 12m - - 2026-03-10T08:51:44.442 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:51:44.442 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:51:44.442 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state 2026-03-10T08:51:44.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:44 vm02 bash[17473]: audit 2026-03-10T08:51:44.448795+0000 mon.vm02 (mon.0) 1146 : audit [DBG] from='client.? 192.168.123.102:0/1122209580' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:51:44.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:44 vm02 bash[17473]: audit 2026-03-10T08:51:44.448795+0000 mon.vm02 (mon.0) 1146 : audit [DBG] from='client.? 192.168.123.102:0/1122209580' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:51:45.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:45 vm02 bash[17473]: audit 2026-03-10T08:51:44.087105+0000 mgr.vm02.ttibzz (mgr.14195) 857 : audit [DBG] from='client.16238 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:45.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:45 vm02 bash[17473]: audit 2026-03-10T08:51:44.087105+0000 mgr.vm02.ttibzz (mgr.14195) 857 : audit [DBG] from='client.16238 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:45.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:45 vm02 bash[17473]: audit 2026-03-10T08:51:44.255842+0000 mgr.vm02.ttibzz (mgr.14195) 858 : audit [DBG] from='client.16242 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:45.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:45 vm02 bash[17473]: audit 2026-03-10T08:51:44.255842+0000 mgr.vm02.ttibzz (mgr.14195) 858 : audit [DBG] from='client.16242 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:51:45.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:45 vm02 bash[17473]: cluster 2026-03-10T08:51:44.394034+0000 mgr.vm02.ttibzz (mgr.14195) 859 : cluster [DBG] pgmap v506: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:51:45.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:45 vm02 bash[17473]: cluster 2026-03-10T08:51:44.394034+0000 mgr.vm02.ttibzz (mgr.14195) 859 : cluster [DBG] pgmap v506: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:51:48.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:47 vm02 bash[17473]: cluster 2026-03-10T08:51:46.394423+0000 mgr.vm02.ttibzz (mgr.14195) 860 : cluster [DBG] pgmap v507: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:51:48.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 
08:51:47 vm02 bash[17473]: cluster 2026-03-10T08:51:46.394423+0000 mgr.vm02.ttibzz (mgr.14195) 860 : cluster [DBG] pgmap v507: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:51:49.609 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:51:49.753 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:51:49.753 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (6m) 6m ago 12m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:51:49.753 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (6m) 6m ago 12m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:51:49.753 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (57s) 29s ago 12m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:51:49.753 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 29s ago 12m - -
2026-03-10T08:51:49.928 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:51:49.928 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:51:49.928 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:51:50.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:49 vm02 bash[17473]: cluster 2026-03-10T08:51:48.394814+0000 mgr.vm02.ttibzz (mgr.14195) 861 : cluster [DBG] pgmap v508: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:51:50.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:49 vm02 bash[17473]: audit 2026-03-10T08:51:49.255665+0000 mon.vm02 (mon.0) 1147 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:51:51.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:50 vm02 bash[17473]: audit 2026-03-10T08:51:49.601435+0000 mgr.vm02.ttibzz (mgr.14195) 862 : audit [DBG] from='client.16250 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:51:51.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:50 vm02 bash[17473]: audit 2026-03-10T08:51:49.757322+0000 mgr.vm02.ttibzz (mgr.14195) 863 : audit [DBG] from='client.16254 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:51:51.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:50 vm02 bash[17473]: audit 2026-03-10T08:51:49.934157+0000 mon.vm02 (mon.0) 1148 : audit [DBG] from='client.? 192.168.123.102:0/2533212699' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:51:52.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:51 vm02 bash[17473]: cluster 2026-03-10T08:51:50.395225+0000 mgr.vm02.ttibzz (mgr.14195) 864 : cluster [DBG] pgmap v509: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:51:53.968 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:53 vm02 bash[17473]: cluster 2026-03-10T08:51:52.395641+0000 mgr.vm02.ttibzz (mgr.14195) 865 : cluster [DBG] pgmap v510: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:51:55.102 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:51:55.273 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:51:55.273 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (6m) 6m ago 12m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:51:55.273 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (6m) 6m ago 12m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:51:55.273 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (63s) 35s ago 12m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:51:55.273 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 35s ago 12m - -
2026-03-10T08:51:55.458 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:51:55.458 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:51:55.458 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:51:56.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:55 vm02 bash[17473]: cluster 2026-03-10T08:51:54.396024+0000 mgr.vm02.ttibzz (mgr.14195) 866 : cluster [DBG] pgmap v511: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:51:56.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:55 vm02 bash[17473]: audit 2026-03-10T08:51:55.463666+0000 mon.vm02 (mon.0) 1149 : audit [DBG] from='client.? 192.168.123.102:0/1427726826' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:51:57.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:56 vm02 bash[17473]: audit 2026-03-10T08:51:55.090139+0000 mgr.vm02.ttibzz (mgr.14195) 867 : audit [DBG] from='client.16262 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:51:57.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:56 vm02 bash[17473]: audit 2026-03-10T08:51:55.276828+0000 mgr.vm02.ttibzz (mgr.14195) 868 : audit [DBG] from='client.16266 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:51:58.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:57 vm02 bash[17473]: cluster 2026-03-10T08:51:56.396464+0000 mgr.vm02.ttibzz (mgr.14195) 869 : cluster [DBG] pgmap v512: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:52:00.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:51:59 vm02 bash[17473]: cluster 2026-03-10T08:51:58.396934+0000 mgr.vm02.ttibzz (mgr.14195) 870 : cluster [DBG] pgmap v513: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:52:00.629 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:52:00.770 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:52:00.771 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (7m) 6m ago 12m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:52:00.771 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (6m) 6m ago 12m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:52:00.771 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (69s) 40s ago 12m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:52:00.771 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 40s ago 12m - -
2026-03-10T08:52:00.950 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:52:00.950 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:52:00.950 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:52:02.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:01 vm02 bash[17473]: cluster 2026-03-10T08:52:00.397406+0000 mgr.vm02.ttibzz (mgr.14195) 871 : cluster [DBG] pgmap v514: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:52:02.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:01 vm02 bash[17473]: audit 2026-03-10T08:52:00.621280+0000 mgr.vm02.ttibzz (mgr.14195) 872 : audit [DBG] from='client.16274 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:52:02.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:01 vm02 bash[17473]: audit 2026-03-10T08:52:00.774541+0000 mgr.vm02.ttibzz (mgr.14195) 873 : audit [DBG] from='client.16278 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:52:02.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:01 vm02 bash[17473]: audit 2026-03-10T08:52:00.955921+0000 mon.vm02 (mon.0) 1150 : audit [DBG] from='client.? 192.168.123.102:0/705116747' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:52:04.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:03 vm02 bash[17473]: cluster 2026-03-10T08:52:02.397785+0000 mgr.vm02.ttibzz (mgr.14195) 874 : cluster [DBG] pgmap v515: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:52:05.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:04 vm02 bash[17473]: audit 2026-03-10T08:52:04.255920+0000 mon.vm02 (mon.0) 1151 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:52:06.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:05 vm02 bash[17473]: cluster 2026-03-10T08:52:04.398074+0000 mgr.vm02.ttibzz (mgr.14195) 875 : cluster [DBG] pgmap v516: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:52:06.117 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:52:06.260 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:52:06.260 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (7m) 6m ago 13m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:52:06.260 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (6m) 6m ago 13m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:52:06.260 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (74s) 46s ago 13m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:52:06.260 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 46s ago 13m - -
2026-03-10T08:52:06.437 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:52:06.437 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:52:06.437 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:52:07.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:06 vm02 bash[17473]: audit 2026-03-10T08:52:06.443430+0000 mon.vm02 (mon.0) 1152 : audit [DBG] from='client.? 192.168.123.102:0/2048149681' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:52:08.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:07 vm02 bash[17473]: audit 2026-03-10T08:52:06.108687+0000 mgr.vm02.ttibzz (mgr.14195) 876 : audit [DBG] from='client.16286 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:52:08.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:07 vm02 bash[17473]: audit 2026-03-10T08:52:06.263972+0000 mgr.vm02.ttibzz (mgr.14195) 877 : audit [DBG] from='client.16290 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:52:08.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:07 vm02 bash[17473]: cluster 2026-03-10T08:52:06.398464+0000 mgr.vm02.ttibzz (mgr.14195) 878 : cluster [DBG] pgmap v517: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:52:10.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:09 vm02 bash[17473]: cluster 2026-03-10T08:52:08.398862+0000 mgr.vm02.ttibzz (mgr.14195) 879 : cluster [DBG] pgmap v518: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:52:11.603 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:52:11.746 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:52:11.747 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (7m) 6m ago 13m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:52:11.747 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (6m) 6m ago 13m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:52:11.747 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (79s) 51s ago 13m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:52:11.747 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 51s ago 13m - -
2026-03-10T08:52:11.923 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:52:11.924 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:52:11.924 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:52:12.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:11 vm02 bash[17473]: cluster 2026-03-10T08:52:10.399339+0000 mgr.vm02.ttibzz (mgr.14195) 880 : cluster [DBG] pgmap v519: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:52:13.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:12 vm02 bash[17473]: audit 2026-03-10T08:52:11.595023+0000 mgr.vm02.ttibzz (mgr.14195) 881 : audit [DBG] from='client.16298 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:52:13.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:12 vm02 bash[17473]: audit 2026-03-10T08:52:11.750255+0000 mgr.vm02.ttibzz (mgr.14195) 882 : audit [DBG] from='client.16302 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:52:13.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:12 vm02 bash[17473]: audit 2026-03-10T08:52:11.929844+0000 mon.vm02 (mon.0) 1153 : audit [DBG] from='client.? 192.168.123.102:0/3440178938' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:52:14.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:13 vm02 bash[17473]: cluster 2026-03-10T08:52:12.399719+0000 mgr.vm02.ttibzz (mgr.14195) 883 : cluster [DBG] pgmap v520: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:52:16.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:15 vm02 bash[17473]: cluster 2026-03-10T08:52:14.400180+0000 mgr.vm02.ttibzz (mgr.14195) 884 : cluster [DBG] pgmap v521: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:52:17.098 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:52:17.246 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:52:17.246 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (7m) 6m ago 13m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:52:17.246 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (6m) 6m ago 13m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:52:17.246 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (85s) 57s ago 13m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:52:17.246 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 57s ago 13m - -
2026-03-10T08:52:17.426 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:52:17.426 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:52:17.426 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:52:18.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:17 vm02 bash[17473]: cluster 2026-03-10T08:52:16.400586+0000 mgr.vm02.ttibzz (mgr.14195) 885 : cluster [DBG] pgmap v522: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:52:18.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:17 vm02 bash[17473]: audit 2026-03-10T08:52:17.431903+0000 mon.vm02 (mon.0) 1154 : audit [DBG] from='client.? 192.168.123.102:0/2290776446' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:52:19.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:18 vm02 bash[17473]: audit 2026-03-10T08:52:17.089022+0000 mgr.vm02.ttibzz (mgr.14195) 886 : audit [DBG] from='client.16310 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:52:19.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:18 vm02 bash[17473]: audit 2026-03-10T08:52:17.249618+0000 mgr.vm02.ttibzz (mgr.14195) 887 : audit [DBG] from='client.25481 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:52:20.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:19 vm02 bash[17473]: cluster 2026-03-10T08:52:18.401003+0000 mgr.vm02.ttibzz (mgr.14195) 888 : cluster [DBG] pgmap v523: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:52:20.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:19 vm02 bash[17473]: audit 2026-03-10T08:52:19.256096+0000 mon.vm02 (mon.0) 1155 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:52:21.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:20 vm02 bash[17473]: audit 2026-03-10T08:52:20.440613+0000 mon.vm02 (mon.0) 1156 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:52:22.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:21 vm02 bash[17473]: cluster 2026-03-10T08:52:20.401399+0000 mgr.vm02.ttibzz (mgr.14195) 889 : cluster [DBG] pgmap v524: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:52:22.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:21 vm02 bash[17473]: audit 2026-03-10T08:52:20.753444+0000 mon.vm02 (mon.0) 1157 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:52:22.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:21 vm02 bash[17473]: audit 2026-03-10T08:52:20.753916+0000 mon.vm02 (mon.0) 1158 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T08:52:22.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:21 vm02 bash[17473]: cluster 2026-03-10T08:52:20.754853+0000 mgr.vm02.ttibzz (mgr.14195) 890 : cluster [DBG] pgmap v525: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:52:22.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:21 vm02 bash[17473]: audit 2026-03-10T08:52:20.758444+0000 mon.vm02 (mon.0) 1159 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:52:22.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:21 vm02 bash[17473]: audit 2026-03-10T08:52:20.760243+0000 mon.vm02 (mon.0) 1160 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T08:52:22.594 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:52:22.741 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:52:22.741 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (7m) 6m ago 13m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:52:22.741 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (6m) 6m ago 13m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:52:22.741 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (90s) 62s ago 13m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:52:22.741 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 62s ago 13m - -
2026-03-10T08:52:22.920 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:52:22.920 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:52:22.920 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:52:24.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:23 vm02 bash[17473]: audit 2026-03-10T08:52:22.585448+0000 mgr.vm02.ttibzz (mgr.14195) 891 : audit [DBG] from='client.16322 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:52:24.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:23 vm02 bash[17473]: audit 2026-03-10T08:52:22.744782+0000 mgr.vm02.ttibzz (mgr.14195) 892 : audit [DBG] from='client.16326 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:52:24.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:23 vm02 bash[17473]: cluster 2026-03-10T08:52:22.755204+0000 mgr.vm02.ttibzz (mgr.14195) 893 : cluster [DBG] pgmap v526: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:52:24.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:23 vm02 bash[17473]: audit 2026-03-10T08:52:22.926170+0000 mon.vm02 (mon.0) 1161 : audit [DBG] from='client.? 192.168.123.102:0/4230924940' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:52:26.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:25 vm02 bash[17473]: cluster 2026-03-10T08:52:24.755696+0000 mgr.vm02.ttibzz (mgr.14195) 894 : cluster [DBG] pgmap v527: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 197 B/s wr, 0 op/s
2026-03-10T08:52:28.088 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:52:28.234 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:52:28.234 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (7m) 6m ago 13m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:52:28.234 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (6m) 6m ago 13m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:52:28.234 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (96s) 68s ago 13m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:52:28.234 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 68s ago 13m - -
2026-03-10T08:52:28.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:27 vm02 bash[17473]: cluster 2026-03-10T08:52:26.756096+0000 mgr.vm02.ttibzz (mgr.14195) 895 : cluster [DBG] pgmap v528: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 197 B/s wr, 0 op/s
2026-03-10T08:52:28.411 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:52:28.411 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:52:28.411 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:52:29.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:28 vm02 bash[17473]: audit 2026-03-10T08:52:28.416933+0000 mon.vm02 (mon.0) 1162 : audit [DBG] from='client.? 192.168.123.102:0/1803670596' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:52:30.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:29 vm02 bash[17473]: audit 2026-03-10T08:52:28.079709+0000 mgr.vm02.ttibzz (mgr.14195) 896 : audit [DBG] from='client.16334 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:52:30.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:29 vm02 bash[17473]: audit 2026-03-10T08:52:28.237480+0000 mgr.vm02.ttibzz (mgr.14195) 897 : audit [DBG] from='client.16338 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:52:30.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:29 vm02 bash[17473]: cluster 2026-03-10T08:52:28.756496+0000 mgr.vm02.ttibzz (mgr.14195) 898 : cluster [DBG] pgmap v529: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 197 B/s wr, 0 op/s
2026-03-10T08:52:32.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:31 vm02 bash[17473]: cluster 2026-03-10T08:52:30.756950+0000 mgr.vm02.ttibzz (mgr.14195) 899 : cluster [DBG] pgmap v530: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 197 B/s rd, 395 B/s wr, 0 op/s
2026-03-10T08:52:33.580 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:52:33.725 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:52:33.725 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (7m) 6m ago 13m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:52:33.725 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (6m) 6m ago 13m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:52:33.725 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (101s) 73s ago 13m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:52:33.725 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 73s ago 13m - -
2026-03-10T08:52:33.912 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:52:33.912 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:52:33.912 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:52:34.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:33 vm02 bash[17473]: cluster 2026-03-10T08:52:32.757394+0000 mgr.vm02.ttibzz (mgr.14195) 900 : cluster [DBG] pgmap v531: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:52:35.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:34 vm02 bash[17473]: audit 2026-03-10T08:52:33.570047+0000 mgr.vm02.ttibzz (mgr.14195) 901 : audit [DBG] from='client.16346 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:52:35.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:34 vm02 bash[17473]: audit 2026-03-10T08:52:33.728830+0000 mgr.vm02.ttibzz (mgr.14195) 902 : audit [DBG] from='client.16350 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:52:35.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:34 vm02 bash[17473]: audit 2026-03-10T08:52:33.918492+0000 mon.vm02 (mon.0) 1163 : audit [DBG] from='client.? 192.168.123.102:0/3095850364' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:52:35.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:34 vm02 bash[17473]: audit 2026-03-10T08:52:34.256552+0000 mon.vm02 (mon.0) 1164 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:52:36.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:35 vm02 bash[17473]: cluster 2026-03-10T08:52:34.757892+0000 mgr.vm02.ttibzz (mgr.14195) 903 : cluster [DBG] pgmap v532: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:52:38.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:37 vm02 bash[17473]: cluster 2026-03-10T08:52:36.758417+0000 mgr.vm02.ttibzz (mgr.14195) 904 : cluster [DBG] pgmap v533: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:52:39.080 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:52:39.230 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:52:39.230 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (7m) 6m ago 13m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:52:39.230 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (7m) 6m ago 13m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:52:39.230 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (107s) 79s ago 13m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:52:39.230 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 79s ago 13m - -
2026-03-10T08:52:39.408 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:52:39.408 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:52:39.408 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:52:40.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:39 vm02 bash[17473]: cluster 2026-03-10T08:52:38.758774+0000 mgr.vm02.ttibzz (mgr.14195) 905 : cluster [DBG] pgmap v534: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:52:40.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:39 vm02 bash[17473]: audit 2026-03-10T08:52:39.414265+0000 mon.vm02 (mon.0) 1165 : audit [DBG] from='client.? 192.168.123.102:0/1906504624' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:52:41.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:40 vm02 bash[17473]: audit 2026-03-10T08:52:39.071653+0000 mgr.vm02.ttibzz (mgr.14195) 906 : audit [DBG] from='client.16358 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:52:41.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:40 vm02 bash[17473]: audit 2026-03-10T08:52:39.233569+0000 mgr.vm02.ttibzz (mgr.14195) 907 : audit [DBG] from='client.16362 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:52:42.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:41 vm02 bash[17473]: cluster 2026-03-10T08:52:40.759289+0000 mgr.vm02.ttibzz (mgr.14195) 908 : cluster [DBG] pgmap v535: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:52:44.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:43 vm02 bash[17473]: cluster 2026-03-10T08:52:42.759652+0000 mgr.vm02.ttibzz (mgr.14195) 909 : cluster [DBG] pgmap v536: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:52:44.588 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:52:44.732 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:52:44.732 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (7m) 7m ago 13m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:52:44.732 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (7m) 7m ago 13m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:52:44.732 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (112s) 84s ago 13m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:52:44.732 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 84s ago 13m - -
2026-03-10T08:52:44.910 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:52:44.910 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:52:44.911 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:52:46.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:45 vm02 bash[17473]: audit 2026-03-10T08:52:44.572085+0000 mgr.vm02.ttibzz (mgr.14195) 910 : audit [DBG] from='client.16370 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:52:46.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:45 vm02 bash[17473]: audit 2026-03-10T08:52:44.735711+0000 mgr.vm02.ttibzz (mgr.14195) 911 : audit [DBG] from='client.16374 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:52:46.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:45 vm02 bash[17473]: cluster 2026-03-10T08:52:44.759998+0000 mgr.vm02.ttibzz (mgr.14195) 912 : cluster [DBG] pgmap v537: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:52:46.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:45 vm02 bash[17473]: audit 2026-03-10T08:52:44.916564+0000 mon.vm02 (mon.0) 1166 : audit [DBG] from='client.? 192.168.123.102:0/3519139670' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:52:48.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:47 vm02 bash[17473]: cluster 2026-03-10T08:52:46.760481+0000 mgr.vm02.ttibzz (mgr.14195) 913 : cluster [DBG] pgmap v538: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:52:50.088 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:52:50.236 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:52:50.236 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (7m) 7m ago 13m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:52:50.236 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (7m) 7m ago 13m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:52:50.236 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (118s) 90s ago 13m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:52:50.236 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 90s ago 13m - -
2026-03-10T08:52:50.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:49 vm02 bash[17473]: cluster 2026-03-10T08:52:48.760902+0000 mgr.vm02.ttibzz (mgr.14195) 914 : cluster [DBG] pgmap v539: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:52:50.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:49 vm02 bash[17473]: audit 2026-03-10T08:52:49.256480+0000 mon.vm02 (mon.0) 1167 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:52:50.411 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:52:50.411 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:52:50.411 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:52:51.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:50 vm02 bash[17473]: audit 2026-03-10T08:52:50.417432+0000 mon.vm02 (mon.0) 1168 : audit [DBG] from='client.? 192.168.123.102:0/3077549403' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:52:52.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:51 vm02 bash[17473]: audit 2026-03-10T08:52:50.078998+0000 mgr.vm02.ttibzz (mgr.14195) 915 : audit [DBG] from='client.16382 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:52:52.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:51 vm02 bash[17473]: audit 2026-03-10T08:52:50.239154+0000 mgr.vm02.ttibzz (mgr.14195) 916 : audit [DBG] from='client.16386 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:52:52.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:51 vm02 bash[17473]: cluster 2026-03-10T08:52:50.761279+0000 mgr.vm02.ttibzz (mgr.14195) 917 : cluster [DBG] pgmap v540: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:52:54.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:53 vm02 bash[17473]: cluster 2026-03-10T08:52:52.761782+0000 mgr.vm02.ttibzz (mgr.14195) 918 : cluster [DBG] pgmap v541: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:52:55.584 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:52:55.733 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:52:55.733 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (8m) 7m ago 13m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:52:55.734 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (7m) 7m ago 13m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:52:55.734 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (2m) 95s ago 13m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:52:55.734 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 95s ago 13m - -
2026-03-10T08:52:55.914 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:52:55.914 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:52:55.914 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:52:56.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:55 vm02 bash[17473]: cluster 2026-03-10T08:52:54.762249+0000 mgr.vm02.ttibzz (mgr.14195) 919 : cluster [DBG] pgmap v542: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:52:56.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:55 vm02 bash[17473]: audit 2026-03-10T08:52:55.919698+0000 mon.vm02 (mon.0) 1169 : audit [DBG] from='client.? 192.168.123.102:0/1968099190' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:52:57.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:56 vm02 bash[17473]: audit 2026-03-10T08:52:55.574831+0000 mgr.vm02.ttibzz (mgr.14195) 920 : audit [DBG] from='client.16394 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:52:57.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:56 vm02 bash[17473]: audit 2026-03-10T08:52:55.736994+0000 mgr.vm02.ttibzz (mgr.14195) 921 : audit [DBG] from='client.16398 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:52:58.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:57 vm02 bash[17473]: cluster 2026-03-10T08:52:56.762729+0000 mgr.vm02.ttibzz (mgr.14195) 922 : cluster [DBG] pgmap v543: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:52:58.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:57 vm02 bash[17473]: cluster 2026-03-10T08:52:56.762729+0000 mgr.vm02.ttibzz (mgr.14195) 922 : cluster [DBG] pgmap v543: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s
2026-03-10T08:53:00.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:52:59 vm02 bash[17473]: cluster 2026-03-10T08:52:58.763236+0000 mgr.vm02.ttibzz (mgr.14195) 923 : cluster [DBG] pgmap v544: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:53:01.080 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:53:01.231 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:53:01.231 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (8m) 7m ago 13m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:53:01.231 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (7m) 7m ago 13m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:53:01.231 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (2m) 101s ago 13m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:53:01.231 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 101s ago 13m - -
2026-03-10T08:53:01.408 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:53:01.408 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:53:01.408 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:53:02.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:01 vm02 bash[17473]: cluster 2026-03-10T08:53:00.763750+0000 mgr.vm02.ttibzz (mgr.14195) 924 : cluster [DBG] pgmap v545: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:53:02.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:01 vm02 bash[17473]: audit 2026-03-10T08:53:01.413830+0000 mon.vm02 (mon.0) 1170 : audit [DBG] from='client.? 192.168.123.102:0/2609933055' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:53:03.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:02 vm02 bash[17473]: audit 2026-03-10T08:53:01.069980+0000 mgr.vm02.ttibzz (mgr.14195) 925 : audit [DBG] from='client.16406 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:53:03.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:02 vm02 bash[17473]: audit 2026-03-10T08:53:01.234597+0000 mgr.vm02.ttibzz (mgr.14195) 926 : audit [DBG] from='client.25545 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:53:04.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:03 vm02 bash[17473]: cluster 2026-03-10T08:53:02.764136+0000 mgr.vm02.ttibzz (mgr.14195) 927 : cluster [DBG] pgmap v546: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:53:05.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:04 vm02 bash[17473]: audit 2026-03-10T08:53:04.256762+0000 mon.vm02 (mon.0) 1171 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:53:06.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:05 vm02 bash[17473]: cluster 2026-03-10T08:53:04.764537+0000 mgr.vm02.ttibzz (mgr.14195) 928 : cluster [DBG] pgmap v547: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:53:06.576 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:53:06.720 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:53:06.720 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (8m) 7m ago 14m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:53:06.720 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (7m) 7m ago 14m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:53:06.720 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (2m) 106s ago 14m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:53:06.720 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 106s ago 14m - -
2026-03-10T08:53:06.899 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:53:06.899 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:53:06.899 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:53:07.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:06 vm02 bash[17473]: audit 2026-03-10T08:53:06.904412+0000 mon.vm02 (mon.0) 1172 : audit [DBG] from='client.? 192.168.123.102:0/399029667' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:53:08.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:07 vm02 bash[17473]: audit 2026-03-10T08:53:06.567533+0000 mgr.vm02.ttibzz (mgr.14195) 929 : audit [DBG] from='client.16418 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:53:08.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:07 vm02 bash[17473]: audit 2026-03-10T08:53:06.722896+0000 mgr.vm02.ttibzz (mgr.14195) 930 : audit [DBG] from='client.16422 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:53:08.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:07 vm02 bash[17473]: cluster 2026-03-10T08:53:06.764988+0000 mgr.vm02.ttibzz (mgr.14195) 931 : cluster [DBG] pgmap v548: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:53:10.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:10 vm02 bash[17473]: cluster 2026-03-10T08:53:08.765454+0000 mgr.vm02.ttibzz (mgr.14195) 932 : cluster [DBG] pgmap v549: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:53:12.065 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:53:12.217 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:53:12.217 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (8m) 7m ago 14m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:53:12.217 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (7m) 7m ago 14m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:53:12.217 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (2m) 112s ago 14m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:53:12.217 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 112s ago 14m - -
2026-03-10T08:53:12.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:12 vm02 bash[17473]: cluster 2026-03-10T08:53:10.765902+0000 mgr.vm02.ttibzz (mgr.14195) 933 : cluster [DBG] pgmap v550: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:53:12.393 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:53:12.393 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:53:12.393 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:53:13.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:13 vm02 bash[17473]: audit 2026-03-10T08:53:12.399054+0000 mon.vm02 (mon.0) 1173 : audit [DBG] from='client.? 192.168.123.102:0/1996496549' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:53:14.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:14 vm02 bash[17473]: audit 2026-03-10T08:53:12.057013+0000 mgr.vm02.ttibzz (mgr.14195) 934 : audit [DBG] from='client.16430 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:53:14.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:14 vm02 bash[17473]: audit 2026-03-10T08:53:12.220169+0000 mgr.vm02.ttibzz (mgr.14195) 935 : audit [DBG] from='client.16434 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:53:14.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:14 vm02 bash[17473]: cluster 2026-03-10T08:53:12.766332+0000 mgr.vm02.ttibzz (mgr.14195) 936 : cluster [DBG] pgmap v551: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:53:16.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:15 vm02 bash[17473]: cluster 2026-03-10T08:53:14.766806+0000 mgr.vm02.ttibzz (mgr.14195) 937 : cluster [DBG] pgmap v552: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:53:17.571 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:53:17.732 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:53:17.732 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (8m) 7m ago 14m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:53:17.733 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (7m) 7m ago 14m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:53:17.733 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (2m) 117s ago 14m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:53:17.733 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 117s ago 14m - -
2026-03-10T08:53:17.930 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:53:17.931 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:53:17.931 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:53:18.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:17 vm02 bash[17473]: cluster 2026-03-10T08:53:16.767275+0000 mgr.vm02.ttibzz (mgr.14195) 938 : cluster [DBG] pgmap v553: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:53:19.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:18 vm02 bash[17473]: audit 2026-03-10T08:53:17.562375+0000 mgr.vm02.ttibzz (mgr.14195) 939 : audit [DBG] from='client.16442 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:53:19.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:18 vm02 bash[17473]: audit 2026-03-10T08:53:17.735981+0000 mgr.vm02.ttibzz (mgr.14195) 940 : audit [DBG] from='client.16446 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:53:19.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:18 vm02 bash[17473]: audit 2026-03-10T08:53:17.931296+0000 mon.vm07 (mon.1) 43 : audit [DBG] from='client.? 192.168.123.102:0/4170060421' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:53:19.993 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:19 vm02 bash[17473]: cluster 2026-03-10T08:53:18.767749+0000 mgr.vm02.ttibzz (mgr.14195) 941 : cluster [DBG] pgmap v554: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:53:19.993 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:19 vm02 bash[17473]: audit 2026-03-10T08:53:19.257071+0000 mon.vm02 (mon.0) 1174 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:53:21.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:20 vm02 bash[17473]: audit 2026-03-10T08:53:20.799855+0000 mon.vm02 (mon.0) 1175 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:53:22.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:21 vm02 bash[17473]: cluster 2026-03-10T08:53:20.768253+0000 mgr.vm02.ttibzz (mgr.14195) 942 : cluster [DBG] pgmap v555: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:53:22.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:21 vm02 bash[17473]: audit 2026-03-10T08:53:21.112104+0000 mon.vm02 (mon.0) 1176 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:53:22.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:21 vm02 bash[17473]: audit 2026-03-10T08:53:21.112586+0000 mon.vm02 (mon.0) 1177 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T08:53:22.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:21 vm02 bash[17473]: audit 2026-03-10T08:53:21.117423+0000 mon.vm02 (mon.0) 1178 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:53:22.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:21 vm02 bash[17473]: audit 2026-03-10T08:53:21.118643+0000 mon.vm02 (mon.0) 1179 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T08:53:23.110 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:53:23.268 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:53:23.268 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (8m) 7m ago 14m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:53:23.268 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (7m) 7m ago 14m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:53:23.268 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (2m) 2m ago 14m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:53:23.268 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 2m ago 14m - -
2026-03-10T08:53:23.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:22 vm02 bash[17473]: cluster 2026-03-10T08:53:21.113629+0000 mgr.vm02.ttibzz (mgr.14195) 943 : cluster [DBG] pgmap v556: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:53:23.456 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:53:23.456 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:53:23.456 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:53:24.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:23 vm02 bash[17473]: audit 2026-03-10T08:53:23.461493+0000 mon.vm02 (mon.0) 1180 : audit [DBG] from='client.? 192.168.123.102:0/3410993082' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:53:25.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:24 vm02 bash[17473]: audit 2026-03-10T08:53:23.099631+0000 mgr.vm02.ttibzz (mgr.14195) 944 : audit [DBG] from='client.16454 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:53:25.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:24 vm02 bash[17473]: cluster 2026-03-10T08:53:23.113981+0000 mgr.vm02.ttibzz (mgr.14195) 945 : cluster [DBG] pgmap v557: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:53:25.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:24 vm02 bash[17473]: audit 2026-03-10T08:53:23.271293+0000 mgr.vm02.ttibzz (mgr.14195) 946 : audit [DBG] from='client.16458 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:53:27.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:26 vm02 bash[17473]: cluster 2026-03-10T08:53:25.114347+0000 mgr.vm02.ttibzz (mgr.14195) 947 : cluster [DBG] pgmap v558: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 197 B/s wr, 0 op/s
2026-03-10T08:53:28.626 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:53:28.780 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:53:28.780 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (8m) 7m ago 14m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:53:28.780 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (7m) 7m ago 14m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:53:28.780 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (2m) 2m ago 14m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:53:28.780 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 2m ago 14m - -
2026-03-10T08:53:28.969 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:53:28.969 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:53:28.969 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:53:29.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:28 vm02 bash[17473]: cluster 2026-03-10T08:53:27.114766+0000 mgr.vm02.ttibzz (mgr.14195) 948 : cluster [DBG] pgmap v559: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 197 B/s wr, 0 op/s
2026-03-10T08:53:30.030 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:29 vm02 bash[17473]: audit 2026-03-10T08:53:28.617486+0000 mgr.vm02.ttibzz (mgr.14195) 949 : audit [DBG] from='client.16466 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:53:30.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:29 vm02 bash[17473]: audit 2026-03-10T08:53:28.783244+0000 mgr.vm02.ttibzz (mgr.14195) 950 : audit [DBG] from='client.16470 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:53:30.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:29 vm02 bash[17473]: audit 2026-03-10T08:53:28.975109+0000 mon.vm02 (mon.0) 1181 : audit [DBG] from='client.? 192.168.123.102:0/780948363' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:53:31.280 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:30 vm02 bash[17473]: cluster 2026-03-10T08:53:29.115198+0000 mgr.vm02.ttibzz (mgr.14195) 951 : cluster [DBG] pgmap v560: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 197 B/s wr, 0 op/s
2026-03-10T08:53:32.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:32 vm02 bash[17473]: cluster 2026-03-10T08:53:31.115594+0000 mgr.vm02.ttibzz (mgr.14195) 952 : cluster [DBG] pgmap v561: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 197 B/s rd, 395 B/s wr, 0 op/s
2026-03-10T08:53:34.155 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:53:34.321 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:53:34.321 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (8m) 7m ago 14m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:53:34.321 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (7m) 7m ago 14m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:53:34.321 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (2m) 2m ago 14m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:53:34.321 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 2m ago 14m - -
2026-03-10T08:53:34.517 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:53:34.517 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:53:34.517 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:53:34.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:34 vm02 bash[17473]: cluster 2026-03-10T08:53:33.115951+0000 mgr.vm02.ttibzz (mgr.14195) 953 : cluster [DBG] pgmap v562: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:53:35.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:35 vm02 bash[17473]: audit 2026-03-10T08:53:34.143003+0000 mgr.vm02.ttibzz (mgr.14195) 954 : audit [DBG] from='client.16478 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:53:35.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:35 vm02 bash[17473]: audit 2026-03-10T08:53:34.257090+0000 mon.vm02 (mon.0) 1182 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:53:35.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:35 vm02 bash[17473]: audit 2026-03-10T08:53:34.323314+0000 mgr.vm02.ttibzz (mgr.14195) 955 : audit [DBG] from='client.16482 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:53:35.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:35 vm02 bash[17473]: audit 2026-03-10T08:53:34.522807+0000 mon.vm02 (mon.0) 1183 : audit [DBG] from='client.? 192.168.123.102:0/1789027307' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:53:36.530 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:36 vm02 bash[17473]: cluster 2026-03-10T08:53:35.116360+0000 mgr.vm02.ttibzz (mgr.14195) 956 : cluster [DBG] pgmap v563: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:53:38.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:38 vm02 bash[17473]: cluster 2026-03-10T08:53:37.116766+0000 mgr.vm02.ttibzz (mgr.14195) 957 : cluster [DBG] pgmap v564: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:53:39.695 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:53:39.848 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:53:39.848 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (8m) 8m ago 14m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:53:39.848 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (8m) 8m ago 14m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:53:39.848 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (2m) 2m ago 14m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:53:39.848 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 2m ago 14m - -
2026-03-10T08:53:40.039 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:53:40.039 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:53:40.039 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:53:40.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:40 vm02 bash[17473]: cluster 2026-03-10T08:53:39.117129+0000 mgr.vm02.ttibzz (mgr.14195) 958 : cluster [DBG] pgmap v565: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:53:40.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:40 vm02 bash[17473]: audit 2026-03-10T08:53:39.684411+0000 mgr.vm02.ttibzz (mgr.14195) 959 : audit [DBG] from='client.16490 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:53:40.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:40 vm02 bash[17473]: audit 2026-03-10T08:53:39.850989+0000 mgr.vm02.ttibzz (mgr.14195) 960 : audit [DBG] from='client.16494 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:53:40.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:40 vm02 bash[17473]: audit 2026-03-10T08:53:40.045083+0000 mon.vm02 (mon.0) 1184 : audit [DBG] from='client.? 192.168.123.102:0/2083655030' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:53:42.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:42 vm02 bash[17473]: cluster 2026-03-10T08:53:41.117528+0000 mgr.vm02.ttibzz (mgr.14195) 961 : cluster [DBG] pgmap v566: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:53:44.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:44 vm02 bash[17473]: cluster 2026-03-10T08:53:43.117915+0000 mgr.vm02.ttibzz (mgr.14195) 962 : cluster [DBG] pgmap v567: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:53:45.216 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:53:45.390 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:53:45.390 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (8m) 8m ago 14m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:53:45.390 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (8m) 8m ago 14m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:53:45.390 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (2m) 2m ago 14m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:53:45.390 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 2m ago 14m - -
2026-03-10T08:53:45.583 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:53:45.583 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:53:45.583 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:53:46.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:46 vm02 bash[17473]: cluster 2026-03-10T08:53:45.118287+0000 mgr.vm02.ttibzz (mgr.14195) 963 : cluster [DBG] pgmap v568: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:53:46.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:46 vm02 bash[17473]: audit 2026-03-10T08:53:45.206406+0000 mgr.vm02.ttibzz (mgr.14195) 964 : audit [DBG] from='client.16502 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:53:46.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:46 vm02 bash[17473]: audit 2026-03-10T08:53:45.392578+0000 mgr.vm02.ttibzz (mgr.14195) 965 : audit [DBG] from='client.16506 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:53:46.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:46 vm02 bash[17473]: audit 2026-03-10T08:53:45.588430+0000 mon.vm02 (mon.0) 1185 : audit [DBG] from='client.? 192.168.123.102:0/2312059969' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:53:48.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:48 vm02 bash[17473]: cluster 2026-03-10T08:53:47.118745+0000 mgr.vm02.ttibzz (mgr.14195) 966 : cluster [DBG] pgmap v569: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:53:49.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:49 vm02 bash[17473]: audit 2026-03-10T08:53:49.257399+0000 mon.vm02 (mon.0) 1186 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:53:50.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:50 vm02 bash[17473]: cluster 2026-03-10T08:53:49.119317+0000 mgr.vm02.ttibzz (mgr.14195) 967 : cluster [DBG] pgmap v570: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:53:50.760 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:53:50.909 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:53:50.909 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (8m) 8m ago 14m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:53:50.909 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (8m) 8m ago 14m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:53:50.909 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (2m) 2m ago 14m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:53:50.909 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 2m ago 14m - -
2026-03-10T08:53:51.096 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:53:51.096 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:53:51.096 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:53:51.780 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:51 vm02 bash[17473]: audit 2026-03-10T08:53:50.749179+0000 mgr.vm02.ttibzz (mgr.14195) 968 : audit [DBG] from='client.16514 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:53:51.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:51 vm02 bash[17473]: audit 2026-03-10T08:53:50.912688+0000 mgr.vm02.ttibzz (mgr.14195) 969 : audit [DBG] from='client.16518 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:53:51.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:51 vm02 bash[17473]: audit 2026-03-10T08:53:51.101981+0000 mon.vm02 (mon.0) 1187 : audit [DBG] from='client.? 192.168.123.102:0/3977418908' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:53:52.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:52 vm02 bash[17473]: cluster 2026-03-10T08:53:51.119792+0000 mgr.vm02.ttibzz (mgr.14195) 970 : cluster [DBG] pgmap v571: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:53:54.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:54 vm02 bash[17473]: cluster 2026-03-10T08:53:53.120121+0000 mgr.vm02.ttibzz (mgr.14195) 971 : cluster [DBG] pgmap v572: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:53:56.265 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:53:56.415 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:53:56.415 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (9m) 8m ago 14m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:53:56.415 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (8m) 8m ago 14m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:53:56.415 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (3m) 2m ago 14m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:53:56.415 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 2m ago 14m - -
2026-03-10T08:53:56.606 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:53:56.606 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:53:56.606 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:53:56.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:56 vm02 bash[17473]: cluster 2026-03-10T08:53:55.120547+0000 mgr.vm02.ttibzz (mgr.14195) 972 : cluster [DBG] pgmap v573: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:53:57.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:57 vm02 bash[17473]: audit 2026-03-10T08:53:56.254724+0000 mgr.vm02.ttibzz (mgr.14195) 973 : audit [DBG] from='client.16526 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:53:57.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:57 vm02 bash[17473]: audit 2026-03-10T08:53:56.418293+0000 mgr.vm02.ttibzz (mgr.14195) 974 : audit [DBG] from='client.16530 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:53:57.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:57 vm02 bash[17473]: audit 2026-03-10T08:53:56.611426+0000 mon.vm02 (mon.0) 1188 : audit [DBG] from='client.? 192.168.123.102:0/3423068418' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:53:58.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:53:58 vm02 bash[17473]: cluster 2026-03-10T08:53:57.120947+0000 mgr.vm02.ttibzz (mgr.14195) 975 : cluster [DBG] pgmap v574: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:54:00.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:00 vm02 bash[17473]: cluster 2026-03-10T08:53:59.121426+0000 mgr.vm02.ttibzz (mgr.14195) 976 : cluster [DBG] pgmap v575: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:54:01.783 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:54:01.930 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:54:01.930 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (9m) 8m ago 14m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:54:01.930 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (8m) 8m ago 14m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:54:01.930 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (3m) 2m ago 14m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:54:01.930 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 2m ago 14m - -
2026-03-10T08:54:02.113 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:54:02.113 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:54:02.113 INFO:teuthology.orchestra.run.vm02.stdout:    daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:54:03.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:02 vm02 bash[17473]: cluster 2026-03-10T08:54:01.121830+0000 mgr.vm02.ttibzz (mgr.14195) 977 : cluster [DBG] pgmap v576: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:54:03.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:02 vm02 bash[17473]: audit 2026-03-10T08:54:01.768656+0000 mgr.vm02.ttibzz (mgr.14195) 978 : audit [DBG] from='client.16538 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:54:03.031
INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:02 vm02 bash[17473]: audit 2026-03-10T08:54:01.768656+0000 mgr.vm02.ttibzz (mgr.14195) 978 : audit [DBG] from='client.16538 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:03.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:02 vm02 bash[17473]: audit 2026-03-10T08:54:01.933323+0000 mgr.vm02.ttibzz (mgr.14195) 979 : audit [DBG] from='client.16542 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:03.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:02 vm02 bash[17473]: audit 2026-03-10T08:54:01.933323+0000 mgr.vm02.ttibzz (mgr.14195) 979 : audit [DBG] from='client.16542 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:03.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:02 vm02 bash[17473]: audit 2026-03-10T08:54:02.113569+0000 mon.vm07 (mon.1) 44 : audit [DBG] from='client.? 192.168.123.102:0/629026560' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:54:03.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:02 vm02 bash[17473]: audit 2026-03-10T08:54:02.113569+0000 mon.vm07 (mon.1) 44 : audit [DBG] from='client.? 192.168.123.102:0/629026560' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:54:05.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:04 vm02 bash[17473]: cluster 2026-03-10T08:54:03.122211+0000 mgr.vm02.ttibzz (mgr.14195) 980 : cluster [DBG] pgmap v577: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:54:05.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:04 vm02 bash[17473]: cluster 2026-03-10T08:54:03.122211+0000 mgr.vm02.ttibzz (mgr.14195) 980 : cluster [DBG] pgmap v577: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:54:05.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:04 vm02 bash[17473]: audit 2026-03-10T08:54:04.257508+0000 mon.vm02 (mon.0) 1189 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:54:05.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:04 vm02 bash[17473]: audit 2026-03-10T08:54:04.257508+0000 mon.vm02 (mon.0) 1189 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:54:07.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:06 vm02 bash[17473]: cluster 2026-03-10T08:54:05.122607+0000 mgr.vm02.ttibzz (mgr.14195) 981 : cluster [DBG] pgmap v578: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:54:07.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:06 vm02 bash[17473]: cluster 2026-03-10T08:54:05.122607+0000 mgr.vm02.ttibzz (mgr.14195) 981 : cluster [DBG] pgmap v578: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:54:07.283 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop 2026-03-10T08:54:07.434 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS 
REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:54:07.434 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (9m) 8m ago 15m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:54:07.434 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (8m) 8m ago 15m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:54:07.434 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (3m) 2m ago 15m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85 2026-03-10T08:54:07.434 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 2m ago 15m - - 2026-03-10T08:54:07.619 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:54:07.619 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:54:07.619 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state 2026-03-10T08:54:08.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:07 vm02 bash[17473]: audit 2026-03-10T08:54:07.624760+0000 mon.vm02 (mon.0) 1190 : audit [DBG] from='client.? 192.168.123.102:0/3843679059' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:54:08.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:07 vm02 bash[17473]: audit 2026-03-10T08:54:07.624760+0000 mon.vm02 (mon.0) 1190 : audit [DBG] from='client.? 192.168.123.102:0/3843679059' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:54:09.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:08 vm02 bash[17473]: cluster 2026-03-10T08:54:07.123151+0000 mgr.vm02.ttibzz (mgr.14195) 982 : cluster [DBG] pgmap v579: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:54:09.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:08 vm02 bash[17473]: cluster 2026-03-10T08:54:07.123151+0000 mgr.vm02.ttibzz (mgr.14195) 982 : cluster [DBG] pgmap v579: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:54:09.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:08 vm02 bash[17473]: audit 2026-03-10T08:54:07.274123+0000 mgr.vm02.ttibzz (mgr.14195) 983 : audit [DBG] from='client.16550 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:09.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:08 vm02 bash[17473]: audit 2026-03-10T08:54:07.274123+0000 mgr.vm02.ttibzz (mgr.14195) 983 : audit [DBG] from='client.16550 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:09.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:08 vm02 bash[17473]: audit 2026-03-10T08:54:07.436683+0000 mgr.vm02.ttibzz (mgr.14195) 984 : audit [DBG] from='client.16554 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:09.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:08 vm02 bash[17473]: audit 2026-03-10T08:54:07.436683+0000 mgr.vm02.ttibzz (mgr.14195) 984 : audit [DBG] from='client.16554 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:11.281 
INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:10 vm02 bash[17473]: cluster 2026-03-10T08:54:09.123671+0000 mgr.vm02.ttibzz (mgr.14195) 985 : cluster [DBG] pgmap v580: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:54:11.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:10 vm02 bash[17473]: cluster 2026-03-10T08:54:09.123671+0000 mgr.vm02.ttibzz (mgr.14195) 985 : cluster [DBG] pgmap v580: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:54:12.803 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop 2026-03-10T08:54:12.952 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:54:12.952 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (9m) 8m ago 15m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:54:12.952 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (8m) 8m ago 15m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:54:12.952 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (3m) 2m ago 15m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85 2026-03-10T08:54:12.952 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 2m ago 15m - - 2026-03-10T08:54:13.144 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:54:13.144 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:54:13.144 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state 2026-03-10T08:54:13.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:12 vm02 bash[17473]: cluster 2026-03-10T08:54:11.124171+0000 mgr.vm02.ttibzz (mgr.14195) 986 : cluster [DBG] pgmap v581: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:54:13.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:12 vm02 bash[17473]: cluster 2026-03-10T08:54:11.124171+0000 mgr.vm02.ttibzz (mgr.14195) 986 : cluster [DBG] pgmap v581: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:54:14.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:13 vm02 bash[17473]: audit 2026-03-10T08:54:12.793757+0000 mgr.vm02.ttibzz (mgr.14195) 987 : audit [DBG] from='client.16562 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:14.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:13 vm02 bash[17473]: audit 2026-03-10T08:54:12.793757+0000 mgr.vm02.ttibzz (mgr.14195) 987 : audit [DBG] from='client.16562 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:14.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:13 vm02 bash[17473]: audit 2026-03-10T08:54:12.954957+0000 mgr.vm02.ttibzz (mgr.14195) 988 : audit [DBG] from='client.25649 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:14.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:13 vm02 bash[17473]: audit 2026-03-10T08:54:12.954957+0000 mgr.vm02.ttibzz (mgr.14195) 988 : audit [DBG] 
from='client.25649 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:14.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:13 vm02 bash[17473]: audit 2026-03-10T08:54:13.149530+0000 mon.vm02 (mon.0) 1191 : audit [DBG] from='client.? 192.168.123.102:0/840031272' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:54:14.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:13 vm02 bash[17473]: audit 2026-03-10T08:54:13.149530+0000 mon.vm02 (mon.0) 1191 : audit [DBG] from='client.? 192.168.123.102:0/840031272' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:54:15.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:14 vm02 bash[17473]: cluster 2026-03-10T08:54:13.124672+0000 mgr.vm02.ttibzz (mgr.14195) 989 : cluster [DBG] pgmap v582: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:54:15.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:14 vm02 bash[17473]: cluster 2026-03-10T08:54:13.124672+0000 mgr.vm02.ttibzz (mgr.14195) 989 : cluster [DBG] pgmap v582: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:54:17.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:16 vm02 bash[17473]: cluster 2026-03-10T08:54:15.125209+0000 mgr.vm02.ttibzz (mgr.14195) 990 : cluster [DBG] pgmap v583: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:54:17.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:16 vm02 bash[17473]: cluster 2026-03-10T08:54:15.125209+0000 mgr.vm02.ttibzz (mgr.14195) 990 : cluster [DBG] pgmap v583: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:54:18.318 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop 2026-03-10T08:54:18.474 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:54:18.474 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (9m) 8m ago 15m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:54:18.474 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (8m) 8m ago 15m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:54:18.474 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (3m) 2m ago 15m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85 2026-03-10T08:54:18.474 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 2m ago 15m - - 2026-03-10T08:54:18.669 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:54:18.669 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:54:18.669 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state 2026-03-10T08:54:19.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:18 vm02 bash[17473]: cluster 2026-03-10T08:54:17.125632+0000 mgr.vm02.ttibzz (mgr.14195) 991 : cluster [DBG] pgmap v584: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:54:19.281 
INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:18 vm02 bash[17473]: cluster 2026-03-10T08:54:17.125632+0000 mgr.vm02.ttibzz (mgr.14195) 991 : cluster [DBG] pgmap v584: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:54:19.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:18 vm02 bash[17473]: audit 2026-03-10T08:54:18.674732+0000 mon.vm02 (mon.0) 1192 : audit [DBG] from='client.? 192.168.123.102:0/1975523884' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:54:19.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:18 vm02 bash[17473]: audit 2026-03-10T08:54:18.674732+0000 mon.vm02 (mon.0) 1192 : audit [DBG] from='client.? 192.168.123.102:0/1975523884' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:54:20.742 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:20 vm02 bash[17473]: audit 2026-03-10T08:54:18.307848+0000 mgr.vm02.ttibzz (mgr.14195) 992 : audit [DBG] from='client.16574 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:20.742 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:20 vm02 bash[17473]: audit 2026-03-10T08:54:18.307848+0000 mgr.vm02.ttibzz (mgr.14195) 992 : audit [DBG] from='client.16574 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:20.742 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:20 vm02 bash[17473]: audit 2026-03-10T08:54:18.476736+0000 mgr.vm02.ttibzz (mgr.14195) 993 : audit [DBG] from='client.16578 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:20.742 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:20 vm02 bash[17473]: audit 2026-03-10T08:54:18.476736+0000 mgr.vm02.ttibzz (mgr.14195) 993 : audit [DBG] from='client.16578 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:20.742 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:20 vm02 bash[17473]: audit 2026-03-10T08:54:19.257780+0000 mon.vm02 (mon.0) 1193 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:54:20.742 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:20 vm02 bash[17473]: audit 2026-03-10T08:54:19.257780+0000 mon.vm02 (mon.0) 1193 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:54:21.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:21 vm02 bash[17473]: cluster 2026-03-10T08:54:19.126227+0000 mgr.vm02.ttibzz (mgr.14195) 994 : cluster [DBG] pgmap v585: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:54:21.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:21 vm02 bash[17473]: cluster 2026-03-10T08:54:19.126227+0000 mgr.vm02.ttibzz (mgr.14195) 994 : cluster [DBG] pgmap v585: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:54:21.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:21 vm02 bash[17473]: audit 2026-03-10T08:54:21.157805+0000 mon.vm02 (mon.0) 1194 : audit [DBG] from='mgr.14195 
192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:54:21.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:21 vm02 bash[17473]: audit 2026-03-10T08:54:21.157805+0000 mon.vm02 (mon.0) 1194 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:54:23.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:22 vm02 bash[17473]: cluster 2026-03-10T08:54:21.126632+0000 mgr.vm02.ttibzz (mgr.14195) 995 : cluster [DBG] pgmap v586: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:54:23.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:22 vm02 bash[17473]: cluster 2026-03-10T08:54:21.126632+0000 mgr.vm02.ttibzz (mgr.14195) 995 : cluster [DBG] pgmap v586: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:54:23.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:22 vm02 bash[17473]: audit 2026-03-10T08:54:21.496243+0000 mon.vm02 (mon.0) 1195 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:54:23.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:22 vm02 bash[17473]: audit 2026-03-10T08:54:21.496243+0000 mon.vm02 (mon.0) 1195 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:54:23.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:22 vm02 bash[17473]: audit 2026-03-10T08:54:21.496849+0000 mon.vm02 (mon.0) 1196 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T08:54:23.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:22 vm02 bash[17473]: audit 2026-03-10T08:54:21.496849+0000 mon.vm02 (mon.0) 1196 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T08:54:23.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:22 vm02 bash[17473]: cluster 2026-03-10T08:54:21.498042+0000 mgr.vm02.ttibzz (mgr.14195) 996 : cluster [DBG] pgmap v587: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:54:23.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:22 vm02 bash[17473]: cluster 2026-03-10T08:54:21.498042+0000 mgr.vm02.ttibzz (mgr.14195) 996 : cluster [DBG] pgmap v587: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:54:23.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:22 vm02 bash[17473]: audit 2026-03-10T08:54:21.501312+0000 mon.vm02 (mon.0) 1197 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:54:23.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:22 vm02 bash[17473]: audit 2026-03-10T08:54:21.501312+0000 mon.vm02 (mon.0) 1197 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:54:23.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:22 vm02 bash[17473]: audit 2026-03-10T08:54:21.502795+0000 mon.vm02 (mon.0) 1198 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' 
entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T08:54:23.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:22 vm02 bash[17473]: audit 2026-03-10T08:54:21.502795+0000 mon.vm02 (mon.0) 1198 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T08:54:23.859 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop 2026-03-10T08:54:24.016 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:54:24.017 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (9m) 8m ago 15m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:54:24.017 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (8m) 8m ago 15m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:54:24.017 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (3m) 3m ago 15m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85 2026-03-10T08:54:24.017 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 3m ago 15m - - 2026-03-10T08:54:24.208 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:54:24.208 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:54:24.208 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state 2026-03-10T08:54:25.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:24 vm02 bash[17473]: cluster 2026-03-10T08:54:23.498458+0000 mgr.vm02.ttibzz (mgr.14195) 997 : cluster [DBG] pgmap v588: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:54:25.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:24 vm02 bash[17473]: cluster 2026-03-10T08:54:23.498458+0000 mgr.vm02.ttibzz (mgr.14195) 997 : cluster [DBG] pgmap v588: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail 2026-03-10T08:54:25.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:24 vm02 bash[17473]: audit 2026-03-10T08:54:23.848441+0000 mgr.vm02.ttibzz (mgr.14195) 998 : audit [DBG] from='client.16586 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:25.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:24 vm02 bash[17473]: audit 2026-03-10T08:54:23.848441+0000 mgr.vm02.ttibzz (mgr.14195) 998 : audit [DBG] from='client.16586 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:25.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:24 vm02 bash[17473]: audit 2026-03-10T08:54:24.019155+0000 mgr.vm02.ttibzz (mgr.14195) 999 : audit [DBG] from='client.16590 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:25.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:24 vm02 bash[17473]: audit 2026-03-10T08:54:24.019155+0000 mgr.vm02.ttibzz (mgr.14195) 999 : audit [DBG] from='client.16590 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:25.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:24 vm02 
bash[17473]: audit 2026-03-10T08:54:24.213806+0000 mon.vm02 (mon.0) 1199 : audit [DBG] from='client.? 192.168.123.102:0/1225033491' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:54:25.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:24 vm02 bash[17473]: audit 2026-03-10T08:54:24.213806+0000 mon.vm02 (mon.0) 1199 : audit [DBG] from='client.? 192.168.123.102:0/1225033491' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:54:27.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:26 vm02 bash[17473]: cluster 2026-03-10T08:54:25.498948+0000 mgr.vm02.ttibzz (mgr.14195) 1000 : cluster [DBG] pgmap v589: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 197 B/s wr, 0 op/s 2026-03-10T08:54:27.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:26 vm02 bash[17473]: cluster 2026-03-10T08:54:25.498948+0000 mgr.vm02.ttibzz (mgr.14195) 1000 : cluster [DBG] pgmap v589: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 197 B/s wr, 0 op/s 2026-03-10T08:54:29.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:28 vm02 bash[17473]: cluster 2026-03-10T08:54:27.499372+0000 mgr.vm02.ttibzz (mgr.14195) 1001 : cluster [DBG] pgmap v590: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 197 B/s wr, 0 op/s 2026-03-10T08:54:29.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:28 vm02 bash[17473]: cluster 2026-03-10T08:54:27.499372+0000 mgr.vm02.ttibzz (mgr.14195) 1001 : cluster [DBG] pgmap v590: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 197 B/s wr, 0 op/s 2026-03-10T08:54:29.391 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop 2026-03-10T08:54:29.548 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:54:29.548 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (9m) 8m ago 15m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:54:29.548 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (8m) 8m ago 15m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:54:29.548 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (3m) 3m ago 15m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85 2026-03-10T08:54:29.548 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 3m ago 15m - - 2026-03-10T08:54:29.748 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:54:29.748 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:54:29.748 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state 2026-03-10T08:54:30.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:29 vm02 bash[17473]: audit 2026-03-10T08:54:29.752987+0000 mon.vm02 (mon.0) 1200 : audit [DBG] from='client.? 192.168.123.102:0/1352296535' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:54:30.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:29 vm02 bash[17473]: audit 2026-03-10T08:54:29.752987+0000 mon.vm02 (mon.0) 1200 : audit [DBG] from='client.? 
192.168.123.102:0/1352296535' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:54:31.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:30 vm02 bash[17473]: audit 2026-03-10T08:54:29.380983+0000 mgr.vm02.ttibzz (mgr.14195) 1002 : audit [DBG] from='client.16598 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:31.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:30 vm02 bash[17473]: audit 2026-03-10T08:54:29.380983+0000 mgr.vm02.ttibzz (mgr.14195) 1002 : audit [DBG] from='client.16598 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:31.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:30 vm02 bash[17473]: cluster 2026-03-10T08:54:29.499771+0000 mgr.vm02.ttibzz (mgr.14195) 1003 : cluster [DBG] pgmap v591: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 197 B/s rd, 394 B/s wr, 0 op/s 2026-03-10T08:54:31.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:30 vm02 bash[17473]: cluster 2026-03-10T08:54:29.499771+0000 mgr.vm02.ttibzz (mgr.14195) 1003 : cluster [DBG] pgmap v591: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 197 B/s rd, 394 B/s wr, 0 op/s 2026-03-10T08:54:31.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:30 vm02 bash[17473]: audit 2026-03-10T08:54:29.550778+0000 mgr.vm02.ttibzz (mgr.14195) 1004 : audit [DBG] from='client.16602 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:31.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:30 vm02 bash[17473]: audit 2026-03-10T08:54:29.550778+0000 mgr.vm02.ttibzz (mgr.14195) 1004 : audit [DBG] from='client.16602 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:33.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:32 vm02 bash[17473]: cluster 2026-03-10T08:54:31.500264+0000 mgr.vm02.ttibzz (mgr.14195) 1005 : cluster [DBG] pgmap v592: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 197 B/s rd, 394 B/s wr, 0 op/s 2026-03-10T08:54:33.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:32 vm02 bash[17473]: cluster 2026-03-10T08:54:31.500264+0000 mgr.vm02.ttibzz (mgr.14195) 1005 : cluster [DBG] pgmap v592: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 197 B/s rd, 394 B/s wr, 0 op/s 2026-03-10T08:54:34.952 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop 2026-03-10T08:54:35.120 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:54:35.120 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (9m) 8m ago 15m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:54:35.120 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (9m) 8m ago 15m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:54:35.120 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (3m) 3m ago 15m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85 2026-03-10T08:54:35.120 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 3m ago 15m - - 2026-03-10T08:54:35.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 
08:54:34 vm02 bash[17473]: cluster 2026-03-10T08:54:33.500734+0000 mgr.vm02.ttibzz (mgr.14195) 1006 : cluster [DBG] pgmap v593: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:54:35.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:34 vm02 bash[17473]: cluster 2026-03-10T08:54:33.500734+0000 mgr.vm02.ttibzz (mgr.14195) 1006 : cluster [DBG] pgmap v593: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:54:35.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:34 vm02 bash[17473]: audit 2026-03-10T08:54:34.258208+0000 mon.vm02 (mon.0) 1201 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:54:35.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:34 vm02 bash[17473]: audit 2026-03-10T08:54:34.258208+0000 mon.vm02 (mon.0) 1201 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:54:35.338 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:54:35.338 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:54:35.338 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state 2026-03-10T08:54:36.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:36 vm02 bash[17473]: audit 2026-03-10T08:54:34.927082+0000 mgr.vm02.ttibzz (mgr.14195) 1007 : audit [DBG] from='client.16610 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:36.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:36 vm02 bash[17473]: audit 2026-03-10T08:54:34.927082+0000 mgr.vm02.ttibzz (mgr.14195) 1007 : audit [DBG] from='client.16610 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:36.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:36 vm02 bash[17473]: audit 2026-03-10T08:54:35.343528+0000 mon.vm02 (mon.0) 1202 : audit [DBG] from='client.? 192.168.123.102:0/3091798477' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:54:36.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:36 vm02 bash[17473]: audit 2026-03-10T08:54:35.343528+0000 mon.vm02 (mon.0) 1202 : audit [DBG] from='client.? 
192.168.123.102:0/3091798477' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:54:37.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:37 vm02 bash[17473]: audit 2026-03-10T08:54:35.122880+0000 mgr.vm02.ttibzz (mgr.14195) 1008 : audit [DBG] from='client.16614 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:37.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:37 vm02 bash[17473]: audit 2026-03-10T08:54:35.122880+0000 mgr.vm02.ttibzz (mgr.14195) 1008 : audit [DBG] from='client.16614 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:37.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:37 vm02 bash[17473]: cluster 2026-03-10T08:54:35.501287+0000 mgr.vm02.ttibzz (mgr.14195) 1009 : cluster [DBG] pgmap v594: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:54:37.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:37 vm02 bash[17473]: cluster 2026-03-10T08:54:35.501287+0000 mgr.vm02.ttibzz (mgr.14195) 1009 : cluster [DBG] pgmap v594: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:54:38.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:38 vm02 bash[17473]: cluster 2026-03-10T08:54:37.501779+0000 mgr.vm02.ttibzz (mgr.14195) 1010 : cluster [DBG] pgmap v595: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:54:38.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:38 vm02 bash[17473]: cluster 2026-03-10T08:54:37.501779+0000 mgr.vm02.ttibzz (mgr.14195) 1010 : cluster [DBG] pgmap v595: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:54:40.524 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop 2026-03-10T08:54:40.683 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:54:40.683 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (9m) 9m ago 15m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:54:40.683 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (9m) 9m ago 15m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:54:40.683 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (3m) 3m ago 15m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85 2026-03-10T08:54:40.683 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 3m ago 15m - - 2026-03-10T08:54:40.883 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:54:40.883 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:54:40.883 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state 2026-03-10T08:54:41.013 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:40 vm02 bash[17473]: cluster 2026-03-10T08:54:39.502180+0000 mgr.vm02.ttibzz (mgr.14195) 1011 : cluster [DBG] pgmap v596: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 
2026-03-10T08:54:41.013 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:40 vm02 bash[17473]: cluster 2026-03-10T08:54:39.502180+0000 mgr.vm02.ttibzz (mgr.14195) 1011 : cluster [DBG] pgmap v596: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:54:42.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:41 vm02 bash[17473]: audit 2026-03-10T08:54:40.512105+0000 mgr.vm02.ttibzz (mgr.14195) 1012 : audit [DBG] from='client.16622 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:42.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:41 vm02 bash[17473]: audit 2026-03-10T08:54:40.512105+0000 mgr.vm02.ttibzz (mgr.14195) 1012 : audit [DBG] from='client.16622 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:42.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:41 vm02 bash[17473]: audit 2026-03-10T08:54:40.686065+0000 mgr.vm02.ttibzz (mgr.14195) 1013 : audit [DBG] from='client.16626 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:42.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:41 vm02 bash[17473]: audit 2026-03-10T08:54:40.686065+0000 mgr.vm02.ttibzz (mgr.14195) 1013 : audit [DBG] from='client.16626 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:42.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:41 vm02 bash[17473]: audit 2026-03-10T08:54:40.888730+0000 mon.vm02 (mon.0) 1203 : audit [DBG] from='client.? 192.168.123.102:0/1960973914' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:54:42.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:41 vm02 bash[17473]: audit 2026-03-10T08:54:40.888730+0000 mon.vm02 (mon.0) 1203 : audit [DBG] from='client.? 
192.168.123.102:0/1960973914' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:54:43.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:42 vm02 bash[17473]: cluster 2026-03-10T08:54:41.502742+0000 mgr.vm02.ttibzz (mgr.14195) 1014 : cluster [DBG] pgmap v597: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:54:43.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:42 vm02 bash[17473]: cluster 2026-03-10T08:54:41.502742+0000 mgr.vm02.ttibzz (mgr.14195) 1014 : cluster [DBG] pgmap v597: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:54:45.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:44 vm02 bash[17473]: cluster 2026-03-10T08:54:43.503205+0000 mgr.vm02.ttibzz (mgr.14195) 1015 : cluster [DBG] pgmap v598: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:54:45.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:44 vm02 bash[17473]: cluster 2026-03-10T08:54:43.503205+0000 mgr.vm02.ttibzz (mgr.14195) 1015 : cluster [DBG] pgmap v598: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:54:46.072 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop 2026-03-10T08:54:46.255 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:54:46.255 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (9m) 9m ago 15m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:54:46.255 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (9m) 9m ago 15m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:54:46.255 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (3m) 3m ago 15m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85 2026-03-10T08:54:46.255 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 3m ago 15m - - 2026-03-10T08:54:46.440 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:54:46.440 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:54:46.440 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state 2026-03-10T08:54:47.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:46 vm02 bash[17473]: cluster 2026-03-10T08:54:45.503674+0000 mgr.vm02.ttibzz (mgr.14195) 1016 : cluster [DBG] pgmap v599: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:54:47.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:46 vm02 bash[17473]: cluster 2026-03-10T08:54:45.503674+0000 mgr.vm02.ttibzz (mgr.14195) 1016 : cluster [DBG] pgmap v599: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:54:47.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:46 vm02 bash[17473]: audit 2026-03-10T08:54:46.445847+0000 mon.vm02 (mon.0) 1204 : audit [DBG] from='client.? 
192.168.123.102:0/1543507213' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:54:47.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:46 vm02 bash[17473]: audit 2026-03-10T08:54:46.445847+0000 mon.vm02 (mon.0) 1204 : audit [DBG] from='client.? 192.168.123.102:0/1543507213' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:54:48.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:47 vm02 bash[17473]: audit 2026-03-10T08:54:46.059210+0000 mgr.vm02.ttibzz (mgr.14195) 1017 : audit [DBG] from='client.16634 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:48.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:47 vm02 bash[17473]: audit 2026-03-10T08:54:46.059210+0000 mgr.vm02.ttibzz (mgr.14195) 1017 : audit [DBG] from='client.16634 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:48.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:47 vm02 bash[17473]: audit 2026-03-10T08:54:46.258447+0000 mgr.vm02.ttibzz (mgr.14195) 1018 : audit [DBG] from='client.16638 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:48.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:47 vm02 bash[17473]: audit 2026-03-10T08:54:46.258447+0000 mgr.vm02.ttibzz (mgr.14195) 1018 : audit [DBG] from='client.16638 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:54:49.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:48 vm02 bash[17473]: cluster 2026-03-10T08:54:47.504045+0000 mgr.vm02.ttibzz (mgr.14195) 1019 : cluster [DBG] pgmap v600: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:54:49.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:48 vm02 bash[17473]: cluster 2026-03-10T08:54:47.504045+0000 mgr.vm02.ttibzz (mgr.14195) 1019 : cluster [DBG] pgmap v600: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:54:50.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:49 vm02 bash[17473]: audit 2026-03-10T08:54:49.258369+0000 mon.vm02 (mon.0) 1205 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:54:50.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:49 vm02 bash[17473]: audit 2026-03-10T08:54:49.258369+0000 mon.vm02 (mon.0) 1205 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:54:51.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:50 vm02 bash[17473]: cluster 2026-03-10T08:54:49.504450+0000 mgr.vm02.ttibzz (mgr.14195) 1020 : cluster [DBG] pgmap v601: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:54:51.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:50 vm02 bash[17473]: cluster 2026-03-10T08:54:49.504450+0000 mgr.vm02.ttibzz (mgr.14195) 1020 : cluster [DBG] pgmap v601: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:54:51.620 
INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:54:51.791 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:54:51.791 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (9m) 9m ago 15m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:54:51.791 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (9m) 9m ago 15m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:54:51.791 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (4m) 3m ago 15m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:54:51.791 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 3m ago 15m - -
2026-03-10T08:54:51.987 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:54:51.987 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:54:51.987 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:54:53.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:52 vm02 bash[17473]: cluster 2026-03-10T08:54:51.504920+0000 mgr.vm02.ttibzz (mgr.14195) 1021 : cluster [DBG] pgmap v602: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:54:53.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:52 vm02 bash[17473]: audit 2026-03-10T08:54:51.610075+0000 mgr.vm02.ttibzz (mgr.14195) 1022 : audit [DBG] from='client.16646 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:54:53.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:52 vm02 bash[17473]: audit 2026-03-10T08:54:51.793720+0000 mgr.vm02.ttibzz (mgr.14195) 1023 : audit [DBG] from='client.16650 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:54:53.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:52 vm02 bash[17473]: audit 2026-03-10T08:54:51.992301+0000 mon.vm02 (mon.0) 1206 : audit [DBG] from='client.? 192.168.123.102:0/1825142476' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:54:55.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:54 vm02 bash[17473]: cluster 2026-03-10T08:54:53.505304+0000 mgr.vm02.ttibzz (mgr.14195) 1024 : cluster [DBG] pgmap v603: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:54:57.172 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:54:57.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:56 vm02 bash[17473]: cluster 2026-03-10T08:54:55.505774+0000 mgr.vm02.ttibzz (mgr.14195) 1025 : cluster [DBG] pgmap v604: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:54:57.330 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:54:57.330 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (10m) 9m ago 15m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:54:57.330 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (9m) 9m ago 15m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:54:57.330 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (4m) 3m ago 15m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:54:57.330 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 3m ago 15m - -
2026-03-10T08:54:57.523 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:54:57.523 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:54:57.523 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:54:58.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:57 vm02 bash[17473]: audit 2026-03-10T08:54:57.527937+0000 mon.vm02 (mon.0) 1207 : audit [DBG] from='client.? 192.168.123.102:0/3749919035' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
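The stdout records above repeat every five to six seconds: the harness echoes the wait message, dumps the rgw daemon table, and prints health detail. A minimal bash sketch of that polling pattern, reconstructed from the visible output (the 300-second budget, the 5-second sleep, and the exact "stopped" predicate are assumptions, not taken from this excerpt):

    # Hypothetical reconstruction of the stop-wait poll driving the
    # "Waiting for rgw.foo.vm07.zylyez to stop" records above.
    rgw=rgw.foo.vm07.zylyez
    timeout 300 bash -c "
      while ! ceph orch ps | grep $rgw | grep stopped; do
        echo 'Waiting for $rgw to stop'
        ceph orch ps --daemon-type rgw   # the NAME/HOST/PORTS tables above
        ceph health detail               # the HEALTH_WARN blocks above
        sleep 5
      done
    "

Note the STATUS column in every iteration: the daemon settles in "error" rather than "stopped", so a check that greps for "stopped" can never match here; the wait only ends when its time budget runs out, which is consistent with the restart sequence at 08:55:59 further below.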
2026-03-10T08:54:59.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:58 vm02 bash[17473]: audit 2026-03-10T08:54:57.161090+0000 mgr.vm02.ttibzz (mgr.14195) 1026 : audit [DBG] from='client.16658 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:54:59.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:58 vm02 bash[17473]: audit 2026-03-10T08:54:57.332525+0000 mgr.vm02.ttibzz (mgr.14195) 1027 : audit [DBG] from='client.16662 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:54:59.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:54:58 vm02 bash[17473]: cluster 2026-03-10T08:54:57.506218+0000 mgr.vm02.ttibzz (mgr.14195) 1028 : cluster [DBG] pgmap v605: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 7.3 KiB/s rd, 170 B/s wr, 12 op/s
2026-03-10T08:55:00.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:00 vm02 bash[17473]: cluster 2026-03-10T08:54:59.506645+0000 mgr.vm02.ttibzz (mgr.14195) 1029 : cluster [DBG] pgmap v606: 129 pgs: 129 active+clean; 454 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 341 B/s wr, 32 op/s
2026-03-10T08:55:02.715 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:55:02.882 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:55:02.882 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (10m) 9m ago 15m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:55:02.882 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (9m) 9m ago 15m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:55:02.882 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (4m) 3m ago 16m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:55:02.882 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 3m ago 15m - -
2026-03-10T08:55:03.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:02 vm02 bash[17473]: cluster 2026-03-10T08:55:01.507334+0000 mgr.vm02.ttibzz (mgr.14195) 1030 : cluster [DBG] pgmap v607: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s
2026-03-10T08:55:03.077 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:55:03.077 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:55:03.077 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:55:04.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:03 vm02 bash[17473]: audit 2026-03-10T08:55:02.705486+0000 mgr.vm02.ttibzz (mgr.14195) 1031 : audit [DBG] from='client.16670 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:55:04.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:03 vm02 bash[17473]: audit 2026-03-10T08:55:02.884380+0000 mgr.vm02.ttibzz (mgr.14195) 1032 : audit [DBG] from='client.16674 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:55:04.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:03 vm02 bash[17473]: audit 2026-03-10T08:55:03.081736+0000 mon.vm02 (mon.0) 1208 : audit [DBG] from='client.? 192.168.123.102:0/4217181873' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:55:05.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:04 vm02 bash[17473]: cluster 2026-03-10T08:55:03.507922+0000 mgr.vm02.ttibzz (mgr.14195) 1033 : cluster [DBG] pgmap v608: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s
2026-03-10T08:55:05.283 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:04 vm02 bash[17473]: audit 2026-03-10T08:55:04.258781+0000 mon.vm02 (mon.0) 1209 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:55:07.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:07 vm02 bash[17473]: cluster 2026-03-10T08:55:05.508465+0000 mgr.vm02.ttibzz (mgr.14195) 1034 : cluster [DBG] pgmap v609: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s
2026-03-10T08:55:08.294 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:55:08.452 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:55:08.452 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (10m) 9m ago 16m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:55:08.452 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (9m) 9m ago 16m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:55:08.452 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (4m) 3m ago 16m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:55:08.452 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 3m ago 16m - -
2026-03-10T08:55:08.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:08 vm02 bash[17473]: cluster 2026-03-10T08:55:07.508872+0000 mgr.vm02.ttibzz (mgr.14195) 1035 : cluster [DBG] pgmap v610: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s
2026-03-10T08:55:08.653 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:55:08.653 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:55:08.653 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:55:10.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:09 vm02 bash[17473]: audit 2026-03-10T08:55:08.283121+0000 mgr.vm02.ttibzz (mgr.14195) 1036 : audit [DBG] from='client.16682 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:55:10.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:09 vm02 bash[17473]: audit 2026-03-10T08:55:08.454483+0000 mgr.vm02.ttibzz (mgr.14195) 1037 : audit [DBG] from='client.16686 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:55:10.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:09 vm02 bash[17473]: audit 2026-03-10T08:55:08.658679+0000 mon.vm02 (mon.0) 1210 : audit [DBG] from='client.? 192.168.123.102:0/1802844293' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:55:11.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:10 vm02 bash[17473]: cluster 2026-03-10T08:55:09.509253+0000 mgr.vm02.ttibzz (mgr.14195) 1038 : cluster [DBG] pgmap v611: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 29 KiB/s rd, 341 B/s wr, 47 op/s
2026-03-10T08:55:13.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:12 vm02 bash[17473]: cluster 2026-03-10T08:55:11.509686+0000 mgr.vm02.ttibzz (mgr.14195) 1039 : cluster [DBG] pgmap v612: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 170 B/s wr, 27 op/s
2026-03-10T08:55:13.856 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:55:14.031 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:55:14.031 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (10m) 9m ago 16m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:55:14.032 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (9m) 9m ago 16m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:55:14.032 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (4m) 3m ago 16m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:55:14.032 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 3m ago 16m - -
2026-03-10T08:55:14.247 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:55:14.247 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:55:14.247 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:55:15.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:14 vm02 bash[17473]: cluster 2026-03-10T08:55:13.510246+0000 mgr.vm02.ttibzz (mgr.14195) 1040 : cluster [DBG] pgmap v613: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:55:15.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:14 vm02 bash[17473]: audit 2026-03-10T08:55:13.842506+0000 mgr.vm02.ttibzz (mgr.14195) 1041 : audit [DBG] from='client.16694 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:55:15.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:14 vm02 bash[17473]: audit 2026-03-10T08:55:14.033864+0000 mgr.vm02.ttibzz (mgr.14195) 1042 : audit [DBG] from='client.16698 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:55:15.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:14 vm02 bash[17473]: audit 2026-03-10T08:55:14.252317+0000 mon.vm02 (mon.0) 1211 : audit [DBG] from='client.? 192.168.123.102:0/2810468983' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:55:16.514 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:16 vm02 bash[17473]: cluster 2026-03-10T08:55:15.510678+0000 mgr.vm02.ttibzz (mgr.14195) 1043 : cluster [DBG] pgmap v614: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:55:19.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:18 vm02 bash[17473]: cluster 2026-03-10T08:55:17.511143+0000 mgr.vm02.ttibzz (mgr.14195) 1044 : cluster [DBG] pgmap v615: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:55:19.445 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:55:19.608 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:55:19.608 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (10m) 9m ago 16m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:55:19.608 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (9m) 9m ago 16m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:55:19.608 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (4m) 3m ago 16m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:55:19.608 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 3m ago 16m - -
2026-03-10T08:55:19.820 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:55:19.821 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:55:19.821 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:55:20.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:19 vm02 bash[17473]: audit 2026-03-10T08:55:19.258896+0000 mon.vm02 (mon.0) 1212 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:55:21.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:20 vm02 bash[17473]: audit 2026-03-10T08:55:19.427299+0000 mgr.vm02.ttibzz (mgr.14195) 1045 : audit [DBG] from='client.16706 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:55:21.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:20 vm02 bash[17473]: cluster 2026-03-10T08:55:19.511514+0000 mgr.vm02.ttibzz (mgr.14195) 1046 : cluster [DBG] pgmap v616: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:55:21.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:20 vm02 bash[17473]: audit 2026-03-10T08:55:19.610958+0000 mgr.vm02.ttibzz (mgr.14195) 1047 : audit [DBG] from='client.16710 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:55:21.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:20 vm02 bash[17473]: audit 2026-03-10T08:55:19.825988+0000 mon.vm02 (mon.0) 1213 : audit [DBG] from='client.? 192.168.123.102:0/2946596579' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:55:22.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:22 vm02 bash[17473]: audit 2026-03-10T08:55:21.546036+0000 mon.vm02 (mon.0) 1214 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:55:23.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:23 vm02 bash[17473]: cluster 2026-03-10T08:55:21.511941+0000 mgr.vm02.ttibzz (mgr.14195) 1048 : cluster [DBG] pgmap v617: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:55:23.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:23 vm02 bash[17473]: audit 2026-03-10T08:55:21.925173+0000 mon.vm02 (mon.0) 1215 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:55:23.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:23 vm02 bash[17473]: audit 2026-03-10T08:55:21.926245+0000 mon.vm02 (mon.0) 1216 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T08:55:23.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:23 vm02 bash[17473]: cluster 2026-03-10T08:55:21.927232+0000 mgr.vm02.ttibzz (mgr.14195) 1049 : cluster [DBG] pgmap v618: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:55:23.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:23 vm02 bash[17473]: audit 2026-03-10T08:55:22.025974+0000 mon.vm02 (mon.0) 1217 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:55:23.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:23 vm02 bash[17473]: audit 2026-03-10T08:55:22.028160+0000 mon.vm02 (mon.0) 1218 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T08:55:24.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:24 vm02 bash[17473]: cluster 2026-03-10T08:55:23.927834+0000 mgr.vm02.ttibzz (mgr.14195) 1050 : cluster [DBG] pgmap v619: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:55:25.011 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:55:25.185 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:55:25.185 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (10m) 9m ago 16m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:55:25.185 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (9m) 9m ago 16m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:55:25.185 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (4m) 4m ago 16m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:55:25.185 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 4m ago 16m - -
2026-03-10T08:55:25.395 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:55:25.395 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:55:25.395 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:55:25.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:25 vm02 bash[17473]: audit 2026-03-10T08:55:24.999078+0000 mgr.vm02.ttibzz (mgr.14195) 1051 : audit [DBG] from='client.16718 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:55:26.643 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:26 vm02 bash[17473]: audit 2026-03-10T08:55:25.186982+0000 mgr.vm02.ttibzz (mgr.14195) 1052 : audit [DBG] from='client.16722 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
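Throughout this window rgw.foo.vm07.zylyez only ever reports "error", and the orch ps table carries no reason. Two follow-up commands one could run against such a cluster to dig further; both are standard cephadm/orchestrator options, though neither is part of this run:

    # Structured view of the failing daemon (the JSON form of `ceph orch ps`
    # includes fields such as status_desc and last_refresh).
    ceph orch ps --daemon-type rgw --format json-pretty

    # On vm07 itself, pull the daemon's journal through cephadm; arguments
    # after -- are passed to journalctl (here: the last 100 lines).
    cephadm logs --name rgw.foo.vm07.zylyez -- -n 100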
2026-03-10T08:55:26.643 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:26 vm02 bash[17473]: audit 2026-03-10T08:55:25.399948+0000 mon.vm02 (mon.0) 1219 : audit [DBG] from='client.? 192.168.123.102:0/3701620706' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:55:26.643 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:26 vm02 bash[17473]: cluster 2026-03-10T08:55:25.928282+0000 mgr.vm02.ttibzz (mgr.14195) 1053 : cluster [DBG] pgmap v620: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 196 B/s wr, 0 op/s
2026-03-10T08:55:28.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:28 vm02 bash[17473]: cluster 2026-03-10T08:55:27.928692+0000 mgr.vm02.ttibzz (mgr.14195) 1054 : cluster [DBG] pgmap v621: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 196 B/s wr, 0 op/s
2026-03-10T08:55:30.573 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:55:30.724 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:55:30.724 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (10m) 9m ago 16m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:55:30.724 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (9m) 9m ago 16m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:55:30.724 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (4m) 4m ago 16m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:55:30.724 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 4m ago 16m - -
2026-03-10T08:55:30.914 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:55:30.914 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:55:30.914 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:55:31.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:31 vm02 bash[17473]: cluster 2026-03-10T08:55:29.929121+0000 mgr.vm02.ttibzz (mgr.14195) 1055 : cluster [DBG] pgmap v622: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 196 B/s rd, 393 B/s wr, 0 op/s
2026-03-10T08:55:31.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:31 vm02 bash[17473]: audit 2026-03-10T08:55:30.919510+0000 mon.vm02 (mon.0) 1220 : audit [DBG] from='client.? 192.168.123.102:0/2436350323' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:55:32.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:32 vm02 bash[17473]: audit 2026-03-10T08:55:30.559310+0000 mgr.vm02.ttibzz (mgr.14195) 1056 : audit [DBG] from='client.25739 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:55:32.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:32 vm02 bash[17473]: audit 2026-03-10T08:55:30.726935+0000 mgr.vm02.ttibzz (mgr.14195) 1057 : audit [DBG] from='client.25743 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:55:32.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:32 vm02 bash[17473]: cluster 2026-03-10T08:55:31.929525+0000 mgr.vm02.ttibzz (mgr.14195) 1058 : cluster [DBG] pgmap v623: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 196 B/s rd, 393 B/s wr, 0 op/s
2026-03-10T08:55:35.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:35 vm02 bash[17473]: cluster 2026-03-10T08:55:33.929944+0000 mgr.vm02.ttibzz (mgr.14195) 1059 : cluster [DBG] pgmap v624: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:55:35.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:35 vm02 bash[17473]: audit 2026-03-10T08:55:34.259325+0000 mon.vm02 (mon.0) 1221 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:55:36.133 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:55:36.302 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:55:36.332 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (10m) 9m ago 16m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:55:36.332 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (10m) 9m ago 16m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:55:36.332 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (4m) 4m ago 16m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:55:36.332 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 4m ago 16m - -
2026-03-10T08:55:36.487 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:55:36.487 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:55:36.487 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:55:37.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:37 vm02 bash[17473]: cluster 2026-03-10T08:55:35.930520+0000 mgr.vm02.ttibzz (mgr.14195) 1060 : cluster [DBG] pgmap v625: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:55:37.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:37 vm02 bash[17473]: audit 2026-03-10T08:55:36.492503+0000 mon.vm02 (mon.0) 1222 : audit [DBG] from='client.? 192.168.123.102:0/2170159922' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:55:38.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:38 vm02 bash[17473]: audit 2026-03-10T08:55:36.114953+0000 mgr.vm02.ttibzz (mgr.14195) 1061 : audit [DBG] from='client.16740 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:55:38.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:38 vm02 bash[17473]: audit 2026-03-10T08:55:36.304734+0000 mgr.vm02.ttibzz (mgr.14195) 1062 : audit [DBG] from='client.16744 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:55:38.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:38 vm02 bash[17473]: cluster 2026-03-10T08:55:37.930915+0000 mgr.vm02.ttibzz (mgr.14195) 1063 : cluster [DBG] pgmap v626: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 3.3 KiB/s rd, 170 B/s wr, 5 op/s
2026-03-10T08:55:40.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:40 vm02 bash[17473]: cluster 2026-03-10T08:55:39.931334+0000 mgr.vm02.ttibzz (mgr.14195) 1064 : cluster [DBG] pgmap v627: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 341 B/s wr, 26 op/s
2026-03-10T08:55:41.682 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:55:41.847 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:55:41.847 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (10m) 10m ago 16m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:55:41.847 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (10m) 10m ago 16m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:55:41.847 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (4m) 4m ago 16m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:55:41.847 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 4m ago 16m - -
2026-03-10T08:55:42.064 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:55:42.064 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:55:42.064 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:55:43.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:43 vm02 bash[17473]: audit 2026-03-10T08:55:41.665848+0000 mgr.vm02.ttibzz (mgr.14195) 1065 : audit [DBG] from='client.16752 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:55:43.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:43 vm02 bash[17473]: audit 2026-03-10T08:55:41.849396+0000 mgr.vm02.ttibzz (mgr.14195) 1066 : audit [DBG] from='client.16756 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:55:43.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:43 vm02 bash[17473]: cluster 2026-03-10T08:55:41.931826+0000 mgr.vm02.ttibzz (mgr.14195) 1067 : cluster [DBG] pgmap v628: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 32 KiB/s rd, 170 B/s wr, 53 op/s
2026-03-10T08:55:43.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:43 vm02 bash[17473]: audit 2026-03-10T08:55:42.069249+0000 mon.vm02 (mon.0) 1223 : audit [DBG] from='client.? 192.168.123.102:0/1392763492' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:55:44.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:44 vm02 bash[17473]: cluster 2026-03-10T08:55:43.932336+0000 mgr.vm02.ttibzz (mgr.14195) 1068 : cluster [DBG] pgmap v629: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s
2026-03-10T08:55:47.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:47 vm02 bash[17473]: cluster 2026-03-10T08:55:45.932955+0000 mgr.vm02.ttibzz (mgr.14195) 1069 : cluster [DBG] pgmap v630: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s
2026-03-10T08:55:47.282 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:55:47.464 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:55:47.464 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (10m) 10m ago 16m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:55:47.464 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (10m) 10m ago 16m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:55:47.464 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (4m) 4m ago 16m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:55:47.464 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 4m ago 16m - -
2026-03-10T08:55:47.683 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:55:47.683 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:55:47.683 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:55:48.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:48 vm02 bash[17473]: audit 2026-03-10T08:55:47.268316+0000 mgr.vm02.ttibzz (mgr.14195) 1070 : audit [DBG] from='client.16762 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:55:48.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:48 vm02 bash[17473]: audit 2026-03-10T08:55:47.464739+0000 mgr.vm02.ttibzz (mgr.14195) 1071 : audit [DBG] from='client.16766 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:55:48.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:48 vm02 bash[17473]: audit 2026-03-10T08:55:47.688376+0000 mon.vm02 (mon.0) 1224 : audit [DBG] from='client.? 192.168.123.102:0/3324076007' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:55:48.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:48 vm02 bash[17473]: cluster 2026-03-10T08:55:47.933454+0000 mgr.vm02.ttibzz (mgr.14195) 1072 : cluster [DBG] pgmap v631: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s
2026-03-10T08:55:49.689 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:49 vm02 bash[17473]: audit 2026-03-10T08:55:49.259931+0000 mon.vm02 (mon.0) 1225 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:55:50.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:50 vm02 bash[17473]: cluster 2026-03-10T08:55:49.933887+0000 mgr.vm02.ttibzz (mgr.14195) 1073 : cluster [DBG] pgmap v632: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 33 KiB/s rd, 170 B/s wr, 54 op/s
2026-03-10T08:55:52.898 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop
2026-03-10T08:55:53.090 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:55:53.090 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (10m) 10m ago 16m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:55:53.090 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (10m) 10m ago 16m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:55:53.090 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (5m) 4m ago 16m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:55:53.090 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 4m ago 16m - -
2026-03-10T08:55:53.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:53 vm02 bash[17473]: cluster 2026-03-10T08:55:51.934495+0000 mgr.vm02.ttibzz (mgr.14195) 1074 : cluster [DBG] pgmap v633: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 33 op/s
2026-03-10T08:55:53.304 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:55:53.304 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:55:53.304 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:55:54.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:54 vm02 bash[17473]: audit 2026-03-10T08:55:52.884487+0000 mgr.vm02.ttibzz (mgr.14195) 1075 : audit [DBG] from='client.16774 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:55:54.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:54 vm02 bash[17473]: audit 2026-03-10T08:55:53.308826+0000 mon.vm02 (mon.0) 1226 : audit [DBG] from='client.? 192.168.123.102:0/327620330' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
192.168.123.102:0/327620330' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:55:55.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:55 vm02 bash[17473]: audit 2026-03-10T08:55:53.091542+0000 mgr.vm02.ttibzz (mgr.14195) 1076 : audit [DBG] from='client.16778 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:55:55.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:55 vm02 bash[17473]: audit 2026-03-10T08:55:53.091542+0000 mgr.vm02.ttibzz (mgr.14195) 1076 : audit [DBG] from='client.16778 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:55:55.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:55 vm02 bash[17473]: cluster 2026-03-10T08:55:53.935041+0000 mgr.vm02.ttibzz (mgr.14195) 1077 : cluster [DBG] pgmap v634: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 3.5 KiB/s rd, 0 B/s wr, 5 op/s 2026-03-10T08:55:55.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:55 vm02 bash[17473]: cluster 2026-03-10T08:55:53.935041+0000 mgr.vm02.ttibzz (mgr.14195) 1077 : cluster [DBG] pgmap v634: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 3.5 KiB/s rd, 0 B/s wr, 5 op/s 2026-03-10T08:55:56.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:56 vm02 bash[17473]: cluster 2026-03-10T08:55:55.935561+0000 mgr.vm02.ttibzz (mgr.14195) 1078 : cluster [DBG] pgmap v635: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:55:56.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:56 vm02 bash[17473]: cluster 2026-03-10T08:55:55.935561+0000 mgr.vm02.ttibzz (mgr.14195) 1078 : cluster [DBG] pgmap v635: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:55:58.507 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to stop 2026-03-10T08:55:58.682 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:55:58.682 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (11m) 10m ago 16m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc 2026-03-10T08:55:58.682 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (10m) 10m ago 16m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545 2026-03-10T08:55:58.682 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (5m) 4m ago 16m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85 2026-03-10T08:55:58.682 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 4m ago 16m - - 2026-03-10T08:55:58.893 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-03-10T08:55:58.893 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-10T08:55:58.893 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state 2026-03-10T08:55:59.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:59 vm02 bash[17473]: cluster 2026-03-10T08:55:57.936033+0000 mgr.vm02.ttibzz (mgr.14195) 1079 : cluster [DBG] pgmap v636: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 
2026-03-10T08:55:59.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:55:59 vm02 bash[17473]: audit 2026-03-10T08:55:58.898289+0000 mon.vm02 (mon.0) 1227 : audit [DBG] from='client.? 192.168.123.102:0/1909384014' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:55:59.829 INFO:teuthology.orchestra.run.vm02.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-03-10T08:55:59.836 INFO:teuthology.orchestra.run.vm02.stderr: Dload Upload Total Spent Left Speed
2026-03-10T08:55:59.836 INFO:teuthology.orchestra.run.vm02.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 18700 0 --:--:-- --:--:-- --:--:-- 18700
2026-03-10T08:56:00.070 INFO:teuthology.orchestra.run.vm02.stdout:anonymousScheduled to start rgw.foo.vm07.zylyez on host 'vm07'
2026-03-10T08:56:00.302 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to start
2026-03-10T08:56:00.534 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:00 vm02 bash[17473]: audit 2026-03-10T08:55:58.494396+0000 mgr.vm02.ttibzz (mgr.14195) 1080 : audit [DBG] from='client.16786 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:56:00.534 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:00 vm02 bash[17473]: audit 2026-03-10T08:55:58.683791+0000 mgr.vm02.ttibzz (mgr.14195) 1081 : audit [DBG] from='client.16790 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:56:00.535 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:00 vm02 bash[17473]: cluster 2026-03-10T08:55:59.936549+0000 mgr.vm02.ttibzz (mgr.14195) 1082 : cluster [DBG] pgmap v637: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:56:00.535 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:00 vm02 bash[17473]: audit 2026-03-10T08:56:00.025007+0000 mgr.vm02.ttibzz (mgr.14195) 1083 : audit [DBG] from='client.16798 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm07.zylyez", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:56:00.535 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:00 vm02 bash[17473]: cephadm 2026-03-10T08:56:00.025389+0000 mgr.vm02.ttibzz (mgr.14195) 1084 : cephadm [INF] Schedule start daemon rgw.foo.vm07.zylyez
2026-03-10T08:56:00.535 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:00 vm02 bash[17473]: audit 2026-03-10T08:56:00.040242+0000 mon.vm02 (mon.0) 1228 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:56:00.535 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:00 vm02 bash[17473]: audit 2026-03-10T08:56:00.074559+0000 mon.vm02 (mon.0) 1229 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:56:00.535 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:00 vm02 bash[17473]: audit 2026-03-10T08:56:00.079211+0000 mon.vm02 (mon.0) 1230 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:56:00.536 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:56:00.536 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (11m) 10m ago 16m 93.4M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:56:00.536 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (10m) 10m ago 16m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:56:00.536 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (5m) 4m ago 16m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:56:00.536 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 4m ago 16m - -
2026-03-10T08:56:00.776 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:56:00.776 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:56:00.776 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:56:01.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:01 vm02 bash[17473]: audit 2026-03-10T08:56:00.282045+0000 mgr.vm02.ttibzz (mgr.14195) 1085 : audit [DBG] from='client.16802 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:56:01.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:01 vm02 bash[17473]: audit 2026-03-10T08:56:00.434905+0000 mon.vm02 (mon.0) 1231 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:56:01.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:01 vm02 bash[17473]: audit 2026-03-10T08:56:00.443319+0000 mon.vm02 (mon.0) 1232 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:56:01.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:01 vm02 bash[17473]: audit 2026-03-10T08:56:00.532577+0000 mgr.vm02.ttibzz (mgr.14195) 1086 : audit [DBG] from='client.16806 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:56:01.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:01 vm02 bash[17473]: audit 2026-03-10T08:56:00.781056+0000 mon.vm02 (mon.0) 1233 : audit [DBG] from='client.? 192.168.123.102:0/1637441055' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:56:02.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:02 vm02 bash[17473]: cluster 2026-03-10T08:56:01.937024+0000 mgr.vm02.ttibzz (mgr.14195) 1087 : cluster [DBG] pgmap v638: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:56:05.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:05 vm02 bash[17473]: cluster 2026-03-10T08:56:03.937458+0000 mgr.vm02.ttibzz (mgr.14195) 1088 : cluster [DBG] pgmap v639: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:56:05.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:05 vm02 bash[17473]: audit 2026-03-10T08:56:04.259835+0000 mon.vm02 (mon.0) 1234 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:56:06.018 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to start
2026-03-10T08:56:06.256 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:56:06.256 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (11m) 0s ago 17m 123M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:56:06.256 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (10m) 0s ago 17m 125M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:56:06.256 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (5m) 4m ago 17m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:56:06.256 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 4m ago 17m - -
2026-03-10T08:56:06.469 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:56:06.469 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:56:06.469 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:56:06.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:06 vm02 bash[17473]: cluster 2026-03-10T08:56:05.937981+0000 mgr.vm02.ttibzz (mgr.14195) 1089 : cluster [DBG] pgmap v640: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:56:06.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:06 vm02 bash[17473]: audit 2026-03-10T08:56:05.996461+0000 mgr.vm02.ttibzz (mgr.14195) 1090 : audit [DBG] from='client.16814 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:56:07.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:07 vm02 bash[17473]: audit 2026-03-10T08:56:06.228127+0000 mon.vm02 (mon.0) 1235 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:56:07.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:07 vm02 bash[17473]: audit 2026-03-10T08:56:06.234255+0000 mon.vm02 (mon.0) 1236 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:56:07.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:07 vm02 bash[17473]: audit 2026-03-10T08:56:06.235369+0000 mon.vm02 (mon.0) 1237 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:56:07.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:07 vm02 bash[17473]: audit 2026-03-10T08:56:06.236032+0000 mon.vm02 (mon.0) 1238 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T08:56:07.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:07 vm02 bash[17473]: cluster 2026-03-10T08:56:06.237069+0000 mgr.vm02.ttibzz (mgr.14195) 1091 : cluster [DBG] pgmap v641: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 198 B/s wr, 0 op/s
2026-03-10T08:56:07.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:07 vm02 bash[17473]: cluster 2026-03-10T08:56:06.237210+0000 mgr.vm02.ttibzz (mgr.14195) 1092 : cluster [DBG] pgmap v642: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 123 B/s rd, 246 B/s wr, 0 op/s
2026-03-10T08:56:07.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:07 vm02 bash[17473]: audit 2026-03-10T08:56:06.241794+0000 mon.vm02 (mon.0) 1239 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:56:07.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:07 vm02 bash[17473]: audit 2026-03-10T08:56:06.243703+0000 mon.vm02 (mon.0) 1240 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T08:56:07.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:07 vm02 bash[17473]: audit 2026-03-10T08:56:06.258319+0000 mgr.vm02.ttibzz (mgr.14195) 1093 : audit [DBG] from='client.16818 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:56:07.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:07 vm02 bash[17473]: audit 2026-03-10T08:56:06.473904+0000 mon.vm02 (mon.0) 1241 : audit [DBG] from='client.? 192.168.123.102:0/1859375579' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:56:09.690 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:09 vm02 bash[17473]: cluster 2026-03-10T08:56:08.237581+0000 mgr.vm02.ttibzz (mgr.14195) 1094 : cluster [DBG] pgmap v643: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail
2026-03-10T08:56:11.682 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to start
2026-03-10T08:56:11.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:11 vm02 bash[17473]: cluster 2026-03-10T08:56:10.238427+0000 mgr.vm02.ttibzz (mgr.14195) 1095 : cluster [DBG] pgmap v644: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 123 B/s rd, 246 B/s wr, 0 op/s
2026-03-10T08:56:11.856 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:56:11.856 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (11m) 5s ago 17m 123M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:56:11.856 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (10m) 5s ago 17m 125M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:56:11.856 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (5m) 4m ago 17m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:56:11.857 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 4m ago 17m - -
2026-03-10T08:56:12.063 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:56:12.063 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:56:12.063 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:56:12.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:12 vm02 bash[17473]: audit 2026-03-10T08:56:11.669678+0000 mgr.vm02.ttibzz (mgr.14195) 1096 : audit [DBG] from='client.16826 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:56:12.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:12 vm02 bash[17473]: audit 2026-03-10T08:56:11.858164+0000 mgr.vm02.ttibzz (mgr.14195) 1097 : audit [DBG] from='client.16830 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:56:12.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:12 vm02 bash[17473]: audit 2026-03-10T08:56:12.067714+0000 mon.vm02 (mon.0) 1242 : audit [DBG] from='client.? 192.168.123.102:0/3739822912' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:56:13.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:13 vm02 bash[17473]: cluster 2026-03-10T08:56:12.239236+0000 mgr.vm02.ttibzz (mgr.14195) 1098 : cluster [DBG] pgmap v645: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 123 B/s rd, 246 B/s wr, 0 op/s
2026-03-10T08:56:15.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:15 vm02 bash[17473]: cluster 2026-03-10T08:56:14.239826+0000 mgr.vm02.ttibzz (mgr.14195) 1099 : cluster [DBG] pgmap v646: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 123 B/s rd, 246 B/s wr, 0 op/s
2026-03-10T08:56:15.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:15 vm02 bash[17473]: audit 2026-03-10T08:56:15.188313+0000 mon.vm02 (mon.0) 1243 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:56:15.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:15 vm02 bash[17473]: audit 2026-03-10T08:56:15.192816+0000 mon.vm02 (mon.0) 1244 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:56:15.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:15 vm02 bash[17473]: audit 2026-03-10T08:56:15.223411+0000 mon.vm02 (mon.0) 1245 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:56:17.265 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for rgw.foo.vm07.zylyez to start
2026-03-10T08:56:17.455 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:56:17.455 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (11m) 11s ago 17m 123M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:56:17.455 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (10m) 11s ago 17m 125M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:56:17.456 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (5m) 4m ago 17m 90.2M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:56:17.456 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 error 4m ago 17m - -
2026-03-10T08:56:17.682 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-03-10T08:56:17.682 INFO:teuthology.orchestra.run.vm02.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-03-10T08:56:17.682 INFO:teuthology.orchestra.run.vm02.stdout: daemon rgw.foo.vm07.zylyez on vm07 is in error state
2026-03-10T08:56:17.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:17 vm02 bash[17473]: cluster 2026-03-10T08:56:16.240278+0000 mgr.vm02.ttibzz (mgr.14195) 1100 : cluster [DBG] pgmap v647: 129 pgs: 129 active+clean; 454 KiB data, 251 MiB used, 160 GiB / 160 GiB avail; 102 B/s rd, 204 B/s wr, 0 op/s
2026-03-10T08:56:18.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:18 vm02 bash[17473]: audit 2026-03-10T08:56:17.251778+0000 mgr.vm02.ttibzz (mgr.14195) 1101 : audit [DBG] from='client.16848 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:56:18.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:18 vm02 bash[17473]: audit 2026-03-10T08:56:17.455090+0000 mgr.vm02.ttibzz (mgr.14195) 1102 : audit [DBG] from='client.16852 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:56:18.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:18 vm02 bash[17473]: audit 2026-03-10T08:56:17.686203+0000 mon.vm02 (mon.0) 1246 : audit [DBG] from='client.? 192.168.123.102:0/2581750025' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:56:19.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:19 vm02 bash[17473]: cluster 2026-03-10T08:56:18.240678+0000 mgr.vm02.ttibzz (mgr.14195) 1103 : cluster [DBG] pgmap v648: 129 pgs: 129 active+clean; 454 KiB data, 255 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 170 B/s wr, 16 op/s
2026-03-10T08:56:19.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:19 vm02 bash[17473]: audit 2026-03-10T08:56:19.260310+0000 mon.vm02 (mon.0) 1247 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:56:21.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:20 vm02 bash[17473]: audit 2026-03-10T08:56:19.661149+0000 mon.vm02 (mon.0) 1248 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:56:21.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:20 vm02 bash[17473]: audit 2026-03-10T08:56:19.666857+0000 mon.vm02 (mon.0) 1249 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:56:21.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:20 vm02 bash[17473]: audit 2026-03-10T08:56:19.668011+0000 mon.vm02 (mon.0) 1250 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:56:21.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:20 vm02 bash[17473]: audit 2026-03-10T08:56:19.669230+0000 mon.vm02 (mon.0) 1251 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T08:56:21.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:20 vm02 bash[17473]: cluster 2026-03-10T08:56:19.670746+0000 mgr.vm02.ttibzz (mgr.14195) 1104 : cluster [DBG] pgmap v649: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 30 KiB/s rd, 179 B/s wr, 46 op/s
2026-03-10T08:56:21.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:20 vm02 bash[17473]: audit 2026-03-10T08:56:19.674678+0000 mon.vm02 (mon.0) 1252 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:56:21.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:20 vm02 bash[17473]: audit 2026-03-10T08:56:19.676987+0000 mon.vm02 (mon.0) 1253 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T08:56:22.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:21 vm02 bash[17473]: cluster 2026-03-10T08:56:20.666674+0000 mon.vm02 (mon.0) 1254 : cluster [INF] Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s))
2026-03-10T08:56:22.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:21 vm02 bash[17473]: cluster 2026-03-10T08:56:20.666687+0000 mon.vm02 (mon.0) 1255 : cluster [INF] Cluster is now healthy
2026-03-10T08:56:22.781 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:22 vm02 bash[17473]: cluster 2026-03-10T08:56:21.671126+0000 mgr.vm02.ttibzz (mgr.14195) 1105 : cluster [DBG] pgmap v650: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 56 KiB/s rd, 0 B/s wr, 86 op/s
2026-03-10T08:56:22.869 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (7s) 3s ago 17m 89.1M - 19.2.3-678-ge911bdeb 654f31e6858e 945cb65f23ce
2026-03-10T08:56:22.869 INFO:teuthology.orchestra.run.vm02.stdout:Check with each haproxy down in turn...
2026-03-10T08:56:23.223 INFO:teuthology.orchestra.run.vm02.stdout:Scheduled to stop haproxy.rgw.foo.vm02.rwnyxr on host 'vm02'
2026-03-10T08:56:23.429 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for haproxy.rgw.foo.vm02.rwnyxr to stop
2026-03-10T08:56:23.638 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:56:23.638 INFO:teuthology.orchestra.run.vm02.stdout:haproxy.rgw.foo.vm02.rwnyxr vm02 *:9000,9001 running (17m) 17s ago 17m 3691k - 2.3.17-d1c9119 e85424b0d443 a9a01ca4ad82
2026-03-10T08:56:23.638 INFO:teuthology.orchestra.run.vm02.stdout:haproxy.rgw.foo.vm07.plwsjk vm07 *:9000,9001 running (17m) 3s ago 17m 3691k - 2.3.17-d1c9119 e85424b0d443 7c1849d4a489
2026-03-10T08:56:23.872 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK
2026-03-10T08:56:24.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:23 vm02 bash[17473]: audit 2026-03-10T08:56:22.857031+0000 mgr.vm02.ttibzz (mgr.14195) 1106 : audit [DBG] from='client.16860 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:56:24.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:23 vm02 bash[17473]: audit 2026-03-10T08:56:23.040304+0000 mgr.vm02.ttibzz (mgr.14195) 1107 : audit [DBG] from='client.16864 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:56:24.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:23 vm02 bash[17473]: audit 2026-03-10T08:56:23.220166+0000 mon.vm02 (mon.0) 1256 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:56:24.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:23 vm02 bash[17473]: audit 2026-03-10T08:56:23.225360+0000 mon.vm02 (mon.0) 1257 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:56:24.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:23 vm02 bash[17473]: audit 2026-03-10T08:56:23.226472+0000 mon.vm02 (mon.0) 1258 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:56:24.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:23 vm02 bash[17473]: audit 2026-03-10T08:56:23.603137+0000 mon.vm02 (mon.0) 1259 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:56:24.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:23 vm02 bash[17473]: audit 2026-03-10T08:56:23.603989+0000 mon.vm02 (mon.0) 1260 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T08:56:24.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:23 vm02 bash[17473]: audit 2026-03-10T08:56:23.609602+0000 mon.vm02 (mon.0) 1261 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:56:24.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:23 vm02 bash[17473]: audit 2026-03-10T08:56:23.612413+0000 mon.vm02 (mon.0) 1262 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T08:56:25.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:24 vm02 bash[17473]: audit 2026-03-10T08:56:23.214133+0000 mgr.vm02.ttibzz (mgr.14195) 1108 : audit [DBG] from='client.16868 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "haproxy.rgw.foo.vm02.rwnyxr", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:56:25.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:24 vm02 bash[17473]: cephadm 2026-03-10T08:56:23.214528+0000 mgr.vm02.ttibzz (mgr.14195) 1109 : cephadm [INF] Schedule stop daemon haproxy.rgw.foo.vm02.rwnyxr
2026-03-10T08:56:25.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:24 vm02 bash[17473]: audit 2026-03-10T08:56:23.415752+0000 mgr.vm02.ttibzz (mgr.14195) 1110 : audit [DBG] from='client.16872 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:56:25.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:24 vm02 bash[17473]: audit 2026-03-10T08:56:23.637889+0000 mgr.vm02.ttibzz (mgr.14195) 1111 : audit [DBG] from='client.16876 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:56:25.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:24 vm02 bash[17473]: cluster 2026-03-10T08:56:23.671660+0000 mgr.vm02.ttibzz (mgr.14195) 1112 : cluster [DBG] pgmap v651: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 56 KiB/s rd, 0 B/s wr, 86 op/s
2026-03-10T08:56:25.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:24 vm02 bash[17473]: audit 2026-03-10T08:56:23.877301+0000 mon.vm02 (mon.0) 1263 : audit [DBG] from='client.? 192.168.123.102:0/667564509' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:56:27.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:26 vm02 bash[17473]: cluster 2026-03-10T08:56:25.672046+0000 mgr.vm02.ttibzz (mgr.14195) 1113 : cluster [DBG] pgmap v652: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 56 KiB/s rd, 179 B/s wr, 86 op/s
2026-03-10T08:56:29.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:28 vm02 bash[17473]: cluster 2026-03-10T08:56:27.672401+0000 mgr.vm02.ttibzz (mgr.14195) 1114 : cluster [DBG] pgmap v653: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 56 KiB/s rd, 179 B/s wr, 86 op/s
2026-03-10T08:56:29.069 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for haproxy.rgw.foo.vm02.rwnyxr to stop
2026-03-10T08:56:29.254 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:56:29.254 INFO:teuthology.orchestra.run.vm02.stdout:haproxy.rgw.foo.vm02.rwnyxr vm02 *:9000,9001 running (17m) 23s ago 17m 3691k - 2.3.17-d1c9119 e85424b0d443 a9a01ca4ad82
2026-03-10T08:56:29.254 INFO:teuthology.orchestra.run.vm02.stdout:haproxy.rgw.foo.vm07.plwsjk vm07 *:9000,9001 running (17m) 9s ago 17m 3691k - 2.3.17-d1c9119 e85424b0d443 7c1849d4a489
2026-03-10T08:56:29.452 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK
2026-03-10T08:56:30.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:29 vm02 bash[17473]: audit 2026-03-10T08:56:28.832450+0000 mon.vm02 (mon.0) 1264 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:56:30.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:29 vm02 bash[17473]: audit 2026-03-10T08:56:28.839042+0000 mon.vm02 (mon.0) 1265 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:56:30.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:29 vm02 bash[17473]: audit 2026-03-10T08:56:28.870171+0000 mon.vm02 (mon.0) 1266 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:56:30.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:29 vm02 bash[17473]: audit 2026-03-10T08:56:29.457231+0000 mon.vm02 (mon.0) 1267 : audit [DBG] from='client.? 192.168.123.102:0/3158448624' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:56:31.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:30 vm02 bash[17473]: audit 2026-03-10T08:56:29.048589+0000 mgr.vm02.ttibzz (mgr.14195) 1115 : audit [DBG] from='client.16884 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:56:31.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:30 vm02 bash[17473]: audit 2026-03-10T08:56:29.256059+0000 mgr.vm02.ttibzz (mgr.14195) 1116 : audit [DBG] from='client.16888 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:56:31.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:30 vm02 bash[17473]: cluster 2026-03-10T08:56:29.672936+0000 mgr.vm02.ttibzz (mgr.14195) 1117 : cluster [DBG] pgmap v654: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 45 KiB/s rd, 358 B/s wr, 69 op/s
2026-03-10T08:56:33.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:32 vm02 bash[17473]: cluster 2026-03-10T08:56:31.673461+0000 mgr.vm02.ttibzz (mgr.14195) 1118 : cluster [DBG] pgmap v655: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 24 KiB/s rd, 341 B/s wr, 38 op/s
2026-03-10T08:56:34.683 INFO:teuthology.orchestra.run.vm02.stdout:haproxy.rgw.foo.vm02.rwnyxr vm02 *:9000,9001 stopped 0s ago 17m - -
2026-03-10T08:56:34.688 INFO:teuthology.orchestra.run.vm02.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-03-10T08:56:34.688 INFO:teuthology.orchestra.run.vm02.stderr: Dload Upload Total Spent Left Speed
2026-03-10T08:56:34.689 INFO:teuthology.orchestra.run.vm02.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0
2026-03-10T08:56:34.689 INFO:teuthology.orchestra.run.vm02.stderr:curl: (7) Failed to connect to 12.12.1.102 port 9000: Connection refused
2026-03-10T08:56:34.690 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for http://12.12.1.102:9000/ to be available
2026-03-10T08:56:35.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:34 vm02 bash[17473]: cluster 2026-03-10T08:56:33.673938+0000 mgr.vm02.ttibzz (mgr.14195) 1119 : cluster [DBG] pgmap v656: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:56:35.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:34 vm02 bash[17473]: audit 2026-03-10T08:56:34.264978+0000 mon.vm02 (mon.0) 1268 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:56:35.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:34 vm02 bash[17473]: audit 2026-03-10T08:56:34.265734+0000 mon.vm02 (mon.0) 1269 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:56:35.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:34 vm02 bash[17473]: audit 2026-03-10T08:56:34.534544+0000 mon.vm02 (mon.0) 1270 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:56:35.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:34 vm02 bash[17473]: audit 2026-03-10T08:56:34.539922+0000 mon.vm02 (mon.0) 1271 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:56:35.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:34 vm02 bash[17473]: audit 2026-03-10T08:56:34.541004+0000 mon.vm02 (mon.0) 1272 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:56:35.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:34 vm02 bash[17473]: audit 2026-03-10T08:56:34.541618+0000 mon.vm02 (mon.0) 1273 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T08:56:35.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:34 vm02 bash[17473]: audit 2026-03-10T08:56:34.552227+0000 mon.vm02 (mon.0) 1274 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:56:35.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:34 vm02 bash[17473]: audit 2026-03-10T08:56:34.554849+0000 mon.vm02 (mon.0) 1275 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T08:56:35.694 INFO:teuthology.orchestra.run.vm02.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-03-10T08:56:35.694 INFO:teuthology.orchestra.run.vm02.stderr: Dload Upload Total Spent Left Speed
2026-03-10T08:56:35.694 INFO:teuthology.orchestra.run.vm02.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0
2026-03-10T08:56:35.694 INFO:teuthology.orchestra.run.vm02.stderr:curl: (7) Failed to connect to 12.12.1.102 port 9000: Connection refused
2026-03-10T08:56:35.694 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for http://12.12.1.102:9000/ to be available
2026-03-10T08:56:36.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:35 vm02 bash[17473]: audit 2026-03-10T08:56:34.670380+0000 mgr.vm02.ttibzz (mgr.14195) 1120 : audit [DBG] from='client.16896 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:56:36.698 INFO:teuthology.orchestra.run.vm02.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-03-10T08:56:36.699 INFO:teuthology.orchestra.run.vm02.stderr: Dload Upload Total Spent Left Speed
2026-03-10T08:56:36.699 INFO:teuthology.orchestra.run.vm02.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 
187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k 2026-03-10T08:56:36.876 INFO:teuthology.orchestra.run.vm02.stdout:anonymousScheduled to start haproxy.rgw.foo.vm02.rwnyxr on host 'vm02' 2026-03-10T08:56:37.131 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for haproxy.rgw.foo.vm02.rwnyxr to start 2026-03-10T08:56:37.252 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:36 vm02 bash[17473]: cluster 2026-03-10T08:56:35.674395+0000 mgr.vm02.ttibzz (mgr.14195) 1121 : cluster [DBG] pgmap v657: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:56:37.252 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:36 vm02 bash[17473]: cluster 2026-03-10T08:56:35.674395+0000 mgr.vm02.ttibzz (mgr.14195) 1121 : cluster [DBG] pgmap v657: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:56:37.315 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:56:37.315 INFO:teuthology.orchestra.run.vm02.stdout:haproxy.rgw.foo.vm02.rwnyxr vm02 *:9000,9001 stopped 2s ago 17m - - 2026-03-10T08:56:37.315 INFO:teuthology.orchestra.run.vm02.stdout:haproxy.rgw.foo.vm07.plwsjk vm07 *:9000,9001 running (17m) 17s ago 17m 3691k - 2.3.17-d1c9119 e85424b0d443 7c1849d4a489 2026-03-10T08:56:37.522 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK 2026-03-10T08:56:38.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:37 vm02 bash[17473]: audit 2026-03-10T08:56:36.871019+0000 mgr.vm02.ttibzz (mgr.14195) 1122 : audit [DBG] from='client.16900 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.rgw.foo.vm02.rwnyxr", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:56:38.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:37 vm02 bash[17473]: audit 2026-03-10T08:56:36.871019+0000 mgr.vm02.ttibzz (mgr.14195) 1122 : audit [DBG] from='client.16900 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.rgw.foo.vm02.rwnyxr", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:56:38.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:37 vm02 bash[17473]: cephadm 2026-03-10T08:56:36.871360+0000 mgr.vm02.ttibzz (mgr.14195) 1123 : cephadm [INF] Schedule start daemon haproxy.rgw.foo.vm02.rwnyxr 2026-03-10T08:56:38.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:37 vm02 bash[17473]: cephadm 2026-03-10T08:56:36.871360+0000 mgr.vm02.ttibzz (mgr.14195) 1123 : cephadm [INF] Schedule start daemon haproxy.rgw.foo.vm02.rwnyxr 2026-03-10T08:56:38.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:37 vm02 bash[17473]: audit 2026-03-10T08:56:36.875128+0000 mon.vm02 (mon.0) 1276 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:56:38.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:37 vm02 bash[17473]: audit 2026-03-10T08:56:36.875128+0000 mon.vm02 (mon.0) 1276 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:56:38.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:37 vm02 bash[17473]: audit 2026-03-10T08:56:36.879632+0000 mon.vm02 (mon.0) 1277 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:56:38.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:37 vm02 bash[17473]: audit 2026-03-10T08:56:36.879632+0000 mon.vm02 (mon.0) 1277 : 
audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:56:38.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:37 vm02 bash[17473]: audit 2026-03-10T08:56:36.880464+0000 mon.vm02 (mon.0) 1278 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:56:38.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:37 vm02 bash[17473]: audit 2026-03-10T08:56:36.880464+0000 mon.vm02 (mon.0) 1278 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:56:38.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:37 vm02 bash[17473]: audit 2026-03-10T08:56:36.881712+0000 mon.vm02 (mon.0) 1279 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:56:38.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:37 vm02 bash[17473]: audit 2026-03-10T08:56:36.881712+0000 mon.vm02 (mon.0) 1279 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:56:38.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:37 vm02 bash[17473]: audit 2026-03-10T08:56:36.882169+0000 mon.vm02 (mon.0) 1280 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T08:56:38.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:37 vm02 bash[17473]: audit 2026-03-10T08:56:36.882169+0000 mon.vm02 (mon.0) 1280 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T08:56:38.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:37 vm02 bash[17473]: audit 2026-03-10T08:56:36.886034+0000 mon.vm02 (mon.0) 1281 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:56:38.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:37 vm02 bash[17473]: audit 2026-03-10T08:56:36.886034+0000 mon.vm02 (mon.0) 1281 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:56:38.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:37 vm02 bash[17473]: audit 2026-03-10T08:56:36.887569+0000 mon.vm02 (mon.0) 1282 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T08:56:38.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:37 vm02 bash[17473]: audit 2026-03-10T08:56:36.887569+0000 mon.vm02 (mon.0) 1282 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T08:56:38.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:37 vm02 bash[17473]: audit 2026-03-10T08:56:37.526546+0000 mon.vm02 (mon.0) 1283 : audit [DBG] from='client.? 192.168.123.102:0/728991413' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:56:38.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:37 vm02 bash[17473]: audit 2026-03-10T08:56:37.526546+0000 mon.vm02 (mon.0) 1283 : audit [DBG] from='client.? 
192.168.123.102:0/728991413' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:56:39.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:38 vm02 bash[17473]: audit 2026-03-10T08:56:37.109454+0000 mgr.vm02.ttibzz (mgr.14195) 1124 : audit [DBG] from='client.16904 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:56:39.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:38 vm02 bash[17473]: audit 2026-03-10T08:56:37.109454+0000 mgr.vm02.ttibzz (mgr.14195) 1124 : audit [DBG] from='client.16904 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:56:39.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:38 vm02 bash[17473]: audit 2026-03-10T08:56:37.317221+0000 mgr.vm02.ttibzz (mgr.14195) 1125 : audit [DBG] from='client.16908 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:56:39.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:38 vm02 bash[17473]: audit 2026-03-10T08:56:37.317221+0000 mgr.vm02.ttibzz (mgr.14195) 1125 : audit [DBG] from='client.16908 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:56:39.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:38 vm02 bash[17473]: cluster 2026-03-10T08:56:37.674845+0000 mgr.vm02.ttibzz (mgr.14195) 1126 : cluster [DBG] pgmap v658: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:56:39.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:38 vm02 bash[17473]: cluster 2026-03-10T08:56:37.674845+0000 mgr.vm02.ttibzz (mgr.14195) 1126 : cluster [DBG] pgmap v658: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:56:41.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:40 vm02 bash[17473]: cluster 2026-03-10T08:56:39.675332+0000 mgr.vm02.ttibzz (mgr.14195) 1127 : cluster [DBG] pgmap v659: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:56:41.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:40 vm02 bash[17473]: cluster 2026-03-10T08:56:39.675332+0000 mgr.vm02.ttibzz (mgr.14195) 1127 : cluster [DBG] pgmap v659: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:56:42.718 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for haproxy.rgw.foo.vm02.rwnyxr to start 2026-03-10T08:56:42.905 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:56:42.905 INFO:teuthology.orchestra.run.vm02.stdout:haproxy.rgw.foo.vm02.rwnyxr vm02 *:9000,9001 stopped 8s ago 17m - - 2026-03-10T08:56:42.905 INFO:teuthology.orchestra.run.vm02.stdout:haproxy.rgw.foo.vm07.plwsjk vm07 *:9000,9001 running (17m) 23s ago 17m 3691k - 2.3.17-d1c9119 e85424b0d443 7c1849d4a489 2026-03-10T08:56:43.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:42 vm02 bash[17473]: cluster 2026-03-10T08:56:41.675853+0000 mgr.vm02.ttibzz (mgr.14195) 1128 : cluster [DBG] pgmap v660: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:56:43.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 
08:56:42 vm02 bash[17473]: cluster 2026-03-10T08:56:41.675853+0000 mgr.vm02.ttibzz (mgr.14195) 1128 : cluster [DBG] pgmap v660: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:56:43.120 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK 2026-03-10T08:56:44.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:44 vm02 bash[17473]: audit 2026-03-10T08:56:42.703630+0000 mgr.vm02.ttibzz (mgr.14195) 1129 : audit [DBG] from='client.16916 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:56:44.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:44 vm02 bash[17473]: audit 2026-03-10T08:56:42.703630+0000 mgr.vm02.ttibzz (mgr.14195) 1129 : audit [DBG] from='client.16916 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:56:44.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:44 vm02 bash[17473]: audit 2026-03-10T08:56:42.907217+0000 mgr.vm02.ttibzz (mgr.14195) 1130 : audit [DBG] from='client.16920 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:56:44.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:44 vm02 bash[17473]: audit 2026-03-10T08:56:42.907217+0000 mgr.vm02.ttibzz (mgr.14195) 1130 : audit [DBG] from='client.16920 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:56:44.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:44 vm02 bash[17473]: audit 2026-03-10T08:56:43.124350+0000 mon.vm02 (mon.0) 1284 : audit [DBG] from='client.? 192.168.123.102:0/1707615825' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:56:44.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:44 vm02 bash[17473]: audit 2026-03-10T08:56:43.124350+0000 mon.vm02 (mon.0) 1284 : audit [DBG] from='client.? 
192.168.123.102:0/1707615825' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:56:45.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:45 vm02 bash[17473]: cluster 2026-03-10T08:56:43.676385+0000 mgr.vm02.ttibzz (mgr.14195) 1131 : cluster [DBG] pgmap v661: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:56:45.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:45 vm02 bash[17473]: cluster 2026-03-10T08:56:43.676385+0000 mgr.vm02.ttibzz (mgr.14195) 1131 : cluster [DBG] pgmap v661: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:56:47.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:46 vm02 bash[17473]: cluster 2026-03-10T08:56:45.676884+0000 mgr.vm02.ttibzz (mgr.14195) 1132 : cluster [DBG] pgmap v662: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:56:47.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:46 vm02 bash[17473]: cluster 2026-03-10T08:56:45.676884+0000 mgr.vm02.ttibzz (mgr.14195) 1132 : cluster [DBG] pgmap v662: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:56:47.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:46 vm02 bash[17473]: audit 2026-03-10T08:56:45.873771+0000 mon.vm02 (mon.0) 1285 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:56:47.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:46 vm02 bash[17473]: audit 2026-03-10T08:56:45.873771+0000 mon.vm02 (mon.0) 1285 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:56:47.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:46 vm02 bash[17473]: audit 2026-03-10T08:56:45.879063+0000 mon.vm02 (mon.0) 1286 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:56:47.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:46 vm02 bash[17473]: audit 2026-03-10T08:56:45.879063+0000 mon.vm02 (mon.0) 1286 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:56:47.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:46 vm02 bash[17473]: audit 2026-03-10T08:56:45.925931+0000 mon.vm02 (mon.0) 1287 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:56:47.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:46 vm02 bash[17473]: audit 2026-03-10T08:56:45.925931+0000 mon.vm02 (mon.0) 1287 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:56:48.314 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for haproxy.rgw.foo.vm02.rwnyxr to start 2026-03-10T08:56:48.486 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:56:48.486 INFO:teuthology.orchestra.run.vm02.stdout:haproxy.rgw.foo.vm02.rwnyxr vm02 *:9000,9001 stopped 13s ago 17m - - 2026-03-10T08:56:48.486 INFO:teuthology.orchestra.run.vm02.stdout:haproxy.rgw.foo.vm07.plwsjk vm07 *:9000,9001 running (17m) 28s ago 17m 3691k - 2.3.17-d1c9119 e85424b0d443 7c1849d4a489 2026-03-10T08:56:48.689 
INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK 2026-03-10T08:56:49.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:48 vm02 bash[17473]: cluster 2026-03-10T08:56:47.677305+0000 mgr.vm02.ttibzz (mgr.14195) 1133 : cluster [DBG] pgmap v663: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:56:49.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:48 vm02 bash[17473]: cluster 2026-03-10T08:56:47.677305+0000 mgr.vm02.ttibzz (mgr.14195) 1133 : cluster [DBG] pgmap v663: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:56:49.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:48 vm02 bash[17473]: audit 2026-03-10T08:56:48.693675+0000 mon.vm02 (mon.0) 1288 : audit [DBG] from='client.? 192.168.123.102:0/3611819186' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:56:49.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:48 vm02 bash[17473]: audit 2026-03-10T08:56:48.693675+0000 mon.vm02 (mon.0) 1288 : audit [DBG] from='client.? 192.168.123.102:0/3611819186' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:56:50.031 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:49 vm02 bash[17473]: audit 2026-03-10T08:56:48.296296+0000 mgr.vm02.ttibzz (mgr.14195) 1134 : audit [DBG] from='client.16928 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:56:50.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:49 vm02 bash[17473]: audit 2026-03-10T08:56:48.296296+0000 mgr.vm02.ttibzz (mgr.14195) 1134 : audit [DBG] from='client.16928 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:56:50.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:49 vm02 bash[17473]: audit 2026-03-10T08:56:48.487512+0000 mgr.vm02.ttibzz (mgr.14195) 1135 : audit [DBG] from='client.16932 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:56:50.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:49 vm02 bash[17473]: audit 2026-03-10T08:56:48.487512+0000 mgr.vm02.ttibzz (mgr.14195) 1135 : audit [DBG] from='client.16932 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:56:50.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:49 vm02 bash[17473]: audit 2026-03-10T08:56:49.260704+0000 mon.vm02 (mon.0) 1289 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:56:50.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:49 vm02 bash[17473]: audit 2026-03-10T08:56:49.260704+0000 mon.vm02 (mon.0) 1289 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:56:51.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:50 vm02 bash[17473]: cluster 2026-03-10T08:56:49.677790+0000 mgr.vm02.ttibzz (mgr.14195) 1136 : cluster [DBG] pgmap v664: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:56:51.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:50 vm02 bash[17473]: cluster 
2026-03-10T08:56:49.677790+0000 mgr.vm02.ttibzz (mgr.14195) 1136 : cluster [DBG] pgmap v664: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:56:51.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:50 vm02 bash[17473]: audit 2026-03-10T08:56:50.679572+0000 mon.vm02 (mon.0) 1290 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:56:51.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:50 vm02 bash[17473]: audit 2026-03-10T08:56:50.679572+0000 mon.vm02 (mon.0) 1290 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:56:51.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:50 vm02 bash[17473]: audit 2026-03-10T08:56:50.686152+0000 mon.vm02 (mon.0) 1291 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:56:51.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:50 vm02 bash[17473]: audit 2026-03-10T08:56:50.686152+0000 mon.vm02 (mon.0) 1291 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:56:51.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:50 vm02 bash[17473]: audit 2026-03-10T08:56:50.687189+0000 mon.vm02 (mon.0) 1292 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:56:51.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:50 vm02 bash[17473]: audit 2026-03-10T08:56:50.687189+0000 mon.vm02 (mon.0) 1292 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:56:51.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:50 vm02 bash[17473]: audit 2026-03-10T08:56:50.687930+0000 mon.vm02 (mon.0) 1293 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T08:56:51.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:50 vm02 bash[17473]: audit 2026-03-10T08:56:50.687930+0000 mon.vm02 (mon.0) 1293 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T08:56:51.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:50 vm02 bash[17473]: audit 2026-03-10T08:56:50.691985+0000 mon.vm02 (mon.0) 1294 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:56:51.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:50 vm02 bash[17473]: audit 2026-03-10T08:56:50.691985+0000 mon.vm02 (mon.0) 1294 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:56:51.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:50 vm02 bash[17473]: audit 2026-03-10T08:56:50.693506+0000 mon.vm02 (mon.0) 1295 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T08:56:51.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:50 vm02 bash[17473]: audit 2026-03-10T08:56:50.693506+0000 mon.vm02 (mon.0) 1295 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: 
dispatch 2026-03-10T08:56:53.252 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:52 vm02 bash[17473]: cluster 2026-03-10T08:56:51.678248+0000 mgr.vm02.ttibzz (mgr.14195) 1137 : cluster [DBG] pgmap v665: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:56:53.252 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:52 vm02 bash[17473]: cluster 2026-03-10T08:56:51.678248+0000 mgr.vm02.ttibzz (mgr.14195) 1137 : cluster [DBG] pgmap v665: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:56:53.883 INFO:teuthology.orchestra.run.vm02.stdout:haproxy.rgw.foo.vm02.rwnyxr vm02 *:9000,9001 running (7s) 3s ago 17m 3520k - 2.3.17-d1c9119 e85424b0d443 c38c281926f7 2026-03-10T08:56:54.084 INFO:teuthology.orchestra.run.vm02.stdout:Scheduled to stop haproxy.rgw.foo.vm07.plwsjk on host 'vm07' 2026-03-10T08:56:54.311 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for haproxy.rgw.foo.vm07.plwsjk to stop 2026-03-10T08:56:54.483 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:56:54.483 INFO:teuthology.orchestra.run.vm02.stdout:haproxy.rgw.foo.vm02.rwnyxr vm02 *:9000,9001 running (8s) 3s ago 17m 3520k - 2.3.17-d1c9119 e85424b0d443 c38c281926f7 2026-03-10T08:56:54.483 INFO:teuthology.orchestra.run.vm02.stdout:haproxy.rgw.foo.vm07.plwsjk vm07 *:9000,9001 running (17m) 34s ago 17m 3691k - 2.3.17-d1c9119 e85424b0d443 7c1849d4a489 2026-03-10T08:56:54.682 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK 2026-03-10T08:56:55.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:54 vm02 bash[17473]: cluster 2026-03-10T08:56:53.678820+0000 mgr.vm02.ttibzz (mgr.14195) 1138 : cluster [DBG] pgmap v666: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:56:55.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:54 vm02 bash[17473]: cluster 2026-03-10T08:56:53.678820+0000 mgr.vm02.ttibzz (mgr.14195) 1138 : cluster [DBG] pgmap v666: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-03-10T08:56:55.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:54 vm02 bash[17473]: audit 2026-03-10T08:56:53.867050+0000 mgr.vm02.ttibzz (mgr.14195) 1139 : audit [DBG] from='client.16940 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:56:55.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:54 vm02 bash[17473]: audit 2026-03-10T08:56:53.867050+0000 mgr.vm02.ttibzz (mgr.14195) 1139 : audit [DBG] from='client.16940 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:56:55.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:54 vm02 bash[17473]: audit 2026-03-10T08:56:54.077650+0000 mon.vm02 (mon.0) 1296 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:56:55.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:54 vm02 bash[17473]: audit 2026-03-10T08:56:54.077650+0000 mon.vm02 (mon.0) 1296 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:56:55.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:54 vm02 bash[17473]: audit 2026-03-10T08:56:54.085076+0000 mon.vm02 (mon.0) 1297 : audit [INF] from='mgr.14195 
192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:56:55.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:54 vm02 bash[17473]: audit 2026-03-10T08:56:54.085076+0000 mon.vm02 (mon.0) 1297 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:56:55.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:54 vm02 bash[17473]: audit 2026-03-10T08:56:54.086920+0000 mon.vm02 (mon.0) 1298 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:56:55.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:54 vm02 bash[17473]: audit 2026-03-10T08:56:54.086920+0000 mon.vm02 (mon.0) 1298 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:56:55.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:54 vm02 bash[17473]: audit 2026-03-10T08:56:54.089097+0000 mon.vm02 (mon.0) 1299 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:56:55.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:54 vm02 bash[17473]: audit 2026-03-10T08:56:54.089097+0000 mon.vm02 (mon.0) 1299 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:56:55.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:54 vm02 bash[17473]: audit 2026-03-10T08:56:54.092366+0000 mon.vm02 (mon.0) 1300 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T08:56:55.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:54 vm02 bash[17473]: audit 2026-03-10T08:56:54.092366+0000 mon.vm02 (mon.0) 1300 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T08:56:55.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:54 vm02 bash[17473]: audit 2026-03-10T08:56:54.098377+0000 mon.vm02 (mon.0) 1301 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:56:55.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:54 vm02 bash[17473]: audit 2026-03-10T08:56:54.098377+0000 mon.vm02 (mon.0) 1301 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:56:55.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:54 vm02 bash[17473]: audit 2026-03-10T08:56:54.100773+0000 mon.vm02 (mon.0) 1302 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T08:56:55.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:54 vm02 bash[17473]: audit 2026-03-10T08:56:54.100773+0000 mon.vm02 (mon.0) 1302 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T08:56:55.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:54 vm02 bash[17473]: audit 2026-03-10T08:56:54.686655+0000 mon.vm02 (mon.0) 1303 : audit [DBG] from='client.? 
192.168.123.102:0/1103739797' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:56:55.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:54 vm02 bash[17473]: audit 2026-03-10T08:56:54.686655+0000 mon.vm02 (mon.0) 1303 : audit [DBG] from='client.? 192.168.123.102:0/1103739797' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:56:56.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:55 vm02 bash[17473]: audit 2026-03-10T08:56:54.069926+0000 mgr.vm02.ttibzz (mgr.14195) 1140 : audit [DBG] from='client.16944 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "haproxy.rgw.foo.vm07.plwsjk", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:56:56.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:55 vm02 bash[17473]: audit 2026-03-10T08:56:54.069926+0000 mgr.vm02.ttibzz (mgr.14195) 1140 : audit [DBG] from='client.16944 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "haproxy.rgw.foo.vm07.plwsjk", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:56:56.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:55 vm02 bash[17473]: cephadm 2026-03-10T08:56:54.070309+0000 mgr.vm02.ttibzz (mgr.14195) 1141 : cephadm [INF] Schedule stop daemon haproxy.rgw.foo.vm07.plwsjk 2026-03-10T08:56:56.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:55 vm02 bash[17473]: cephadm 2026-03-10T08:56:54.070309+0000 mgr.vm02.ttibzz (mgr.14195) 1141 : cephadm [INF] Schedule stop daemon haproxy.rgw.foo.vm07.plwsjk 2026-03-10T08:56:56.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:55 vm02 bash[17473]: audit 2026-03-10T08:56:54.295923+0000 mgr.vm02.ttibzz (mgr.14195) 1142 : audit [DBG] from='client.16948 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:56:56.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:55 vm02 bash[17473]: audit 2026-03-10T08:56:54.295923+0000 mgr.vm02.ttibzz (mgr.14195) 1142 : audit [DBG] from='client.16948 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:56:56.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:55 vm02 bash[17473]: audit 2026-03-10T08:56:54.482975+0000 mgr.vm02.ttibzz (mgr.14195) 1143 : audit [DBG] from='client.16952 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:56:56.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:55 vm02 bash[17473]: audit 2026-03-10T08:56:54.482975+0000 mgr.vm02.ttibzz (mgr.14195) 1143 : audit [DBG] from='client.16952 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:56:57.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:57 vm02 bash[17473]: cluster 2026-03-10T08:56:55.679311+0000 mgr.vm02.ttibzz (mgr.14195) 1144 : cluster [DBG] pgmap v667: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:56:57.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:57 vm02 bash[17473]: cluster 2026-03-10T08:56:55.679311+0000 mgr.vm02.ttibzz (mgr.14195) 1144 : cluster [DBG] pgmap v667: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:56:58.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:58 vm02 bash[17473]: cluster 
2026-03-10T08:56:57.679846+0000 mgr.vm02.ttibzz (mgr.14195) 1145 : cluster [DBG] pgmap v668: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:56:58.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:56:58 vm02 bash[17473]: cluster 2026-03-10T08:56:57.679846+0000 mgr.vm02.ttibzz (mgr.14195) 1145 : cluster [DBG] pgmap v668: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:56:59.861 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for haproxy.rgw.foo.vm07.plwsjk to stop 2026-03-10T08:57:00.023 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:57:00.023 INFO:teuthology.orchestra.run.vm02.stdout:haproxy.rgw.foo.vm02.rwnyxr vm02 *:9000,9001 running (14s) 9s ago 17m 3520k - 2.3.17-d1c9119 e85424b0d443 c38c281926f7 2026-03-10T08:57:00.023 INFO:teuthology.orchestra.run.vm02.stdout:haproxy.rgw.foo.vm07.plwsjk vm07 *:9000,9001 running (17m) 40s ago 17m 3691k - 2.3.17-d1c9119 e85424b0d443 7c1849d4a489 2026-03-10T08:57:00.225 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK 2026-03-10T08:57:00.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:00 vm02 bash[17473]: audit 2026-03-10T08:56:59.102549+0000 mon.vm02 (mon.0) 1304 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:57:00.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:00 vm02 bash[17473]: audit 2026-03-10T08:56:59.102549+0000 mon.vm02 (mon.0) 1304 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:57:00.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:00 vm02 bash[17473]: audit 2026-03-10T08:56:59.108005+0000 mon.vm02 (mon.0) 1305 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:57:00.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:00 vm02 bash[17473]: audit 2026-03-10T08:56:59.108005+0000 mon.vm02 (mon.0) 1305 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:57:00.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:00 vm02 bash[17473]: audit 2026-03-10T08:56:59.139987+0000 mon.vm02 (mon.0) 1306 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:57:00.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:00 vm02 bash[17473]: audit 2026-03-10T08:56:59.139987+0000 mon.vm02 (mon.0) 1306 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:57:00.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:00 vm02 bash[17473]: cluster 2026-03-10T08:56:59.680382+0000 mgr.vm02.ttibzz (mgr.14195) 1146 : cluster [DBG] pgmap v669: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:57:00.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:00 vm02 bash[17473]: cluster 2026-03-10T08:56:59.680382+0000 mgr.vm02.ttibzz (mgr.14195) 1146 : cluster [DBG] pgmap v669: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-03-10T08:57:00.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:00 vm02 bash[17473]: audit 
2026-03-10T08:56:59.849904+0000 mgr.vm02.ttibzz (mgr.14195) 1147 : audit [DBG] from='client.16960 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:57:00.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:00 vm02 bash[17473]: audit 2026-03-10T08:56:59.849904+0000 mgr.vm02.ttibzz (mgr.14195) 1147 : audit [DBG] from='client.16960 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:57:00.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:00 vm02 bash[17473]: audit 2026-03-10T08:57:00.019151+0000 mgr.vm02.ttibzz (mgr.14195) 1148 : audit [DBG] from='client.16964 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:57:00.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:00 vm02 bash[17473]: audit 2026-03-10T08:57:00.019151+0000 mgr.vm02.ttibzz (mgr.14195) 1148 : audit [DBG] from='client.16964 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:57:01.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:01 vm02 bash[17473]: audit 2026-03-10T08:57:00.229637+0000 mon.vm02 (mon.0) 1307 : audit [DBG] from='client.? 192.168.123.102:0/4196773730' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:57:01.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:01 vm02 bash[17473]: audit 2026-03-10T08:57:00.229637+0000 mon.vm02 (mon.0) 1307 : audit [DBG] from='client.? 192.168.123.102:0/4196773730' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:57:02.531 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:02 vm02 bash[17473]: cluster 2026-03-10T08:57:01.680835+0000 mgr.vm02.ttibzz (mgr.14195) 1149 : cluster [DBG] pgmap v670: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:57:02.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:02 vm02 bash[17473]: cluster 2026-03-10T08:57:01.680835+0000 mgr.vm02.ttibzz (mgr.14195) 1149 : cluster [DBG] pgmap v670: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:57:04.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:04 vm02 bash[17473]: audit 2026-03-10T08:57:03.431351+0000 mon.vm02 (mon.0) 1308 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:57:04.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:04 vm02 bash[17473]: audit 2026-03-10T08:57:03.431351+0000 mon.vm02 (mon.0) 1308 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:57:04.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:04 vm02 bash[17473]: audit 2026-03-10T08:57:03.435676+0000 mon.vm02 (mon.0) 1309 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:57:04.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:04 vm02 bash[17473]: audit 2026-03-10T08:57:03.435676+0000 mon.vm02 (mon.0) 1309 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:57:04.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:04 vm02 bash[17473]: audit 2026-03-10T08:57:03.437025+0000 mon.vm02 (mon.0) 1310 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' 
entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:57:04.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:04 vm02 bash[17473]: audit 2026-03-10T08:57:03.437025+0000 mon.vm02 (mon.0) 1310 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:57:04.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:04 vm02 bash[17473]: audit 2026-03-10T08:57:03.439706+0000 mon.vm02 (mon.0) 1311 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T08:57:04.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:04 vm02 bash[17473]: audit 2026-03-10T08:57:03.439706+0000 mon.vm02 (mon.0) 1311 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T08:57:04.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:04 vm02 bash[17473]: audit 2026-03-10T08:57:03.445546+0000 mon.vm02 (mon.0) 1312 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:57:04.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:04 vm02 bash[17473]: audit 2026-03-10T08:57:03.445546+0000 mon.vm02 (mon.0) 1312 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:57:04.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:04 vm02 bash[17473]: audit 2026-03-10T08:57:03.447190+0000 mon.vm02 (mon.0) 1313 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T08:57:04.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:04 vm02 bash[17473]: audit 2026-03-10T08:57:03.447190+0000 mon.vm02 (mon.0) 1313 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T08:57:04.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:04 vm02 bash[17473]: cluster 2026-03-10T08:57:03.681374+0000 mgr.vm02.ttibzz (mgr.14195) 1150 : cluster [DBG] pgmap v671: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:57:04.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:04 vm02 bash[17473]: cluster 2026-03-10T08:57:03.681374+0000 mgr.vm02.ttibzz (mgr.14195) 1150 : cluster [DBG] pgmap v671: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:57:04.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:04 vm02 bash[17473]: audit 2026-03-10T08:57:04.260829+0000 mon.vm02 (mon.0) 1314 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:57:04.782 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:04 vm02 bash[17473]: audit 2026-03-10T08:57:04.260829+0000 mon.vm02 (mon.0) 1314 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-10T08:57:05.413 INFO:teuthology.orchestra.run.vm02.stdout:haproxy.rgw.foo.vm07.plwsjk vm07 *:9000,9001 stopped 1s ago 17m - - 2026-03-10T08:57:05.418 
INFO:teuthology.orchestra.run.vm02.stderr: % Total % Received % Xferd Average Speed Time Time Time Current 2026-03-10T08:57:05.419 INFO:teuthology.orchestra.run.vm02.stderr: Dload Upload Total Spent Left Speed 2026-03-10T08:57:05.420 INFO:teuthology.orchestra.run.vm02.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k 2026-03-10T08:57:05.597 INFO:teuthology.orchestra.run.vm02.stdout:anonymousScheduled to start haproxy.rgw.foo.vm07.plwsjk on host 'vm07' 2026-03-10T08:57:05.794 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for haproxy.rgw.foo.vm07.plwsjk to start 2026-03-10T08:57:05.982 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-10T08:57:05.982 INFO:teuthology.orchestra.run.vm02.stdout:haproxy.rgw.foo.vm02.rwnyxr vm02 *:9000,9001 running (20s) 15s ago 17m 3520k - 2.3.17-d1c9119 e85424b0d443 c38c281926f7 2026-03-10T08:57:05.982 INFO:teuthology.orchestra.run.vm02.stdout:haproxy.rgw.foo.vm07.plwsjk vm07 *:9000,9001 stopped 2s ago 17m - - 2026-03-10T08:57:06.221 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK 2026-03-10T08:57:07.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: audit 2026-03-10T08:57:05.401928+0000 mgr.vm02.ttibzz (mgr.14195) 1151 : audit [DBG] from='client.16972 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:57:07.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: audit 2026-03-10T08:57:05.401928+0000 mgr.vm02.ttibzz (mgr.14195) 1151 : audit [DBG] from='client.16972 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:57:07.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: audit 2026-03-10T08:57:05.586568+0000 mgr.vm02.ttibzz (mgr.14195) 1152 : audit [DBG] from='client.16976 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.rgw.foo.vm07.plwsjk", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:57:07.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: audit 2026-03-10T08:57:05.586568+0000 mgr.vm02.ttibzz (mgr.14195) 1152 : audit [DBG] from='client.16976 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.rgw.foo.vm07.plwsjk", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:57:07.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: cephadm 2026-03-10T08:57:05.586978+0000 mgr.vm02.ttibzz (mgr.14195) 1153 : cephadm [INF] Schedule start daemon haproxy.rgw.foo.vm07.plwsjk 2026-03-10T08:57:07.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: cephadm 2026-03-10T08:57:05.586978+0000 mgr.vm02.ttibzz (mgr.14195) 1153 : cephadm [INF] Schedule start daemon haproxy.rgw.foo.vm07.plwsjk 2026-03-10T08:57:07.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: audit 2026-03-10T08:57:05.593205+0000 mon.vm02 (mon.0) 1315 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:57:07.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: audit 2026-03-10T08:57:05.593205+0000 mon.vm02 (mon.0) 1315 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:57:07.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: audit 
2026-03-10T08:57:05.598533+0000 mon.vm02 (mon.0) 1316 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:57:07.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: audit 2026-03-10T08:57:05.598533+0000 mon.vm02 (mon.0) 1316 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:57:07.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: audit 2026-03-10T08:57:05.599759+0000 mon.vm02 (mon.0) 1317 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:57:07.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: audit 2026-03-10T08:57:05.599759+0000 mon.vm02 (mon.0) 1317 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-10T08:57:07.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: cluster 2026-03-10T08:57:05.681949+0000 mgr.vm02.ttibzz (mgr.14195) 1154 : cluster [DBG] pgmap v672: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:57:07.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: cluster 2026-03-10T08:57:05.681949+0000 mgr.vm02.ttibzz (mgr.14195) 1154 : cluster [DBG] pgmap v672: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-03-10T08:57:07.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: audit 2026-03-10T08:57:05.779168+0000 mgr.vm02.ttibzz (mgr.14195) 1155 : audit [DBG] from='client.16980 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:57:07.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: audit 2026-03-10T08:57:05.779168+0000 mgr.vm02.ttibzz (mgr.14195) 1155 : audit [DBG] from='client.16980 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:57:07.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: audit 2026-03-10T08:57:05.983976+0000 mgr.vm02.ttibzz (mgr.14195) 1156 : audit [DBG] from='client.16984 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:57:07.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: audit 2026-03-10T08:57:05.983976+0000 mgr.vm02.ttibzz (mgr.14195) 1156 : audit [DBG] from='client.16984 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-03-10T08:57:07.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: audit 2026-03-10T08:57:06.009629+0000 mon.vm02 (mon.0) 1318 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:57:07.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: audit 2026-03-10T08:57:06.009629+0000 mon.vm02 (mon.0) 1318 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:57:07.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: audit 2026-03-10T08:57:06.016164+0000 mon.vm02 (mon.0) 1319 : audit [INF] from='mgr.14195 
192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:57:07.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: audit 2026-03-10T08:57:06.016164+0000 mon.vm02 (mon.0) 1319 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:57:07.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: audit 2026-03-10T08:57:06.017366+0000 mon.vm02 (mon.0) 1320 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:57:07.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: audit 2026-03-10T08:57:06.017366+0000 mon.vm02 (mon.0) 1320 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-10T08:57:07.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: audit 2026-03-10T08:57:06.017898+0000 mon.vm02 (mon.0) 1321 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T08:57:07.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: audit 2026-03-10T08:57:06.017898+0000 mon.vm02 (mon.0) 1321 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-10T08:57:07.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: audit 2026-03-10T08:57:06.023062+0000 mon.vm02 (mon.0) 1322 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:57:07.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: audit 2026-03-10T08:57:06.023062+0000 mon.vm02 (mon.0) 1322 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' 2026-03-10T08:57:07.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: audit 2026-03-10T08:57:06.024595+0000 mon.vm02 (mon.0) 1323 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T08:57:07.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: audit 2026-03-10T08:57:06.024595+0000 mon.vm02 (mon.0) 1323 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-10T08:57:07.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: audit 2026-03-10T08:57:06.225507+0000 mon.vm02 (mon.0) 1324 : audit [DBG] from='client.? 192.168.123.102:0/3705374886' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-10T08:57:07.033 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:06 vm02 bash[17473]: audit 2026-03-10T08:57:06.225507+0000 mon.vm02 (mon.0) 1324 : audit [DBG] from='client.? 
2026-03-10T08:57:09.032 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:08 vm02 bash[17473]: cluster 2026-03-10T08:57:07.682468+0000 mgr.vm02.ttibzz (mgr.14195) 1157 : cluster [DBG] pgmap v673: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:57:11.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:10 vm02 bash[17473]: cluster 2026-03-10T08:57:09.682986+0000 mgr.vm02.ttibzz (mgr.14195) 1158 : cluster [DBG] pgmap v674: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:57:11.399 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for haproxy.rgw.foo.vm07.plwsjk to start
2026-03-10T08:57:11.560 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:57:11.560 INFO:teuthology.orchestra.run.vm02.stdout:haproxy.rgw.foo.vm02.rwnyxr vm02 *:9000,9001 running (25s) 20s ago 17m 3520k - 2.3.17-d1c9119 e85424b0d443 c38c281926f7
2026-03-10T08:57:11.560 INFO:teuthology.orchestra.run.vm02.stdout:haproxy.rgw.foo.vm07.plwsjk vm07 *:9000,9001 stopped 8s ago 18m - -
2026-03-10T08:57:11.758 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK
2026-03-10T08:57:12.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:11 vm02 bash[17473]: audit 2026-03-10T08:57:11.763117+0000 mon.vm02 (mon.0) 1325 : audit [DBG] from='client.? 192.168.123.102:0/522363544' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:57:13.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:12 vm02 bash[17473]: audit 2026-03-10T08:57:11.389061+0000 mgr.vm02.ttibzz (mgr.14195) 1159 : audit [DBG] from='client.16992 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:57:13.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:12 vm02 bash[17473]: audit 2026-03-10T08:57:11.562340+0000 mgr.vm02.ttibzz (mgr.14195) 1160 : audit [DBG] from='client.16996 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:57:13.196 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:12 vm02 bash[17473]: cluster 2026-03-10T08:57:11.683473+0000 mgr.vm02.ttibzz (mgr.14195) 1161 : cluster [DBG] pgmap v675: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:57:15.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:14 vm02 bash[17473]: cluster 2026-03-10T08:57:13.684012+0000 mgr.vm02.ttibzz (mgr.14195) 1162 : cluster [DBG] pgmap v676: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:57:15.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:14 vm02 bash[17473]: audit 2026-03-10T08:57:14.926917+0000 mon.vm02 (mon.0) 1326 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:57:16.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:15 vm02 bash[17473]: audit 2026-03-10T08:57:14.933150+0000 mon.vm02 (mon.0) 1327 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:57:16.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:15 vm02 bash[17473]: audit 2026-03-10T08:57:14.966114+0000 mon.vm02 (mon.0) 1328 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-10T08:57:16.951 INFO:teuthology.orchestra.run.vm02.stdout:Waiting for haproxy.rgw.foo.vm07.plwsjk to start
2026-03-10T08:57:17.153 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:57:17.153 INFO:teuthology.orchestra.run.vm02.stdout:haproxy.rgw.foo.vm02.rwnyxr vm02 *:9000,9001 running (31s) 26s ago 18m 3520k - 2.3.17-d1c9119 e85424b0d443 c38c281926f7
2026-03-10T08:57:17.153 INFO:teuthology.orchestra.run.vm02.stdout:haproxy.rgw.foo.vm07.plwsjk vm07 *:9000,9001 stopped 13s ago 18m - -
2026-03-10T08:57:17.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:16 vm02 bash[17473]: cluster 2026-03-10T08:57:15.684417+0000 mgr.vm02.ttibzz (mgr.14195) 1163 : cluster [DBG] pgmap v677: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:57:17.354 INFO:teuthology.orchestra.run.vm02.stdout:HEALTH_OK
2026-03-10T08:57:18.278 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:17 vm02 bash[17473]: audit 2026-03-10T08:57:16.939882+0000 mgr.vm02.ttibzz (mgr.14195) 1164 : audit [DBG] from='client.17004 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:57:18.278 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:17 vm02 bash[17473]: audit 2026-03-10T08:57:17.358914+0000 mon.vm02 (mon.0) 1329 : audit [DBG] from='client.? 192.168.123.102:0/2594363861' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-10T08:57:19.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:18 vm02 bash[17473]: audit 2026-03-10T08:57:17.155925+0000 mgr.vm02.ttibzz (mgr.14195) 1165 : audit [DBG] from='client.17008 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:57:19.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:18 vm02 bash[17473]: cluster 2026-03-10T08:57:17.684786+0000 mgr.vm02.ttibzz (mgr.14195) 1166 : cluster [DBG] pgmap v678: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:57:20.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:19 vm02 bash[17473]: audit 2026-03-10T08:57:19.261103+0000 mon.vm02 (mon.0) 1330 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:57:20.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:19 vm02 bash[17473]: audit 2026-03-10T08:57:19.426661+0000 mon.vm02 (mon.0) 1331 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:57:20.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:19 vm02 bash[17473]: audit 2026-03-10T08:57:19.432339+0000 mon.vm02 (mon.0) 1332 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:57:20.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:19 vm02 bash[17473]: audit 2026-03-10T08:57:19.433444+0000 mon.vm02 (mon.0) 1333 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-10T08:57:20.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:19 vm02 bash[17473]: audit 2026-03-10T08:57:19.434085+0000 mon.vm02 (mon.0) 1334 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-10T08:57:20.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:19 vm02 bash[17473]: audit 2026-03-10T08:57:19.439759+0000 mon.vm02 (mon.0) 1335 : audit [INF] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz'
2026-03-10T08:57:20.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:19 vm02 bash[17473]: audit 2026-03-10T08:57:19.442601+0000 mon.vm02 (mon.0) 1336 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-10T08:57:21.281 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:20 vm02 bash[17473]: cluster 2026-03-10T08:57:19.685300+0000 mgr.vm02.ttibzz (mgr.14195) 1167 : cluster [DBG] pgmap v679: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:57:22.530 INFO:teuthology.orchestra.run.vm02.stdout:haproxy.rgw.foo.vm07.plwsjk vm07 *:9000,9001 running (7s) 3s ago 18m 3491k - 2.3.17-d1c9119 e85424b0d443 057861cf736a
2026-03-10T08:57:22.535 INFO:teuthology.orchestra.run.vm02.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-03-10T08:57:22.535 INFO:teuthology.orchestra.run.vm02.stderr: Dload Upload Total Spent Left Speed
2026-03-10T08:57:22.535 INFO:teuthology.orchestra.run.vm02.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k
2026-03-10T08:57:22.595 INFO:teuthology.orchestra.run.vm02.stdout:anonymous
2026-03-10T08:57:22.595 INFO:teuthology.run_tasks:Running task cephadm.shell...
2026-03-10T08:57:22.598 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm02.local
2026-03-10T08:57:22.598 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- bash -c 'stat -c '"'"'%u %g'"'"' /var/log/ceph | grep '"'"'167 167'"'"''
2026-03-10T08:57:22.998 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:23 vm02 bash[17473]: cluster 2026-03-10T08:57:21.685708+0000 mgr.vm02.ttibzz (mgr.14195) 1168 : cluster [DBG] pgmap v680: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:57:24.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:24 vm02 bash[17473]: audit 2026-03-10T08:57:22.518388+0000 mgr.vm02.ttibzz (mgr.14195) 1169 : audit [DBG] from='client.17016 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:57:25.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:25 vm02 bash[17473]: cluster 2026-03-10T08:57:23.686157+0000 mgr.vm02.ttibzz (mgr.14195) 1170 : cluster [DBG] pgmap v681: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:57:27.272 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config
2026-03-10T08:57:27.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:27 vm02 bash[17473]: cluster 2026-03-10T08:57:25.686636+0000 mgr.vm02.ttibzz (mgr.14195) 1171 : cluster [DBG] pgmap v682: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:57:27.397 INFO:teuthology.orchestra.run.vm02.stdout:167 167
2026-03-10T08:57:27.458 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- bash -c 'ceph orch status'
2026-03-10T08:57:28.532 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:28 vm02 bash[17473]: cluster 2026-03-10T08:57:27.687037+0000 mgr.vm02.ttibzz (mgr.14195) 1172 : cluster [DBG] pgmap v683: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:57:31.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:30 vm02 bash[17473]: cluster 2026-03-10T08:57:29.687670+0000 mgr.vm02.ttibzz (mgr.14195) 1173 : cluster [DBG] pgmap v684: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s
2026-03-10T08:57:31.315 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config
2026-03-10T08:57:31.579 INFO:teuthology.orchestra.run.vm02.stdout:Backend: cephadm
2026-03-10T08:57:31.579 INFO:teuthology.orchestra.run.vm02.stdout:Available: Yes
2026-03-10T08:57:31.579 INFO:teuthology.orchestra.run.vm02.stdout:Paused: No
2026-03-10T08:57:31.640 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- bash -c 'ceph orch ps'
2026-03-10T08:57:33.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:32 vm02 bash[17473]: audit 2026-03-10T08:57:31.583114+0000 mgr.vm02.ttibzz (mgr.14195) 1174 : audit [DBG] from='client.17020 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:57:33.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:32 vm02 bash[17473]: cluster 2026-03-10T08:57:31.688191+0000 mgr.vm02.ttibzz (mgr.14195) 1175 : cluster [DBG] pgmap v685: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:57:35.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:34 vm02 bash[17473]: cluster 2026-03-10T08:57:33.688651+0000 mgr.vm02.ttibzz (mgr.14195) 1176 : cluster [DBG] pgmap v686: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:57:35.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:34 vm02 bash[17473]: audit 2026-03-10T08:57:34.261302+0000 mon.vm02 (mon.0) 1337 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:57:35.357 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config
2026-03-10T08:57:35.644 INFO:teuthology.orchestra.run.vm02.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-10T08:57:35.644 INFO:teuthology.orchestra.run.vm02.stdout:alertmanager.vm02 vm02 *:9093,9094 running (21m) 44s ago 21m 14.4M - 0.25.0 c8568f914cd2 16c87a6d1f4a
2026-03-10T08:57:35.644 INFO:teuthology.orchestra.run.vm02.stdout:ceph-exporter.vm02 vm02 *:9926 running (21m) 44s ago 21m 10.5M - 19.2.3-678-ge911bdeb 654f31e6858e 12e8503af357
2026-03-10T08:57:35.644 INFO:teuthology.orchestra.run.vm02.stdout:ceph-exporter.vm07 vm07 *:9926 running (21m) 16s ago 21m 6072k - 19.2.3-678-ge911bdeb 654f31e6858e a8d32ada6006
2026-03-10T08:57:35.644 INFO:teuthology.orchestra.run.vm02.stdout:crash.vm02 vm02 running (21m) 44s ago 21m 7296k - 19.2.3-678-ge911bdeb 654f31e6858e 4106c3e4838e
2026-03-10T08:57:35.644 INFO:teuthology.orchestra.run.vm02.stdout:crash.vm07 vm07 running (21m) 16s ago 21m 7300k - 19.2.3-678-ge911bdeb 654f31e6858e 3100cb6ea492
2026-03-10T08:57:35.644 INFO:teuthology.orchestra.run.vm02.stdout:grafana.vm02 vm02 *:3000 running (21m) 44s ago 21m 66.5M - 10.4.0 c8b91775d855 05dae469dd45
2026-03-10T08:57:35.644 INFO:teuthology.orchestra.run.vm02.stdout:haproxy.rgw.foo.vm02.rwnyxr vm02 *:9000,9001 running (49s) 44s ago 18m 3520k - 2.3.17-d1c9119 e85424b0d443 c38c281926f7
2026-03-10T08:57:35.644 INFO:teuthology.orchestra.run.vm02.stdout:haproxy.rgw.foo.vm07.plwsjk vm07 *:9000,9001 running (20s) 16s ago 18m 3491k - 2.3.17-d1c9119 e85424b0d443 057861cf736a
2026-03-10T08:57:35.644 INFO:teuthology.orchestra.run.vm02.stdout:keepalived.rgw.foo.vm02.sbacfj vm02 running (18m) 44s ago 18m 2480k - 2.2.4 4a3a1ff181d9 685b915be9c1
2026-03-10T08:57:35.644 INFO:teuthology.orchestra.run.vm02.stdout:keepalived.rgw.foo.vm07.fctrof vm07 running (18m) 16s ago 18m 2492k - 2.2.4 4a3a1ff181d9 2bc8f983075f
2026-03-10T08:57:35.644 INFO:teuthology.orchestra.run.vm02.stdout:mgr.vm02.ttibzz vm02 *:9283,8765,8443 running (22m) 44s ago 22m 548M - 19.2.3-678-ge911bdeb 654f31e6858e 8d9c2a7da34e
2026-03-10T08:57:35.644 INFO:teuthology.orchestra.run.vm02.stdout:mgr.vm07.aunzpk vm07 *:8443,9283,8765 running (21m) 16s ago 21m 474M - 19.2.3-678-ge911bdeb 654f31e6858e 1af99d1fee53
2026-03-10T08:57:35.644 INFO:teuthology.orchestra.run.vm02.stdout:mon.vm02 vm02 running (22m) 44s ago 22m 69.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e ba23f51f501b
2026-03-10T08:57:35.644 INFO:teuthology.orchestra.run.vm02.stdout:mon.vm07 vm07 running (21m) 16s ago 21m 43.9M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 71f3c37e9d30
2026-03-10T08:57:35.644 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.vm02 vm02 *:9100 running (21m) 44s ago 21m 8368k - 1.7.0 72c9c2088986 0351d8835d2b
2026-03-10T08:57:35.644 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter.vm07 vm07 *:9100 running (21m) 16s ago 21m 8212k - 1.7.0 72c9c2088986 0cfd18951c12
2026-03-10T08:57:35.644 INFO:teuthology.orchestra.run.vm02.stdout:osd.0 vm02 running (20m) 44s ago 20m 53.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e bb43d29b6166
2026-03-10T08:57:35.644 INFO:teuthology.orchestra.run.vm02.stdout:osd.1 vm07 running (20m) 16s ago 20m 75.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e a07a6e9c27d7
2026-03-10T08:57:35.644 INFO:teuthology.orchestra.run.vm02.stdout:osd.2 vm02 running (20m) 44s ago 20m 71.8M 4096M 19.2.3-678-ge911bdeb 654f31e6858e a95fdb8431e2
2026-03-10T08:57:35.644 INFO:teuthology.orchestra.run.vm02.stdout:osd.3 vm07 running (20m) 16s ago 20m 57.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e f420a4d87e7d
2026-03-10T08:57:35.644 INFO:teuthology.orchestra.run.vm02.stdout:osd.4 vm02 running (20m) 44s ago 20m 57.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 4e32ee3ddf6b
2026-03-10T08:57:35.644 INFO:teuthology.orchestra.run.vm02.stdout:osd.5 vm07 running (20m) 16s ago 20m 75.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e c45cd22aa88c
2026-03-10T08:57:35.644 INFO:teuthology.orchestra.run.vm02.stdout:osd.6 vm02 running (20m) 44s ago 20m 73.7M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 42c11c146777
2026-03-10T08:57:35.644 INFO:teuthology.orchestra.run.vm02.stdout:osd.7 vm07 running (20m) 16s ago 20m 55.3M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 3ce871016ee2
2026-03-10T08:57:35.645 INFO:teuthology.orchestra.run.vm02.stdout:prometheus.vm02 vm02 *:9095 running (18m) 44s ago 21m 54.4M - 2.51.0 1d3b7f56885b 9758b9389ae1
2026-03-10T08:57:35.645 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.bmgnwf vm02 *:8001 running (12m) 44s ago 18m 123M - 19.2.3-678-ge911bdeb 654f31e6858e b100074ef3fc
2026-03-10T08:57:35.645 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm02.rugqqv vm02 *:8000 running (12m) 44s ago 18m 125M - 19.2.3-678-ge911bdeb 654f31e6858e 5dfbee772545
2026-03-10T08:57:35.645 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.wecerd vm07 *:8000 running (6m) 16s ago 18m 105M - 19.2.3-678-ge911bdeb 654f31e6858e 6dc79842fd85
2026-03-10T08:57:35.645 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo.vm07.zylyez vm07 *:8001 running (80s) 16s ago 18m 91.6M - 19.2.3-678-ge911bdeb 654f31e6858e 945cb65f23ce
2026-03-10T08:57:35.702 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- bash -c 'ceph orch ls'
2026-03-10T08:57:37.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:36 vm02 bash[17473]: audit 2026-03-10T08:57:35.642360+0000 mgr.vm02.ttibzz (mgr.14195) 1177 : audit [DBG] from='client.17024 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:57:37.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:36 vm02 bash[17473]: cluster 2026-03-10T08:57:35.689047+0000 mgr.vm02.ttibzz (mgr.14195) 1178 : cluster [DBG] pgmap v687: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:57:39.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:38 vm02 bash[17473]: cluster 2026-03-10T08:57:37.689477+0000 mgr.vm02.ttibzz (mgr.14195) 1179 : cluster [DBG] pgmap v688: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:57:39.401 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config
2026-03-10T08:57:39.721 INFO:teuthology.orchestra.run.vm02.stdout:NAME PORTS RUNNING REFRESHED AGE PLACEMENT
2026-03-10T08:57:39.721 INFO:teuthology.orchestra.run.vm02.stdout:alertmanager ?:9093,9094 1/1 49s ago 22m count:1
2026-03-10T08:57:39.721 INFO:teuthology.orchestra.run.vm02.stdout:ceph-exporter ?:9926 2/2 49s ago 22m *
2026-03-10T08:57:39.721 INFO:teuthology.orchestra.run.vm02.stdout:crash 2/2 49s ago 22m *
2026-03-10T08:57:39.722 INFO:teuthology.orchestra.run.vm02.stdout:grafana ?:3000 1/1 49s ago 22m count:1
2026-03-10T08:57:39.722 INFO:teuthology.orchestra.run.vm02.stdout:ingress.rgw.foo 12.12.1.102:9000,9001 4/4 49s ago 18m count:2
2026-03-10T08:57:39.722 INFO:teuthology.orchestra.run.vm02.stdout:mgr 2/2 49s ago 22m count:2
2026-03-10T08:57:39.722 INFO:teuthology.orchestra.run.vm02.stdout:mon 2/2 49s ago 21m vm02:192.168.123.102=vm02;vm07:192.168.123.107=vm07;count:2
2026-03-10T08:57:39.722 INFO:teuthology.orchestra.run.vm02.stdout:node-exporter ?:9100 2/2 49s ago 22m *
2026-03-10T08:57:39.722 INFO:teuthology.orchestra.run.vm02.stdout:osd.all-available-devices 8 49s ago 21m *
2026-03-10T08:57:39.722 INFO:teuthology.orchestra.run.vm02.stdout:prometheus ?:9095 1/1 49s ago 22m count:1
2026-03-10T08:57:39.722 INFO:teuthology.orchestra.run.vm02.stdout:rgw.foo ?:8000 4/4 49s ago 18m count:4;*
2026-03-10T08:57:39.787 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- bash -c 'ceph orch host ls'
2026-03-10T08:57:41.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:40 vm02 bash[17473]: cluster 2026-03-10T08:57:39.689964+0000 mgr.vm02.ttibzz (mgr.14195) 1180 : cluster [DBG] pgmap v689: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:57:41.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:40 vm02 bash[17473]: audit 2026-03-10T08:57:39.721490+0000 mgr.vm02.ttibzz (mgr.14195) 1181 : audit [DBG] from='client.17028 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:57:43.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:42 vm02 bash[17473]: cluster 2026-03-10T08:57:41.690538+0000 mgr.vm02.ttibzz (mgr.14195) 1182 : cluster [DBG] pgmap v690: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:57:43.445 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config
2026-03-10T08:57:43.714 INFO:teuthology.orchestra.run.vm02.stdout:HOST ADDR LABELS STATUS
2026-03-10T08:57:43.714 INFO:teuthology.orchestra.run.vm02.stdout:vm02 192.168.123.102
2026-03-10T08:57:43.714 INFO:teuthology.orchestra.run.vm02.stdout:vm07 192.168.123.107
2026-03-10T08:57:43.714 INFO:teuthology.orchestra.run.vm02.stdout:2 hosts in cluster
2026-03-10T08:57:43.776 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- bash -c 'ceph orch device ls'
2026-03-10T08:57:45.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:44 vm02 bash[17473]: cluster 2026-03-10T08:57:43.690975+0000 mgr.vm02.ttibzz (mgr.14195) 1183 : cluster [DBG] pgmap v691: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:57:45.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:44 vm02 bash[17473]: audit 2026-03-10T08:57:43.717649+0000 mgr.vm02.ttibzz (mgr.14195) 1184 : audit [DBG] from='client.17032 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:57:47.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:46 vm02 bash[17473]: cluster 2026-03-10T08:57:45.692926+0000 mgr.vm02.ttibzz (mgr.14195) 1185 : cluster [DBG] pgmap v692: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:57:47.492 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config
2026-03-10T08:57:47.767 INFO:teuthology.orchestra.run.vm02.stdout:HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS
2026-03-10T08:57:47.767 INFO:teuthology.orchestra.run.vm02.stdout:vm02 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 18m ago Has a FileSystem, Insufficient space (<5GB)
2026-03-10T08:57:47.767 INFO:teuthology.orchestra.run.vm02.stdout:vm02 /dev/vdb hdd DWNBRSTVMM02001 20.0G No 18m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-03-10T08:57:47.767 INFO:teuthology.orchestra.run.vm02.stdout:vm02 /dev/vdc hdd DWNBRSTVMM02002 20.0G No 18m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-03-10T08:57:47.767 INFO:teuthology.orchestra.run.vm02.stdout:vm02 /dev/vdd hdd DWNBRSTVMM02003 20.0G No 18m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-03-10T08:57:47.767 INFO:teuthology.orchestra.run.vm02.stdout:vm02 /dev/vde hdd DWNBRSTVMM02004 20.0G No 18m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-03-10T08:57:47.767 INFO:teuthology.orchestra.run.vm02.stdout:vm07 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 18m ago Has a FileSystem, Insufficient space (<5GB)
2026-03-10T08:57:47.767 INFO:teuthology.orchestra.run.vm02.stdout:vm07 /dev/vdb hdd DWNBRSTVMM07001 20.0G No 18m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-03-10T08:57:47.767 INFO:teuthology.orchestra.run.vm02.stdout:vm07 /dev/vdc hdd DWNBRSTVMM07002 20.0G No 18m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-03-10T08:57:47.767 INFO:teuthology.orchestra.run.vm02.stdout:vm07 /dev/vdd hdd DWNBRSTVMM07003 20.0G No 18m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-03-10T08:57:47.767 INFO:teuthology.orchestra.run.vm02.stdout:vm07 /dev/vde hdd DWNBRSTVMM07004 20.0G No 18m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-03-10T08:57:47.828 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- bash -c 'ceph orch ls | grep '"'"'^osd.all-available-devices '"'"''
2026-03-10T08:57:49.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:48 vm02 bash[17473]: cluster 2026-03-10T08:57:47.693270+0000 mgr.vm02.ttibzz (mgr.14195) 1186 : cluster [DBG] pgmap v693: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:57:49.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:48 vm02 bash[17473]: audit 2026-03-10T08:57:47.770311+0000 mgr.vm02.ttibzz (mgr.14195) 1187 : audit [DBG] from='client.17036 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:57:50.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:49 vm02 bash[17473]: audit 2026-03-10T08:57:49.261530+0000 mon.vm02 (mon.0) 1338 : audit [DBG] from='mgr.14195 192.168.123.102:0/1575492680' entity='mgr.vm02.ttibzz' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-10T08:57:51.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:50 vm02 bash[17473]: cluster 2026-03-10T08:57:49.693745+0000 mgr.vm02.ttibzz (mgr.14195) 1188 : cluster [DBG] pgmap v694: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:57:51.536 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config
2026-03-10T08:57:51.924 INFO:teuthology.orchestra.run.vm02.stdout:osd.all-available-devices 8 61s ago 21m *
2026-03-10T08:57:52.006 DEBUG:teuthology.run_tasks:Unwinding manager vip
2026-03-10T08:57:52.009 INFO:tasks.vip:Removing 12.12.0.102 (and any VIPs) on vm02.local iface ens3...
2026-03-10T08:57:52.009 DEBUG:teuthology.orchestra.run.vm02:> sudo ip addr del 12.12.0.102/22 dev ens3
2026-03-10T08:57:52.018 DEBUG:teuthology.orchestra.run.vm02:> sudo ip addr del 12.12.1.102/22 dev ens3
2026-03-10T08:57:52.067 INFO:tasks.vip:Removing 12.12.0.107 (and any VIPs) on vm07.local iface ens3...
2026-03-10T08:57:52.068 DEBUG:teuthology.orchestra.run.vm07:> sudo ip addr del 12.12.0.107/22 dev ens3
2026-03-10T08:57:52.076 DEBUG:teuthology.orchestra.run.vm07:> sudo ip addr del 12.12.1.102/22 dev ens3
2026-03-10T08:57:52.124 INFO:teuthology.orchestra.run.vm07.stderr:RTNETLINK answers: Cannot assign requested address
2026-03-10T08:57:52.125 DEBUG:teuthology.orchestra.run:got remote process result: 2
2026-03-10T08:57:52.125 DEBUG:teuthology.run_tasks:Unwinding manager cephadm
2026-03-10T08:57:52.127 INFO:tasks.cephadm:Teardown begin
2026-03-10T08:57:52.127 DEBUG:teuthology.orchestra.run.vm02:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-03-10T08:57:52.137 DEBUG:teuthology.orchestra.run.vm07:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-03-10T08:57:52.176 INFO:tasks.cephadm:Disabling cephadm mgr module
2026-03-10T08:57:52.176 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm --image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 -- ceph mgr module disable cephadm
2026-03-10T08:57:53.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:52 vm02 bash[17473]: cluster 2026-03-10T08:57:51.694179+0000 mgr.vm02.ttibzz (mgr.14195) 1189 : cluster [DBG] pgmap v695: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:57:53.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:52 vm02 bash[17473]: audit 2026-03-10T08:57:51.915869+0000 mgr.vm02.ttibzz (mgr.14195) 1190 : audit [DBG] from='client.17040 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-10T08:57:55.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:55 vm02 bash[17473]: cluster 2026-03-10T08:57:53.694623+0000 mgr.vm02.ttibzz (mgr.14195) 1191 : cluster [DBG] pgmap v696: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-03-10T08:57:56.833 INFO:teuthology.orchestra.run.vm02.stderr:Inferring config /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/mon.vm02/config
2026-03-10T08:57:56.979 INFO:teuthology.orchestra.run.vm02.stderr:2026-03-10T08:57:56.982+0000 7fba49855640 -1 auth: error reading file: /etc/ceph/ceph.keyring: bufferlist::read_file(/etc/ceph/ceph.keyring): read error:(21) Is a directory
2026-03-10T08:57:56.979 INFO:teuthology.orchestra.run.vm02.stderr:2026-03-10T08:57:56.982+0000 7fba49855640 -1 auth: failed to load /etc/ceph/ceph.keyring: (21) Is a directory
2026-03-10T08:57:56.979 INFO:teuthology.orchestra.run.vm02.stderr:2026-03-10T08:57:56.982+0000 7fba49855640 -1 auth: error reading file: /etc/ceph/ceph.keyring: bufferlist::read_file(/etc/ceph/ceph.keyring): read error:(21) Is a directory
2026-03-10T08:57:56.979 INFO:teuthology.orchestra.run.vm02.stderr:2026-03-10T08:57:56.982+0000 7fba49855640 -1 auth: failed to load /etc/ceph/ceph.keyring: (21) Is a directory
2026-03-10T08:57:56.979 INFO:teuthology.orchestra.run.vm02.stderr:2026-03-10T08:57:56.982+0000 7fba49855640 -1 auth: error reading file: /etc/ceph/ceph.keyring: bufferlist::read_file(/etc/ceph/ceph.keyring): read error:(21) Is a directory
2026-03-10T08:57:56.979 INFO:teuthology.orchestra.run.vm02.stderr:2026-03-10T08:57:56.982+0000 7fba49855640 -1 auth: failed to load /etc/ceph/ceph.keyring: (21) Is a directory
2026-03-10T08:57:56.979 INFO:teuthology.orchestra.run.vm02.stderr:2026-03-10T08:57:56.982+0000 7fba49855640 -1 monclient: keyring not found
2026-03-10T08:57:56.980 INFO:teuthology.orchestra.run.vm02.stderr:[errno 21] error connecting to the cluster
2026-03-10T08:57:57.030 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T08:57:57.030 INFO:tasks.cephadm:Cleaning up testdir ceph.* files...
2026-03-10T08:57:57.030 DEBUG:teuthology.orchestra.run.vm02:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub
2026-03-10T08:57:57.033 DEBUG:teuthology.orchestra.run.vm07:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub
2026-03-10T08:57:57.036 INFO:tasks.cephadm:Stopping all daemons...
2026-03-10T08:57:57.036 INFO:tasks.cephadm.mon.vm02:Stopping mon.vm02...
2026-03-10T08:57:57.036 DEBUG:teuthology.orchestra.run.vm02:> sudo systemctl stop ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@mon.vm02
2026-03-10T08:57:57.134 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:57 vm02 bash[17473]: cluster 2026-03-10T08:57:55.695016+0000 mgr.vm02.ttibzz (mgr.14195) 1192 : cluster [DBG] pgmap v697: 129 pgs: 129 active+clean; 454 KiB data, 259 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-03-10T08:57:57.134 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:57 vm02 systemd[1]: Stopping Ceph mon.vm02 for e750d050-1c5b-11f1-9e63-531fde0192f6...
2026-03-10T08:57:57.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:57 vm02 bash[17473]: debug 2026-03-10T08:57:57.134+0000 7fdb02bb8640 -1 received signal: Terminated from /sbin/docker-init -- /usr/bin/ceph-mon -n mon.vm02 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-stderr=true --default-log-stderr-prefix=debug --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-stderr=true (PID: 1) UID: 0
2026-03-10T08:57:57.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:57 vm02 bash[17473]: debug 2026-03-10T08:57:57.134+0000 7fdb02bb8640 -1 mon.vm02@0(leader) e2 *** Got Signal Terminated ***
2026-03-10T08:57:57.282 INFO:journalctl@ceph.mon.vm02.vm02.stdout:Mar 10 08:57:57 vm02 bash[78238]: ceph-e750d050-1c5b-11f1-9e63-531fde0192f6-mon-vm02
2026-03-10T08:57:57.324 DEBUG:teuthology.orchestra.run.vm02:> sudo pkill -f 'journalctl -f -n 0 -u ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@mon.vm02.service'
2026-03-10T08:57:57.348 DEBUG:teuthology.orchestra.run:got remote process result: None
2026-03-10T08:57:57.348 INFO:tasks.cephadm.mon.vm02:Stopped mon.vm02
2026-03-10T08:57:57.348 INFO:tasks.cephadm.mon.vm07:Stopping mon.vm07...
2026-03-10T08:57:57.348 DEBUG:teuthology.orchestra.run.vm07:> sudo systemctl stop ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@mon.vm07
2026-03-10T08:57:57.492 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:57:57 vm07 systemd[1]: Stopping Ceph mon.vm07 for e750d050-1c5b-11f1-9e63-531fde0192f6...
2026-03-10T08:57:57.492 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:57:57 vm07 bash[20988]: debug 2026-03-10T08:57:57.412+0000 7f94ec03e640 -1 received signal: Terminated from /sbin/docker-init -- /usr/bin/ceph-mon -n mon.vm07 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-stderr=true --default-log-stderr-prefix=debug --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-stderr=true (PID: 1) UID: 0
2026-03-10T08:57:57.492 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:57:57 vm07 bash[20988]: debug 2026-03-10T08:57:57.412+0000 7f94ec03e640 -1 mon.vm07@1(peon) e2 *** Got Signal Terminated ***
2026-03-10T08:57:57.631 INFO:journalctl@ceph.mon.vm07.vm07.stdout:Mar 10 08:57:57 vm07 bash[47730]: ceph-e750d050-1c5b-11f1-9e63-531fde0192f6-mon-vm07
2026-03-10T08:57:57.633 DEBUG:teuthology.orchestra.run.vm07:> sudo pkill -f 'journalctl -f -n 0 -u ceph-e750d050-1c5b-11f1-9e63-531fde0192f6@mon.vm07.service'
2026-03-10T08:57:57.650 DEBUG:teuthology.orchestra.run:got remote process result: None
2026-03-10T08:57:57.650 INFO:tasks.cephadm.mon.vm07:Stopped mon.vm07
2026-03-10T08:57:57.650 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 --force --keep-logs
2026-03-10T08:57:57.746 INFO:teuthology.orchestra.run.vm02.stdout:Deleting cluster with fsid: e750d050-1c5b-11f1-9e63-531fde0192f6
2026-03-10T08:58:52.396 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 --force --keep-logs
2026-03-10T08:58:52.483 INFO:teuthology.orchestra.run.vm07.stdout:Deleting cluster with fsid: e750d050-1c5b-11f1-9e63-531fde0192f6
2026-03-10T08:59:44.074 DEBUG:teuthology.orchestra.run.vm02:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-03-10T08:59:44.081 INFO:teuthology.orchestra.run.vm02.stderr:rm: cannot remove '/etc/ceph/ceph.client.admin.keyring': Is a directory
2026-03-10T08:59:44.081 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T08:59:44.081 DEBUG:teuthology.orchestra.run.vm07:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-03-10T08:59:44.087 INFO:tasks.cephadm:Archiving crash dumps...
2026-03-10T08:59:44.088 DEBUG:teuthology.misc:Transferring archived files from vm02:/var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/crash to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/966/remote/vm02/crash
2026-03-10T08:59:44.088 DEBUG:teuthology.orchestra.run.vm02:> sudo tar c -f - -C /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/crash -- .
2026-03-10T08:59:44.130 INFO:teuthology.orchestra.run.vm02.stderr:tar: /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/crash: Cannot open: No such file or directory
2026-03-10T08:59:44.130 INFO:teuthology.orchestra.run.vm02.stderr:tar: Error is not recoverable: exiting now
2026-03-10T08:59:44.131 DEBUG:teuthology.misc:Transferring archived files from vm07:/var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/crash to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/966/remote/vm07/crash
2026-03-10T08:59:44.131 DEBUG:teuthology.orchestra.run.vm07:> sudo tar c -f - -C /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/crash -- .
2026-03-10T08:59:44.137 INFO:teuthology.orchestra.run.vm07.stderr:tar: /var/lib/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/crash: Cannot open: No such file or directory
2026-03-10T08:59:44.137 INFO:teuthology.orchestra.run.vm07.stderr:tar: Error is not recoverable: exiting now
2026-03-10T08:59:44.138 INFO:tasks.cephadm:Checking cluster log for badness...
2026-03-10T08:59:44.138 DEBUG:teuthology.orchestra.run.vm02:> sudo egrep '\[ERR\]|\[WRN\]|\[SEC\]' /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph.log | egrep CEPHADM_ | egrep -v '\(MDS_ALL_DOWN\)' | egrep -v '\(MDS_UP_LESS_THAN_MAX\)' | egrep -v CEPHADM_DAEMON_PLACE_FAIL | egrep -v CEPHADM_FAILED_DAEMON | head -n 1
2026-03-10T08:59:44.185 INFO:tasks.cephadm:Compressing logs...
2026-03-10T08:59:44.185 DEBUG:teuthology.orchestra.run.vm02:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose --
2026-03-10T08:59:44.227 DEBUG:teuthology.orchestra.run.vm07:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose --
2026-03-10T08:59:44.232 INFO:teuthology.orchestra.run.vm02.stderr:find: ‘/var/log/rbd-target-api’: No such file or directory
2026-03-10T08:59:44.233 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /var/log/ceph/cephadm.log
2026-03-10T08:59:44.233 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-client.ceph-exporter.vm02.log
2026-03-10T08:59:44.233 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-mgr.vm02.ttibzz.log
2026-03-10T08:59:44.234 INFO:teuthology.orchestra.run.vm07.stderr:find: ‘/var/log/rbd-target-api’: No such file or directory
2026-03-10T08:59:44.234 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/cephadm.log: /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-client.ceph-exporter.vm02.log: gzip -5 --verbose -- /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph.log
2026-03-10T08:59:44.235 INFO:teuthology.orchestra.run.vm07.stderr:gzip -5 --verbose -- /var/log/ceph/cephadm.log
2026-03-10T08:59:44.235 INFO:teuthology.orchestra.run.vm07.stderr:gzip -5 --verbose -- /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-client.ceph-exporter.vm07.log
2026-03-10T08:59:44.235 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-mgr.vm02.ttibzz.log: 94.9% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-client.ceph-exporter.vm02.log.gz
2026-03-10T08:59:44.235 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/cephadm.log: gzip -5 --verbose -- /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-osd.3.log
2026-03-10T08:59:44.235 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-mon.vm02.log
2026-03-10T08:59:44.235 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-client.ceph-exporter.vm07.log: 30.2% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-client.ceph-exporter.vm07.log.gz
2026-03-10T08:59:44.236 INFO:teuthology.orchestra.run.vm07.stderr:gzip -5 --verbose -- /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph.log
2026-03-10T08:59:44.236 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-osd.3.log: 90.0% -- replaced with /var/log/ceph/cephadm.log.gz
2026-03-10T08:59:44.237 INFO:teuthology.orchestra.run.vm07.stderr:gzip -5 --verbose -- /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-osd.1.log
2026-03-10T08:59:44.237 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph.log: 90.2% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph.log.gz
2026-03-10T08:59:44.237 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ops-log-ceph-client.rgw.foo.vm02.rugqqv.log
2026-03-10T08:59:44.238 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph.log: 90.3% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph.log.gz
INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph.log: 90.3% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph.log.gz 2026-03-10T08:59:44.238 INFO:teuthology.orchestra.run.vm07.stderr:gzip -5 --verbose -- /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-osd.5.log 2026-03-10T08:59:44.245 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-osd.1.log: gzip -5 --verbose -- /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-mgr.vm07.aunzpk.log 2026-03-10T08:59:44.256 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-mon.vm02.log: 91.1% -- replaced with /var/log/ceph/cephadm.log.gz 2026-03-10T08:59:44.256 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-osd.6.log 2026-03-10T08:59:44.265 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-osd.5.log: gzip -5 --verbose -- /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-osd.7.log 2026-03-10T08:59:44.265 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ops-log-ceph-client.rgw.foo.vm02.rugqqv.log: 93.2% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ops-log-ceph-client.rgw.foo.vm02.rugqqv.log.gz 2026-03-10T08:59:44.265 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-client.rgw.foo.vm02.bmgnwf.log 2026-03-10T08:59:44.273 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-mgr.vm07.aunzpk.log: 93.3% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-mgr.vm07.aunzpk.log.gz 2026-03-10T08:59:44.281 INFO:teuthology.orchestra.run.vm07.stderr:gzip -5 --verbose -- /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-client.rgw.foo.vm07.zylyez.log 2026-03-10T08:59:44.287 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-osd.6.log: gzip -5 --verbose -- /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-osd.2.log 2026-03-10T08:59:44.289 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-osd.7.log: gzip -5 --verbose -- /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph.audit.log 2026-03-10T08:59:44.289 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-client.rgw.foo.vm02.bmgnwf.log: 94.3% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-client.rgw.foo.vm02.bmgnwf.log.gz 2026-03-10T08:59:44.293 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-client.rgw.foo.vm07.zylyez.log: 93.9% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-client.rgw.foo.vm07.zylyez.log.gz 2026-03-10T08:59:44.293 INFO:teuthology.orchestra.run.vm07.stderr:gzip -5 --verbose -- /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-volume.log 2026-03-10T08:59:44.307 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-client.rgw.foo.vm02.rugqqv.log 2026-03-10T08:59:44.309 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph.audit.log: 91.4% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph.audit.log.gz 
2026-03-10T08:59:44.309 INFO:teuthology.orchestra.run.vm07.stderr:gzip -5 --verbose -- /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ops-log-ceph-client.rgw.foo.vm07.zylyez.log
2026-03-10T08:59:44.319 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-osd.2.log: gzip -5 --verbose -- /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph.audit.log
2026-03-10T08:59:44.322 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-client.rgw.foo.vm02.rugqqv.log: 94.1% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-client.rgw.foo.vm02.rugqqv.log.gz
2026-03-10T08:59:44.325 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-volume.log: gzip -5 --verbose -- /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph.cephadm.log
2026-03-10T08:59:44.325 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ops-log-ceph-client.rgw.foo.vm07.zylyez.log: 96.3% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-volume.log.gz
2026-03-10T08:59:44.327 INFO:teuthology.orchestra.run.vm07.stderr:gzip -5 --verbose -- /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ops-log-ceph-client.rgw.foo.vm07.wecerd.log
2026-03-10T08:59:44.328 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph.cephadm.log: 93.1% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ops-log-ceph-client.rgw.foo.vm07.zylyez.log.gz
2026-03-10T08:59:44.328 INFO:teuthology.orchestra.run.vm07.stderr: 82.8% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph.cephadm.log.gz
2026-03-10T08:59:44.339 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-volume.log
2026-03-10T08:59:44.341 INFO:teuthology.orchestra.run.vm07.stderr:gzip -5 --verbose -- /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-mon.vm07.log
2026-03-10T08:59:44.341 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph.audit.log: 91.3% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph.audit.log.gz
2026-03-10T08:59:44.347 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ops-log-ceph-client.rgw.foo.vm07.wecerd.log: 93.2% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ops-log-ceph-client.rgw.foo.vm07.wecerd.log.gz
2026-03-10T08:59:44.347 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ops-log-ceph-client.rgw.foo.vm02.bmgnwf.log
2026-03-10T08:59:44.349 INFO:teuthology.orchestra.run.vm07.stderr:gzip -5 --verbose -- /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-client.rgw.foo.vm07.wecerd.log
2026-03-10T08:59:44.355 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-volume.log: gzip -5 --verbose -- /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph.cephadm.log
2026-03-10T08:59:44.359 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ops-log-ceph-client.rgw.foo.vm02.bmgnwf.log: gzip -5 --verbose -- /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-osd.4.log
2026-03-10T08:59:44.361 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph.cephadm.log: 93.3% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ops-log-ceph-client.rgw.foo.vm02.bmgnwf.log.gz
2026-03-10T08:59:44.361 INFO:teuthology.orchestra.run.vm07.stderr:/var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-mon.vm07.log: /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-client.rgw.foo.vm07.wecerd.log: 93.9% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-client.rgw.foo.vm07.wecerd.log.gz
2026-03-10T08:59:44.361 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-osd.0.log
2026-03-10T08:59:44.363 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-osd.4.log: 83.4% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph.cephadm.log.gz
2026-03-10T08:59:44.380 INFO:teuthology.orchestra.run.vm02.stderr:/var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-osd.0.log: 96.2% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-volume.log.gz
2026-03-10T08:59:44.697 INFO:teuthology.orchestra.run.vm02.stderr: 90.0% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-mgr.vm02.ttibzz.log.gz
2026-03-10T08:59:44.740 INFO:teuthology.orchestra.run.vm07.stderr: 92.8% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-mon.vm07.log.gz
2026-03-10T08:59:45.380 INFO:teuthology.orchestra.run.vm02.stderr: 90.8% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-mon.vm02.log.gz
2026-03-10T08:59:45.485 INFO:teuthology.orchestra.run.vm07.stderr: 93.6% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-osd.1.log.gz
2026-03-10T08:59:45.490 INFO:teuthology.orchestra.run.vm02.stderr: 93.5% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-osd.2.log.gz
2026-03-10T08:59:45.519 INFO:teuthology.orchestra.run.vm07.stderr: 93.5% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-osd.7.log.gz
2026-03-10T08:59:45.591 INFO:teuthology.orchestra.run.vm07.stderr: 93.5% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-osd.5.log.gz
2026-03-10T08:59:45.645 INFO:teuthology.orchestra.run.vm07.stderr: 93.8% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-osd.3.log.gz
2026-03-10T08:59:45.647 INFO:teuthology.orchestra.run.vm07.stderr:
2026-03-10T08:59:45.647 INFO:teuthology.orchestra.run.vm07.stderr:real 0m1.417s
2026-03-10T08:59:45.647 INFO:teuthology.orchestra.run.vm07.stderr:user 0m2.617s
2026-03-10T08:59:45.647 INFO:teuthology.orchestra.run.vm07.stderr:sys 0m0.155s
2026-03-10T08:59:45.701 INFO:teuthology.orchestra.run.vm02.stderr: 93.5% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-osd.6.log.gz
2026-03-10T08:59:45.709 INFO:teuthology.orchestra.run.vm02.stderr: 93.5% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-osd.4.log.gz
2026-03-10T08:59:45.752 INFO:teuthology.orchestra.run.vm02.stderr: 93.4% -- replaced with /var/log/ceph/e750d050-1c5b-11f1-9e63-531fde0192f6/ceph-osd.0.log.gz
2026-03-10T08:59:45.753 INFO:teuthology.orchestra.run.vm02.stderr:
2026-03-10T08:59:45.753 INFO:teuthology.orchestra.run.vm02.stderr:real 0m1.524s
2026-03-10T08:59:45.753 INFO:teuthology.orchestra.run.vm02.stderr:user 0m2.843s
2026-03-10T08:59:45.753 INFO:teuthology.orchestra.run.vm02.stderr:sys 0m0.154s
2026-03-10T08:59:45.753 INFO:tasks.cephadm:Archiving logs...
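A note on the spliced-looking gzip lines in the compression step above: one gzip is launched per file via xargs with --max-procs=0 (as many parallel jobs as possible), so the --verbose progress messages from concurrent gzip processes interleave unsynchronized on stderr. The per-host `time` summaries (~1.4-1.5 s wall against ~2.6-2.8 s of CPU) confirm the parallelism. The pattern itself, as run above:

    # Compress every Ceph log in parallel; gzip -5 trades ratio for speed.
    # --max-procs=0 lets xargs spawn unlimited gzip processes, which is why
    # their --verbose output interleaves in the captured stderr.
    time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 \
      | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty \
          -- gzip -5 --verbose --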
2026-03-10T08:59:45.753 DEBUG:teuthology.misc:Transferring archived files from vm02:/var/log/ceph to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/966/remote/vm02/log
2026-03-10T08:59:45.753 DEBUG:teuthology.orchestra.run.vm02:> sudo tar c -f - -C /var/log/ceph -- .
2026-03-10T08:59:45.955 DEBUG:teuthology.misc:Transferring archived files from vm07:/var/log/ceph to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/966/remote/vm07/log
2026-03-10T08:59:45.955 DEBUG:teuthology.orchestra.run.vm07:> sudo tar c -f - -C /var/log/ceph -- .
2026-03-10T08:59:46.102 INFO:tasks.cephadm:Removing cluster...
2026-03-10T08:59:46.102 DEBUG:teuthology.orchestra.run.vm02:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 --force
2026-03-10T08:59:46.189 INFO:teuthology.orchestra.run.vm02.stdout:Deleting cluster with fsid: e750d050-1c5b-11f1-9e63-531fde0192f6
2026-03-10T08:59:47.254 DEBUG:teuthology.orchestra.run.vm07:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid e750d050-1c5b-11f1-9e63-531fde0192f6 --force
2026-03-10T08:59:47.336 INFO:teuthology.orchestra.run.vm07.stdout:Deleting cluster with fsid: e750d050-1c5b-11f1-9e63-531fde0192f6
2026-03-10T08:59:48.403 INFO:tasks.cephadm:Removing cephadm ...
2026-03-10T08:59:48.403 DEBUG:teuthology.orchestra.run.vm02:> rm -rf /home/ubuntu/cephtest/cephadm
2026-03-10T08:59:48.406 DEBUG:teuthology.orchestra.run.vm07:> rm -rf /home/ubuntu/cephtest/cephadm
2026-03-10T08:59:48.409 INFO:tasks.cephadm:Teardown complete
2026-03-10T08:59:48.409 DEBUG:teuthology.run_tasks:Unwinding manager clock
2026-03-10T08:59:48.411 INFO:teuthology.task.clock:Checking final clock skew...
2026-03-10T08:59:48.411 DEBUG:teuthology.orchestra.run.vm02:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-10T08:59:48.447 DEBUG:teuthology.orchestra.run.vm07:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-10T08:59:48.523 INFO:teuthology.orchestra.run.vm07.stdout: remote refid st t when poll reach delay offset jitter
2026-03-10T08:59:48.524 INFO:teuthology.orchestra.run.vm07.stdout:==============================================================================
2026-03-10T08:59:48.524 INFO:teuthology.orchestra.run.vm07.stdout: 0.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-03-10T08:59:48.524 INFO:teuthology.orchestra.run.vm07.stdout: 1.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-03-10T08:59:48.524 INFO:teuthology.orchestra.run.vm07.stdout: 2.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-03-10T08:59:48.524 INFO:teuthology.orchestra.run.vm07.stdout: 3.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-03-10T08:59:48.524 INFO:teuthology.orchestra.run.vm07.stdout: ntp.ubuntu.com .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-03-10T08:59:48.524 INFO:teuthology.orchestra.run.vm07.stdout:-mail2.light-spe 237.17.204.95 2 u 44 64 377 28.714 -0.013 0.046
2026-03-10T08:59:48.524 INFO:teuthology.orchestra.run.vm07.stdout:*static.215.156. 35.73.197.144 2 u 44 64 377 23.561 -0.270 0.581
2026-03-10T08:59:48.524 INFO:teuthology.orchestra.run.vm07.stdout:+static.buzo.eu 100.10.69.89 2 u 53 64 377 23.584 -0.270 1.007
2026-03-10T08:59:48.524 INFO:teuthology.orchestra.run.vm07.stdout:+ntp2.kernfusion 192.53.103.108 2 u 49 64 377 30.420 -0.104 2.770
2026-03-10T08:59:48.524 INFO:teuthology.orchestra.run.vm07.stdout:+185.125.190.57 194.121.207.249 2 u 33 64 377 32.071 +0.003 0.331
2026-03-10T08:59:48.524 INFO:teuthology.orchestra.run.vm07.stdout:+141.144.246.224 146.131.121.246 2 u 45 64 377 29.373 -0.186 1.803
2026-03-10T08:59:49.901 INFO:teuthology.orchestra.run.vm02.stdout: remote refid st t when poll reach delay offset jitter
2026-03-10T08:59:49.901 INFO:teuthology.orchestra.run.vm02.stdout:==============================================================================
2026-03-10T08:59:49.901 INFO:teuthology.orchestra.run.vm02.stdout: 0.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-03-10T08:59:49.901 INFO:teuthology.orchestra.run.vm02.stdout: 1.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-03-10T08:59:49.901 INFO:teuthology.orchestra.run.vm02.stdout: 2.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-03-10T08:59:49.901 INFO:teuthology.orchestra.run.vm02.stdout: 3.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-03-10T08:59:49.901 INFO:teuthology.orchestra.run.vm02.stdout: ntp.ubuntu.com .POOL. 16 p - 64 0 0.000 +0.000 0.000
2026-03-10T08:59:49.901 INFO:teuthology.orchestra.run.vm02.stdout:+mail2.light-spe 237.17.204.95 2 u 61 128 377 28.895 -5.105 1.307
2026-03-10T08:59:49.901 INFO:teuthology.orchestra.run.vm02.stdout:+static.215.156. 35.73.197.144 2 u 120 128 377 23.549 -4.953 0.869
2026-03-10T08:59:49.901 INFO:teuthology.orchestra.run.vm02.stdout:+static.buzo.eu 100.10.69.89 2 u 122 128 377 23.740 -5.053 0.541
2026-03-10T08:59:49.901 INFO:teuthology.orchestra.run.vm02.stdout:#s7.vonderste.in 131.188.3.222 2 u 117 128 277 28.300 -9.440 2.909
2026-03-10T08:59:49.902 INFO:teuthology.orchestra.run.vm02.stdout:-srv01-nc.secure 129.69.253.1 2 u 56 128 377 28.300 -8.501 0.770
2026-03-10T08:59:49.902 INFO:teuthology.orchestra.run.vm02.stdout:-sambuca.psychon 174.222.245.115 2 u 116 128 377 25.002 -5.686 0.486
2026-03-10T08:59:49.902 INFO:teuthology.orchestra.run.vm02.stdout:-time.ndless.net 192.53.103.108 2 u 59 128 377 28.788 -3.260 1.351
2026-03-10T08:59:49.902 INFO:teuthology.orchestra.run.vm02.stdout:+212.132.108.186 130.149.17.8 2 u 120 128 377 28.951 -4.658 0.799
2026-03-10T08:59:49.902 INFO:teuthology.orchestra.run.vm02.stdout:#resolv1.bbone.n 131.188.3.222 2 u 111 128 377 31.181 -5.090 0.492
2026-03-10T08:59:49.902 INFO:teuthology.orchestra.run.vm02.stdout:+time.cloudflare 10.17.8.4 3 u 54 128 377 20.432 -3.852 0.755
2026-03-10T08:59:49.902 INFO:teuthology.orchestra.run.vm02.stdout:#128.127.67.142 171.237.1.87 2 u 248 128 336 29.948 -6.182 0.600
2026-03-10T08:59:49.902 INFO:teuthology.orchestra.run.vm02.stdout:-ntp2.kernfusion 192.53.103.108 2 u 120 128 377 31.328 -6.009 0.890
2026-03-10T08:59:49.902 INFO:teuthology.orchestra.run.vm02.stdout:#ns8.starka.st 129.134.28.123 2 u 121 128 377 22.685 -6.825 0.892
2026-03-10T08:59:49.902 INFO:teuthology.orchestra.run.vm02.stdout:*141.144.246.224 146.131.121.246 2 u 54 128 377 29.297 -4.862 1.043
2026-03-10T08:59:49.902 DEBUG:teuthology.run_tasks:Unwinding manager ansible.cephlab
2026-03-10T08:59:49.904 INFO:teuthology.task.ansible:Skipping ansible cleanup...
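The final clock-skew check is a best-effort probe: try ntpq, fall back to chronyc where ntpd is absent, and never fail the teardown:

    # Report peer status from whichever time daemon is present; `|| true`
    # keeps the teardown green even if neither tool is installed.
    PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true

In the ntpq tables above, the leading tally character marks peer status: '*' is the peer currently selected for synchronization, '+' a candidate combined with it, '-' an outlier discarded by the clustering algorithm, and '#' a survivor kept as backup. delay, offset, and jitter are in milliseconds, so vm07 is within a fraction of a millisecond of its sources and vm02 within single-digit milliseconds.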
2026-03-10T08:59:49.904 DEBUG:teuthology.run_tasks:Unwinding manager selinux
2026-03-10T08:59:49.907 DEBUG:teuthology.run_tasks:Unwinding manager pcp
2026-03-10T08:59:49.909 DEBUG:teuthology.run_tasks:Unwinding manager internal.timer
2026-03-10T08:59:49.911 INFO:teuthology.task.internal:Duration was 1677.577804 seconds
2026-03-10T08:59:49.911 DEBUG:teuthology.run_tasks:Unwinding manager internal.syslog
2026-03-10T08:59:49.913 INFO:teuthology.task.internal.syslog:Shutting down syslog monitoring...
2026-03-10T08:59:49.913 DEBUG:teuthology.orchestra.run.vm02:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-10T08:59:49.914 DEBUG:teuthology.orchestra.run.vm07:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-10T08:59:49.937 INFO:teuthology.task.internal.syslog:Checking logs for errors...
2026-03-10T08:59:49.937 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm02.local
2026-03-10T08:59:49.937 DEBUG:teuthology.orchestra.run.vm02:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-10T08:59:49.987 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm07.local
2026-03-10T08:59:49.987 DEBUG:teuthology.orchestra.run.vm07:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-10T08:59:49.997 INFO:teuthology.task.internal.syslog:Gathering journalctl...
2026-03-10T08:59:49.997 DEBUG:teuthology.orchestra.run.vm02:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-10T08:59:50.031 DEBUG:teuthology.orchestra.run.vm07:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-10T08:59:50.104 INFO:teuthology.task.internal.syslog:Compressing syslogs...
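The kernel-log check above is one long pipeline: flag anything matching BUG/INFO/DEADLOCK, strip a chain of known-benign patterns, and report only the first surviving line. Folded into a shorter sketch (an abridged subset of the exclusions shown above; KERNLOG is an illustrative variable holding this run's path):

    # First unexplained kernel message wins; empty output means the log is clean.
    KERNLOG=/home/ubuntu/cephtest/archive/syslog/kern.log
    grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' "$KERNLOG" \
      | grep -v 'task .* blocked for more than .* seconds' \
      | grep -v CRON \
      | grep -Ev 'ceph-create-keys|ceph-crash' \
      | head -n 1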
2026-03-10T08:59:50.104 DEBUG:teuthology.orchestra.run.vm02:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-10T08:59:50.105 DEBUG:teuthology.orchestra.run.vm07:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-10T08:59:50.110 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-10T08:59:50.110 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-10T08:59:50.110 INFO:teuthology.orchestra.run.vm02.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-10T08:59:50.111 INFO:teuthology.orchestra.run.vm02.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: /home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-10T08:59:50.111 INFO:teuthology.orchestra.run.vm02.stderr:
2026-03-10T08:59:50.118 INFO:teuthology.orchestra.run.vm07.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-10T08:59:50.118 INFO:teuthology.orchestra.run.vm07.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-10T08:59:50.119 INFO:teuthology.orchestra.run.vm07.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-10T08:59:50.119 INFO:teuthology.orchestra.run.vm07.stderr: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-10T08:59:50.119 INFO:teuthology.orchestra.run.vm07.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-10T08:59:50.125 INFO:teuthology.orchestra.run.vm07.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 89.8% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-10T08:59:50.125 INFO:teuthology.orchestra.run.vm02.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 91.3% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-10T08:59:50.126 DEBUG:teuthology.run_tasks:Unwinding manager internal.sudo
2026-03-10T08:59:50.129 INFO:teuthology.task.internal:Restoring /etc/sudoers...
2026-03-10T08:59:50.129 DEBUG:teuthology.orchestra.run.vm02:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-10T08:59:50.177 DEBUG:teuthology.orchestra.run.vm07:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-10T08:59:50.184 DEBUG:teuthology.run_tasks:Unwinding manager internal.coredump
2026-03-10T08:59:50.186 DEBUG:teuthology.orchestra.run.vm02:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-10T08:59:50.219 DEBUG:teuthology.orchestra.run.vm07:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-10T08:59:50.224 INFO:teuthology.orchestra.run.vm02.stdout:kernel.core_pattern = core
2026-03-10T08:59:50.230 INFO:teuthology.orchestra.run.vm07.stdout:kernel.core_pattern = core
2026-03-10T08:59:50.237 DEBUG:teuthology.orchestra.run.vm02:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-10T08:59:50.276 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T08:59:50.276 DEBUG:teuthology.orchestra.run.vm07:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-10T08:59:50.282 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-10T08:59:50.283 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive
2026-03-10T08:59:50.285 INFO:teuthology.task.internal:Transferring archived files...
2026-03-10T08:59:50.286 DEBUG:teuthology.misc:Transferring archived files from vm02:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/966/remote/vm02
2026-03-10T08:59:50.286 DEBUG:teuthology.orchestra.run.vm02:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-10T08:59:50.326 DEBUG:teuthology.misc:Transferring archived files from vm07:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/966/remote/vm07
2026-03-10T08:59:50.326 DEBUG:teuthology.orchestra.run.vm07:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-10T08:59:50.333 INFO:teuthology.task.internal:Removing archive directory...
2026-03-10T08:59:50.333 DEBUG:teuthology.orchestra.run.vm02:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-10T08:59:50.367 DEBUG:teuthology.orchestra.run.vm07:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-10T08:59:50.379 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive_upload
2026-03-10T08:59:50.382 INFO:teuthology.task.internal:Not uploading archives.
2026-03-10T08:59:50.382 DEBUG:teuthology.run_tasks:Unwinding manager internal.base
2026-03-10T08:59:50.384 INFO:teuthology.task.internal:Tidying up after the test...
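The coredump unwind above does three things: restore the default kernel.core_pattern, delete cores known to come from systemd-sysusers (noise, not a test failure), and remove the coredump directory only if it is then empty. The subsequent `test -e` exiting 1 on both hosts therefore means the directory was removed, i.e. no daemon dumped core during the run. Unrolled for readability (the same commands as above):

    # Restore the default core pattern set at test start.
    sudo sysctl -w kernel.core_pattern=core
    # Prune cores produced by systemd-sysusers; keep anything else for triage.
    sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do
      file $f | grep -q systemd-sysusers && rm $f || true
    done'
    # rmdir succeeds quietly when empty; --ignore-fail-on-non-empty leaves
    # populated directories (real coredumps) in place for archiving.
    rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump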
2026-03-10T08:59:50.384 DEBUG:teuthology.orchestra.run.vm02:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-10T08:59:50.411 DEBUG:teuthology.orchestra.run.vm07:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-10T08:59:50.413 INFO:teuthology.orchestra.run.vm02.stdout: 258067 4 drwxr-xr-x 2 ubuntu ubuntu 4096 Mar 10 08:59 /home/ubuntu/cephtest
2026-03-10T08:59:50.423 INFO:teuthology.orchestra.run.vm07.stdout: 258079 4 drwxr-xr-x 2 ubuntu ubuntu 4096 Mar 10 08:59 /home/ubuntu/cephtest
2026-03-10T08:59:50.424 DEBUG:teuthology.run_tasks:Unwinding manager console_log
2026-03-10T08:59:50.430 INFO:teuthology.run:Summary data:
description: orch/cephadm/smoke-roleless/{0-distro/ubuntu_22.04 1-start 2-services/rgw-ingress 3-final}
duration: 1677.577803850174
owner: kyr
success: true
2026-03-10T08:59:50.430 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-10T08:59:50.446 INFO:teuthology.run:pass
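Those last lines are the quickest triage signal when scanning many jobs. A small sketch for pulling the verdict out of a whole run's archive (teuthology.log is the per-job log name; the exact failure token can vary across teuthology versions, hence the case-insensitive match):

    # Print the final pass/fail line and recorded duration for every job in this run.
    for f in /archive/kyr-2026-03-10_01:00:38-orch-squid-none-default-vps/*/teuthology.log; do
      echo "== $f"
      grep -aiE 'teuthology\.run:(pass|fail)' "$f" | tail -n 1
      grep -a 'duration:' "$f" | tail -n 1
    done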