2026-03-06T23:51:00.860 INFO:root:teuthology version: 1.2.4.dev6+g1c580df7a
2026-03-06T23:51:00.868 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-06T23:51:00.890 INFO:teuthology.run:Config: archive_path: /archive/irq0-2026-03-06_20:21:59-orch:cephadm:smoke-roleless-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/416
branch: cobaltcore-storage-v19.2.3-fasttrack-5
description: orch:cephadm:smoke-roleless/{0-distro/centos_9.stream 1-start 2-services/nfs-ingress-rgw-user 3-final}
email: null
first_in_suite: false
flavor: default
job_id: '416'
last_in_suite: false
machine_type: vps
name: irq0-2026-03-06_20:21:59-orch:cephadm:smoke-roleless-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps
no_nested_subset: false
openstack:
- volumes:
    count: 4
    size: 10
os_type: centos
os_version: 9.stream
overrides:
  admin_socket:
    branch: cobaltcore-storage-v19.2.3-fasttrack-5
  ansible.cephlab:
    branch: main
    repo: https://github.com/kshtsk/ceph-cm-ansible.git
    skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
    vars:
      timezone: Europe/Berlin
  ceph:
    conf:
      mgr:
        debug mgr: 20
        debug ms: 1
      mon:
        debug mon: 20
        debug ms: 1
        debug paxos: 20
      osd:
        debug ms: 1
        debug osd: 20
        osd mclock iops capacity threshold hdd: 49000
        osd shutdown pgref assert: true
    flavor: default
    log-ignorelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - CEPHADM_DAEMON_PLACE_FAIL
    - CEPHADM_FAILED_DAEMON
    log-only-match:
    - CEPHADM_
    sha1: 340d3c24fc6ae7529322dc7ccee6c6cb2589da0a
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon: {}
  cephadm:
    cephadm_binary_url: https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm
    containers:
      image: harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5
  install:
    ceph:
      flavor: default
      sha1: 340d3c24fc6ae7529322dc7ccee6c6cb2589da0a
    extra_system_packages:
      deb:
      - python3-xmltodict
      - s3cmd
      rpm:
      - bzip2
      - perl-Test-Harness
      - python3-xmltodict
      - s3cmd
    repos:
    - name: ceph-source
      priority: 1
      url: https://s3.clyso.com/ces-packages/components/ceph/rpm-19.2.3-39-g340d3c24fc6/el9.clyso/SRPMS
    - name: ceph-noarch
      priority: 1
      url: https://s3.clyso.com/ces-packages/components/ceph/rpm-19.2.3-39-g340d3c24fc6/el9.clyso/noarch
    - name: ceph
      priority: 1
      url: https://s3.clyso.com/ces-packages/components/ceph/rpm-19.2.3-39-g340d3c24fc6/el9.clyso/x86_64
  selinux:
    allowlist:
    - scontext=system_u:system_r:logrotate_t:s0
    - scontext=system_u:system_r:getty_t:s0
  workunit:
    branch: tt-19.2.3-fasttrack-5-no-nvme-loop
    sha1: b952d7263a165ada4530724b87fab57a8f3f547b
owner: irq0
priority: 1000
repo: https://github.com/ceph/ceph.git
roles:
- - host.a
  - client.0
- - host.b
  - client.1
seed: 9421
sha1: 340d3c24fc6ae7529322dc7ccee6c6cb2589da0a
sleep_before_teardown: 0
suite: orch:cephadm:smoke-roleless
suite_branch: tt-19.2.3-fasttrack-5-no-nvme-loop
suite_path: /home/teuthos/src/github.com_kshtsk_ceph_b952d7263a165ada4530724b87fab57a8f3f547b/qa
suite_relpath: qa
suite_repo: https://github.com/kshtsk/ceph.git
suite_sha1: b952d7263a165ada4530724b87fab57a8f3f547b
targets:
  vm03.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBwJ4Tx8GQS3u4y3EGKrHJimp3i7LUoFAugwxAOC/5xImENzv4iOEZ5ENBHuc8HwknNuFqh/KElTGuhXofP/3nE=
  vm08.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBCkUDig1ft1U8aN3Juln252BhNZfe6cqCqrw1U/vMoaeg/hMjKpQFx17nAsvTKZWY0dS31UqB+xcZzWIuthfWw=
tasks:
- pexec:
    all:
    - sudo dnf remove nvme-cli -y
    - sudo dnf install nvmetcli nvme-cli -y
- cephadm:
    roleless: true
- cephadm.shell:
    host.a:
    - ceph orch status
    - ceph orch ps
    - ceph orch ls
    - ceph orch host ls
    - ceph orch device ls
- vip: null
- cephadm.shell:
    host.a:
    - ceph orch device ls --refresh
- vip.exec:
    all-hosts:
    - systemctl stop nfs-server
- cephadm.shell:
    host.a:
    - ceph orch apply rgw foorgw --port 8800
    - ceph nfs cluster create foo --ingress --virtual-ip {{VIP0}}/{{VIPPREFIXLEN}}
- vip.exec:
    host.a:
    - dnf install -y python3-boto3 || apt install -y python3-boto3
    - /home/ubuntu/cephtest/cephadm shell radosgw-admin user create --uid foouser --display-name foo > /tmp/user.json
- python:
    host.a: |
      import boto3
      import json

      with open('/tmp/user.json', 'rt') as f:
          info = json.loads(f.read())
      s3 = boto3.resource(
          's3',
          aws_access_key_id=info['keys'][0]['access_key'],
          aws_secret_access_key=info['keys'][0]['secret_key'],
          endpoint_url='http://localhost:8800',
      )
      bucket = s3.Bucket('foobucket')
      bucket.create()
      bucket.put_object(Key='myobject', Body='thebody')
- cephadm.shell:
    host.a:
    - ceph nfs export create rgw --cluster-id foo --pseudo-path /foouser --user-id foouser
- cephadm.wait_for_service:
    service: nfs.foo
- cephadm.wait_for_service:
    service: ingress.nfs.foo
- vip.exec:
    host.a:
    - mkdir /mnt/foo
    - sleep 5
    - mount -t nfs {{VIP0}}:/foouser /mnt/foo
    - test -d /mnt/foo/foobucket
    - find /mnt/foo -ls
    - grep thebody /mnt/foo/foobucket/myobject
    - echo test > /mnt/foo/foobucket/newobject
    - sync
- python:
    host.a: |
      import boto3
      import json
      from io import BytesIO

      with open('/tmp/user.json', 'rt') as f:
          info = json.loads(f.read())
      s3 = boto3.resource(
          's3',
          aws_access_key_id=info['keys'][0]['access_key'],
          aws_secret_access_key=info['keys'][0]['secret_key'],
          endpoint_url='http://localhost:8800',
      )
      bucket = s3.Bucket('foobucket')
      data = BytesIO()
      bucket.download_fileobj(Fileobj=data, Key='newobject')
      print(data.getvalue())
      assert data.getvalue().decode() == 'test\n'
- vip.exec:
    host.a:
    - umount /mnt/foo
- cephadm.shell:
    host.a:
    - ceph nfs export rm foo /foouser
    - ceph nfs cluster rm foo
- cephadm.shell:
    host.a:
    - stat -c '%u %g' /var/log/ceph | grep '167 167'
    - ceph orch status
    - ceph orch ps
    - ceph orch ls
    - ceph orch host ls
    - ceph orch device ls
    - ceph orch ls | grep '^osd.all-available-devices '
teuthology:
  fragments_dropped: []
  meta: {}
  postmerge: []
teuthology_branch: clyso-debian-13
teuthology_repo: https://github.com/clyso/teuthology
teuthology_sha1: 1c580df7a9c7c2aadc272da296344fd99f27c444
timestamp: 2026-03-06_20:21:59
tube: vps
user: irq0
verbose: false
worker_log: /home/teuthos/.teuthology/dispatcher/dispatcher.vps.43333
2026-03-06T23:51:00.890 INFO:teuthology.run:suite_path is set to /home/teuthos/src/github.com_kshtsk_ceph_b952d7263a165ada4530724b87fab57a8f3f547b/qa; will attempt to use it
2026-03-06T23:51:00.890 INFO:teuthology.run:Found tasks at /home/teuthos/src/github.com_kshtsk_ceph_b952d7263a165ada4530724b87fab57a8f3f547b/qa/tasks
2026-03-06T23:51:00.890 INFO:teuthology.run_tasks:Running task internal.save_config...
2026-03-06T23:51:00.891 INFO:teuthology.task.internal:Saving configuration
2026-03-06T23:51:00.896 INFO:teuthology.run_tasks:Running task internal.check_lock...
2026-03-06T23:51:00.897 INFO:teuthology.task.internal.check_lock:Checking locks...
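Note: the two embedded python tasks in the job config above amount to the following standalone S3 round-trip, shown here as one sketch for reference. It assumes the same /tmp/user.json written by radosgw-admin and the same RGW endpoint on localhost:8800; it is not part of the job itself.

# Sketch combining the job's two embedded `python` tasks into one script.
# Assumes /tmp/user.json from `radosgw-admin user create` and an RGW
# listening on http://localhost:8800, as in the job config above.
import json
from io import BytesIO

import boto3

with open('/tmp/user.json', 'rt') as f:
    info = json.load(f)

s3 = boto3.resource(
    's3',
    aws_access_key_id=info['keys'][0]['access_key'],
    aws_secret_access_key=info['keys'][0]['secret_key'],
    endpoint_url='http://localhost:8800',
)

# First half: write an object through RGW; the job later greps for its
# body through the NFS mount of the same bucket.
bucket = s3.Bucket('foobucket')
bucket.create()
bucket.put_object(Key='myobject', Body='thebody')

# Second half: read back the object the job wrote through the NFS mount
# ('echo test > /mnt/foo/foobucket/newobject'), proving RGW sees it.
data = BytesIO()
bucket.download_fileobj(Fileobj=data, Key='newobject')
assert data.getvalue().decode() == 'test\n'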
2026-03-06T23:51:00.903 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm03.local', 'description': '/archive/irq0-2026-03-06_20:21:59-orch:cephadm:smoke-roleless-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/416', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-06 22:49:53.136332', 'locked_by': 'irq0', 'mac_address': '52:55:00:00:00:03', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBwJ4Tx8GQS3u4y3EGKrHJimp3i7LUoFAugwxAOC/5xImENzv4iOEZ5ENBHuc8HwknNuFqh/KElTGuhXofP/3nE='}
2026-03-06T23:51:00.908 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm08.local', 'description': '/archive/irq0-2026-03-06_20:21:59-orch:cephadm:smoke-roleless-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/416', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-06 22:49:53.136753', 'locked_by': 'irq0', 'mac_address': '52:55:00:00:00:08', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBCkUDig1ft1U8aN3Juln252BhNZfe6cqCqrw1U/vMoaeg/hMjKpQFx17nAsvTKZWY0dS31UqB+xcZzWIuthfWw='}
2026-03-06T23:51:00.908 INFO:teuthology.run_tasks:Running task internal.add_remotes...
2026-03-06T23:51:00.908 INFO:teuthology.task.internal:roles: ubuntu@vm03.local - ['host.a', 'client.0']
2026-03-06T23:51:00.908 INFO:teuthology.task.internal:roles: ubuntu@vm08.local - ['host.b', 'client.1']
2026-03-06T23:51:00.908 INFO:teuthology.run_tasks:Running task console_log...
2026-03-06T23:51:00.914 DEBUG:teuthology.task.console_log:vm03 does not support IPMI; excluding
2026-03-06T23:51:00.919 DEBUG:teuthology.task.console_log:vm08 does not support IPMI; excluding
2026-03-06T23:51:00.919 DEBUG:teuthology.exit:Installing handler: Handler(exiter=, func=.kill_console_loggers at 0x7fe6a358bd90>, signals=[15])
2026-03-06T23:51:00.919 INFO:teuthology.run_tasks:Running task internal.connect...
2026-03-06T23:51:00.919 INFO:teuthology.task.internal:Opening connections...
2026-03-06T23:51:00.920 DEBUG:teuthology.task.internal:connecting to ubuntu@vm03.local
2026-03-06T23:51:00.920 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm03.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-06T23:51:00.980 DEBUG:teuthology.task.internal:connecting to ubuntu@vm08.local
2026-03-06T23:51:00.980 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm08.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-06T23:51:01.037 INFO:teuthology.run_tasks:Running task internal.push_inventory...
2026-03-06T23:51:01.038 DEBUG:teuthology.orchestra.run.vm03:> uname -m
2026-03-06T23:51:01.082 INFO:teuthology.orchestra.run.vm03.stdout:x86_64
2026-03-06T23:51:01.082 DEBUG:teuthology.orchestra.run.vm03:> cat /etc/os-release
2026-03-06T23:51:01.137 INFO:teuthology.orchestra.run.vm03.stdout:NAME="CentOS Stream"
2026-03-06T23:51:01.137 INFO:teuthology.orchestra.run.vm03.stdout:VERSION="9"
2026-03-06T23:51:01.137 INFO:teuthology.orchestra.run.vm03.stdout:ID="centos"
2026-03-06T23:51:01.137 INFO:teuthology.orchestra.run.vm03.stdout:ID_LIKE="rhel fedora"
2026-03-06T23:51:01.137 INFO:teuthology.orchestra.run.vm03.stdout:VERSION_ID="9"
2026-03-06T23:51:01.137 INFO:teuthology.orchestra.run.vm03.stdout:PLATFORM_ID="platform:el9"
2026-03-06T23:51:01.137 INFO:teuthology.orchestra.run.vm03.stdout:PRETTY_NAME="CentOS Stream 9"
2026-03-06T23:51:01.137 INFO:teuthology.orchestra.run.vm03.stdout:ANSI_COLOR="0;31"
2026-03-06T23:51:01.137 INFO:teuthology.orchestra.run.vm03.stdout:LOGO="fedora-logo-icon"
2026-03-06T23:51:01.137 INFO:teuthology.orchestra.run.vm03.stdout:CPE_NAME="cpe:/o:centos:centos:9"
2026-03-06T23:51:01.137 INFO:teuthology.orchestra.run.vm03.stdout:HOME_URL="https://centos.org/"
2026-03-06T23:51:01.137 INFO:teuthology.orchestra.run.vm03.stdout:BUG_REPORT_URL="https://issues.redhat.com/"
2026-03-06T23:51:01.137 INFO:teuthology.orchestra.run.vm03.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9"
2026-03-06T23:51:01.138 INFO:teuthology.orchestra.run.vm03.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
2026-03-06T23:51:01.138 INFO:teuthology.lock.ops:Updating vm03.local on lock server
2026-03-06T23:51:01.143 DEBUG:teuthology.orchestra.run.vm08:> uname -m
2026-03-06T23:51:01.161 INFO:teuthology.orchestra.run.vm08.stdout:x86_64
2026-03-06T23:51:01.161 DEBUG:teuthology.orchestra.run.vm08:> cat /etc/os-release
2026-03-06T23:51:01.216 INFO:teuthology.orchestra.run.vm08.stdout:NAME="CentOS Stream"
2026-03-06T23:51:01.216 INFO:teuthology.orchestra.run.vm08.stdout:VERSION="9"
2026-03-06T23:51:01.216 INFO:teuthology.orchestra.run.vm08.stdout:ID="centos"
2026-03-06T23:51:01.216 INFO:teuthology.orchestra.run.vm08.stdout:ID_LIKE="rhel fedora"
2026-03-06T23:51:01.216 INFO:teuthology.orchestra.run.vm08.stdout:VERSION_ID="9"
2026-03-06T23:51:01.216 INFO:teuthology.orchestra.run.vm08.stdout:PLATFORM_ID="platform:el9"
2026-03-06T23:51:01.216 INFO:teuthology.orchestra.run.vm08.stdout:PRETTY_NAME="CentOS Stream 9"
2026-03-06T23:51:01.216 INFO:teuthology.orchestra.run.vm08.stdout:ANSI_COLOR="0;31"
2026-03-06T23:51:01.216 INFO:teuthology.orchestra.run.vm08.stdout:LOGO="fedora-logo-icon"
2026-03-06T23:51:01.216 INFO:teuthology.orchestra.run.vm08.stdout:CPE_NAME="cpe:/o:centos:centos:9"
2026-03-06T23:51:01.216 INFO:teuthology.orchestra.run.vm08.stdout:HOME_URL="https://centos.org/"
2026-03-06T23:51:01.216 INFO:teuthology.orchestra.run.vm08.stdout:BUG_REPORT_URL="https://issues.redhat.com/"
2026-03-06T23:51:01.216 INFO:teuthology.orchestra.run.vm08.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9"
2026-03-06T23:51:01.216 INFO:teuthology.orchestra.run.vm08.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
2026-03-06T23:51:01.216 INFO:teuthology.lock.ops:Updating vm08.local on lock server
2026-03-06T23:51:01.220 INFO:teuthology.run_tasks:Running task internal.serialize_remote_roles...
2026-03-06T23:51:01.222 INFO:teuthology.run_tasks:Running task internal.check_conflict...
2026-03-06T23:51:01.223 INFO:teuthology.task.internal:Checking for old test directory...
2026-03-06T23:51:01.223 DEBUG:teuthology.orchestra.run.vm03:> test '!' -e /home/ubuntu/cephtest
2026-03-06T23:51:01.224 DEBUG:teuthology.orchestra.run.vm08:> test '!' -e /home/ubuntu/cephtest
2026-03-06T23:51:01.270 INFO:teuthology.run_tasks:Running task internal.check_ceph_data...
2026-03-06T23:51:01.271 INFO:teuthology.task.internal:Checking for non-empty /var/lib/ceph...
2026-03-06T23:51:01.271 DEBUG:teuthology.orchestra.run.vm03:> test -z $(ls -A /var/lib/ceph)
2026-03-06T23:51:01.279 DEBUG:teuthology.orchestra.run.vm08:> test -z $(ls -A /var/lib/ceph)
2026-03-06T23:51:01.292 INFO:teuthology.orchestra.run.vm03.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-03-06T23:51:01.325 INFO:teuthology.orchestra.run.vm08.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-03-06T23:51:01.325 INFO:teuthology.run_tasks:Running task internal.vm_setup...
2026-03-06T23:51:01.333 DEBUG:teuthology.orchestra.run.vm03:> test -e /ceph-qa-ready
2026-03-06T23:51:01.347 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-06T23:51:01.535 DEBUG:teuthology.orchestra.run.vm08:> test -e /ceph-qa-ready
2026-03-06T23:51:01.549 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-06T23:51:01.739 INFO:teuthology.run_tasks:Running task internal.base...
2026-03-06T23:51:01.740 INFO:teuthology.task.internal:Creating test directory...
2026-03-06T23:51:01.740 DEBUG:teuthology.orchestra.run.vm03:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-03-06T23:51:01.742 DEBUG:teuthology.orchestra.run.vm08:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-03-06T23:51:01.757 INFO:teuthology.run_tasks:Running task internal.archive_upload...
2026-03-06T23:51:01.758 INFO:teuthology.run_tasks:Running task internal.archive...
2026-03-06T23:51:01.759 INFO:teuthology.task.internal:Creating archive directory...
2026-03-06T23:51:01.759 DEBUG:teuthology.orchestra.run.vm03:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-06T23:51:01.798 DEBUG:teuthology.orchestra.run.vm08:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-03-06T23:51:01.818 INFO:teuthology.run_tasks:Running task internal.coredump...
2026-03-06T23:51:01.819 INFO:teuthology.task.internal:Enabling coredump saving...
2026-03-06T23:51:01.820 DEBUG:teuthology.orchestra.run.vm03:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-06T23:51:01.871 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-06T23:51:01.871 DEBUG:teuthology.orchestra.run.vm08:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-06T23:51:01.886 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-06T23:51:01.886 DEBUG:teuthology.orchestra.run.vm03:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-06T23:51:01.913 DEBUG:teuthology.orchestra.run.vm08:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-06T23:51:01.938 INFO:teuthology.orchestra.run.vm03.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-06T23:51:01.950 INFO:teuthology.orchestra.run.vm03.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-06T23:51:01.952 INFO:teuthology.orchestra.run.vm08.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-06T23:51:01.962 INFO:teuthology.orchestra.run.vm08.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-06T23:51:01.963 INFO:teuthology.run_tasks:Running task internal.sudo...
2026-03-06T23:51:01.964 INFO:teuthology.task.internal:Configuring sudo...
2026-03-06T23:51:01.964 DEBUG:teuthology.orchestra.run.vm03:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-06T23:51:01.994 DEBUG:teuthology.orchestra.run.vm08:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-06T23:51:02.030 INFO:teuthology.run_tasks:Running task internal.syslog...
2026-03-06T23:51:02.033 INFO:teuthology.task.internal.syslog:Starting syslog monitoring...
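Note: the internal.coredump task above redirects kernel core dumps into the archived coredump directory via kernel.core_pattern; per core(5), %t expands to the dump time in epoch seconds and %p to the dumping PID. A minimal sketch to verify the pattern took effect, assuming the same test dir layout:

# Sketch: confirm the core_pattern the coredump task installed, assuming
# the teuthology test dir /home/ubuntu/cephtest seen in this run.
from pathlib import Path

expected = '/home/ubuntu/cephtest/archive/coredump/%t.%p.core'
current = Path('/proc/sys/kernel/core_pattern').read_text().strip()
assert current == expected, f'core_pattern is {current!r}, expected {expected!r}'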
2026-03-06T23:51:02.033 DEBUG:teuthology.orchestra.run.vm03:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-06T23:51:02.062 DEBUG:teuthology.orchestra.run.vm08:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-06T23:51:02.086 DEBUG:teuthology.orchestra.run.vm03:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-06T23:51:02.139 DEBUG:teuthology.orchestra.run.vm03:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-06T23:51:02.195 DEBUG:teuthology.orchestra.run.vm03:> set -ex
2026-03-06T23:51:02.195 DEBUG:teuthology.orchestra.run.vm03:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-06T23:51:02.253 DEBUG:teuthology.orchestra.run.vm08:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-06T23:51:02.276 DEBUG:teuthology.orchestra.run.vm08:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-06T23:51:02.333 DEBUG:teuthology.orchestra.run.vm08:> set -ex
2026-03-06T23:51:02.333 DEBUG:teuthology.orchestra.run.vm08:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-06T23:51:02.393 DEBUG:teuthology.orchestra.run.vm03:> sudo service rsyslog restart
2026-03-06T23:51:02.395 DEBUG:teuthology.orchestra.run.vm08:> sudo service rsyslog restart
2026-03-06T23:51:02.424 INFO:teuthology.orchestra.run.vm03.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-06T23:51:02.463 INFO:teuthology.orchestra.run.vm08.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-06T23:51:02.772 INFO:teuthology.run_tasks:Running task internal.timer...
2026-03-06T23:51:02.773 INFO:teuthology.task.internal:Starting timer...
2026-03-06T23:51:02.774 INFO:teuthology.run_tasks:Running task pcp...
2026-03-06T23:51:02.776 INFO:teuthology.run_tasks:Running task selinux...
2026-03-06T23:51:02.778 DEBUG:teuthology.task:Applying overrides for task selinux: {'allowlist': ['scontext=system_u:system_r:logrotate_t:s0', 'scontext=system_u:system_r:getty_t:s0']}
2026-03-06T23:51:02.778 INFO:teuthology.task.selinux:Excluding vm03: VMs are not yet supported
2026-03-06T23:51:02.778 INFO:teuthology.task.selinux:Excluding vm08: VMs are not yet supported
2026-03-06T23:51:02.778 DEBUG:teuthology.task.selinux:Getting current SELinux state
2026-03-06T23:51:02.778 DEBUG:teuthology.task.selinux:Existing SELinux modes: {}
2026-03-06T23:51:02.778 INFO:teuthology.task.selinux:Putting SELinux into permissive mode
2026-03-06T23:51:02.778 INFO:teuthology.run_tasks:Running task ansible.cephlab...
2026-03-06T23:51:02.780 DEBUG:teuthology.task:Applying overrides for task ansible.cephlab: {'branch': 'main', 'repo': 'https://github.com/kshtsk/ceph-cm-ansible.git', 'skip_tags': 'nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs', 'vars': {'timezone': 'Europe/Berlin'}}
2026-03-06T23:51:02.780 DEBUG:teuthology.repo_utils:Setting repo remote to https://github.com/kshtsk/ceph-cm-ansible.git
2026-03-06T23:51:02.781 INFO:teuthology.repo_utils:Fetching github.com_kshtsk_ceph-cm-ansible_main from origin
2026-03-06T23:51:03.239 DEBUG:teuthology.repo_utils:Resetting repo at /home/teuthos/src/github.com_kshtsk_ceph-cm-ansible_main to origin/main
2026-03-06T23:51:03.244 INFO:teuthology.task.ansible:Playbook: [{'import_playbook': 'ansible_managed.yml'}, {'import_playbook': 'teuthology.yml'}, {'hosts': 'testnodes', 'tasks': [{'set_fact': {'ran_from_cephlab_playbook': True}}]}, {'import_playbook': 'testnodes.yml'}, {'import_playbook': 'container-host.yml'}, {'import_playbook': 'cobbler.yml'}, {'import_playbook': 'paddles.yml'}, {'import_playbook': 'pulpito.yml'}, {'hosts': 'testnodes', 'become': True, 'tasks': [{'name': 'Touch /ceph-qa-ready', 'file': {'path': '/ceph-qa-ready', 'state': 'touch'}, 'when': 'ran_from_cephlab_playbook|bool'}]}]
2026-03-06T23:51:03.245 DEBUG:teuthology.task.ansible:Running ansible-playbook -v --extra-vars '{"ansible_ssh_user": "ubuntu", "timezone": "Europe/Berlin"}' -i /tmp/teuth_ansible_inventory_95zb6pr --limit vm03.local,vm08.local /home/teuthos/src/github.com_kshtsk_ceph-cm-ansible_main/cephlab.yml --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
2026-03-06T23:52:54.399 DEBUG:teuthology.task.ansible:Reconnecting to [Remote(name='ubuntu@vm03.local'), Remote(name='ubuntu@vm08.local')]
2026-03-06T23:52:54.399 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm03.local'
2026-03-06T23:52:54.399 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm03.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-06T23:52:54.467 DEBUG:teuthology.orchestra.run.vm03:> true
2026-03-06T23:52:54.549 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm03.local'
2026-03-06T23:52:54.549 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm08.local'
2026-03-06T23:52:54.550 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm08.local', 'username': 'ubuntu', 'timeout': 60}
2026-03-06T23:52:54.610 DEBUG:teuthology.orchestra.run.vm08:> true
2026-03-06T23:52:54.688 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm08.local'
2026-03-06T23:52:54.688 INFO:teuthology.run_tasks:Running task clock...
2026-03-06T23:52:54.691 INFO:teuthology.task.clock:Syncing clocks and checking initial clock skew...
2026-03-06T23:52:54.691 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-03-06T23:52:54.691 DEBUG:teuthology.orchestra.run.vm03:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-06T23:52:54.693 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-03-06T23:52:54.693 DEBUG:teuthology.orchestra.run.vm08:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-06T23:52:54.726 INFO:teuthology.orchestra.run.vm03.stderr:Failed to stop ntp.service: Unit ntp.service not loaded.
2026-03-06T23:52:54.743 INFO:teuthology.orchestra.run.vm03.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded.
2026-03-06T23:52:54.765 INFO:teuthology.orchestra.run.vm08.stderr:Failed to stop ntp.service: Unit ntp.service not loaded.
2026-03-06T23:52:54.772 INFO:teuthology.orchestra.run.vm03.stderr:sudo: ntpd: command not found
2026-03-06T23:52:54.777 INFO:teuthology.orchestra.run.vm08.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded.
2026-03-06T23:52:54.785 INFO:teuthology.orchestra.run.vm03.stdout:506 Cannot talk to daemon
2026-03-06T23:52:54.801 INFO:teuthology.orchestra.run.vm08.stderr:sudo: ntpd: command not found
2026-03-06T23:52:54.801 INFO:teuthology.orchestra.run.vm03.stderr:Failed to start ntp.service: Unit ntp.service not found.
2026-03-06T23:52:54.811 INFO:teuthology.orchestra.run.vm08.stdout:506 Cannot talk to daemon
2026-03-06T23:52:54.817 INFO:teuthology.orchestra.run.vm03.stderr:Failed to start ntpd.service: Unit ntpd.service not found.
2026-03-06T23:52:54.825 INFO:teuthology.orchestra.run.vm08.stderr:Failed to start ntp.service: Unit ntp.service not found.
2026-03-06T23:52:54.841 INFO:teuthology.orchestra.run.vm08.stderr:Failed to start ntpd.service: Unit ntpd.service not found.
2026-03-06T23:52:54.867 INFO:teuthology.orchestra.run.vm03.stderr:bash: line 1: ntpq: command not found
2026-03-06T23:52:54.869 INFO:teuthology.orchestra.run.vm03.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-06T23:52:54.869 INFO:teuthology.orchestra.run.vm03.stdout:===============================================================================
2026-03-06T23:52:54.887 INFO:teuthology.orchestra.run.vm08.stderr:bash: line 1: ntpq: command not found
2026-03-06T23:52:54.889 INFO:teuthology.orchestra.run.vm08.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-06T23:52:54.889 INFO:teuthology.orchestra.run.vm08.stdout:===============================================================================
2026-03-06T23:52:54.890 INFO:teuthology.run_tasks:Running task pexec...
2026-03-06T23:52:54.892 INFO:teuthology.task.pexec:Executing custom commands...
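Note: the clock task runs a stop/step/start fallback chain across ntp, ntpd and chronyd; on CentOS Stream 9 only chronyd exists, so the ntp/ntpd failures above are expected noise and the trailing '|| true' keeps them benign. A sketch of the equivalent fallback logic, assuming passwordless sudo; it is an illustration of the shell one-liner above, not teuthology's implementation:

# Sketch of the clock task's `a || b || c` fallback chain.
import subprocess

def first_ok(*cmds: str) -> None:
    # Try each command until one succeeds, mirroring shell `a || b || c`.
    for cmd in cmds:
        if subprocess.run(cmd, shell=True).returncode == 0:
            return

first_ok('sudo systemctl stop ntp.service',
         'sudo systemctl stop ntpd.service',
         'sudo systemctl stop chronyd.service')
first_ok('sudo ntpd -gq', 'sudo chronyc makestep')   # force a clock step
first_ok('sudo systemctl start ntp.service',
         'sudo systemctl start ntpd.service',
         'sudo systemctl start chronyd.service')
subprocess.run('PATH=/usr/bin:/usr/sbin chronyc sources', shell=True)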
2026-03-06T23:52:54.892 DEBUG:teuthology.orchestra.run.vm03:> TESTDIR=/home/ubuntu/cephtest bash -s
2026-03-06T23:52:54.892 DEBUG:teuthology.orchestra.run.vm08:> TESTDIR=/home/ubuntu/cephtest bash -s
2026-03-06T23:52:54.911 DEBUG:teuthology.task.pexec:ubuntu@vm03.local< sudo dnf remove nvme-cli -y
2026-03-06T23:52:54.911 DEBUG:teuthology.task.pexec:ubuntu@vm03.local< sudo dnf install nvmetcli nvme-cli -y
2026-03-06T23:52:54.911 INFO:teuthology.task.pexec:Running commands on host ubuntu@vm03.local
2026-03-06T23:52:54.912 INFO:teuthology.task.pexec:sudo dnf remove nvme-cli -y
2026-03-06T23:52:54.912 INFO:teuthology.task.pexec:sudo dnf install nvmetcli nvme-cli -y
2026-03-06T23:52:54.932 DEBUG:teuthology.task.pexec:ubuntu@vm08.local< sudo dnf remove nvme-cli -y
2026-03-06T23:52:54.932 DEBUG:teuthology.task.pexec:ubuntu@vm08.local< sudo dnf install nvmetcli nvme-cli -y
2026-03-06T23:52:54.932 INFO:teuthology.task.pexec:Running commands on host ubuntu@vm08.local
2026-03-06T23:52:54.932 INFO:teuthology.task.pexec:sudo dnf remove nvme-cli -y
2026-03-06T23:52:54.932 INFO:teuthology.task.pexec:sudo dnf install nvmetcli nvme-cli -y
2026-03-06T23:52:55.142 INFO:teuthology.orchestra.run.vm03.stdout:No match for argument: nvme-cli
2026-03-06T23:52:55.143 INFO:teuthology.orchestra.run.vm03.stderr:No packages marked for removal.
2026-03-06T23:52:55.148 INFO:teuthology.orchestra.run.vm03.stdout:Dependencies resolved.
2026-03-06T23:52:55.149 INFO:teuthology.orchestra.run.vm03.stdout:Nothing to do.
2026-03-06T23:52:55.149 INFO:teuthology.orchestra.run.vm03.stdout:Complete!
2026-03-06T23:52:55.171 INFO:teuthology.orchestra.run.vm08.stdout:No match for argument: nvme-cli
2026-03-06T23:52:55.171 INFO:teuthology.orchestra.run.vm08.stderr:No packages marked for removal.
2026-03-06T23:52:55.178 INFO:teuthology.orchestra.run.vm08.stdout:Dependencies resolved.
2026-03-06T23:52:55.179 INFO:teuthology.orchestra.run.vm08.stdout:Nothing to do.
2026-03-06T23:52:55.179 INFO:teuthology.orchestra.run.vm08.stdout:Complete!
2026-03-06T23:52:55.603 INFO:teuthology.orchestra.run.vm08.stdout:Last metadata expiration check: 0:01:07 ago on Fri 06 Mar 2026 11:51:48 PM CET.
2026-03-06T23:52:55.621 INFO:teuthology.orchestra.run.vm03.stdout:Last metadata expiration check: 0:01:03 ago on Fri 06 Mar 2026 11:51:52 PM CET.
2026-03-06T23:52:55.729 INFO:teuthology.orchestra.run.vm08.stdout:Dependencies resolved.
2026-03-06T23:52:55.730 INFO:teuthology.orchestra.run.vm08.stdout:================================================================================
2026-03-06T23:52:55.730 INFO:teuthology.orchestra.run.vm08.stdout: Package Architecture Version Repository Size
2026-03-06T23:52:55.730 INFO:teuthology.orchestra.run.vm08.stdout:================================================================================
2026-03-06T23:52:55.730 INFO:teuthology.orchestra.run.vm08.stdout:Installing:
2026-03-06T23:52:55.730 INFO:teuthology.orchestra.run.vm08.stdout: nvme-cli x86_64 2.16-1.el9 baseos 1.2 M
2026-03-06T23:52:55.730 INFO:teuthology.orchestra.run.vm08.stdout: nvmetcli noarch 0.8-3.el9 baseos 44 k
2026-03-06T23:52:55.730 INFO:teuthology.orchestra.run.vm08.stdout:Installing dependencies:
2026-03-06T23:52:55.730 INFO:teuthology.orchestra.run.vm08.stdout: python3-configshell noarch 1:1.1.30-1.el9 baseos 72 k
2026-03-06T23:52:55.730 INFO:teuthology.orchestra.run.vm08.stdout: python3-kmod x86_64 0.9-32.el9 baseos 84 k
2026-03-06T23:52:55.730 INFO:teuthology.orchestra.run.vm08.stdout: python3-pyparsing noarch 2.4.7-9.el9 baseos 150 k
2026-03-06T23:52:55.730 INFO:teuthology.orchestra.run.vm08.stdout: python3-urwid x86_64 2.1.2-4.el9 baseos 837 k
2026-03-06T23:52:55.730 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-06T23:52:55.730 INFO:teuthology.orchestra.run.vm08.stdout:Transaction Summary
2026-03-06T23:52:55.730 INFO:teuthology.orchestra.run.vm08.stdout:================================================================================
2026-03-06T23:52:55.730 INFO:teuthology.orchestra.run.vm08.stdout:Install 6 Packages
2026-03-06T23:52:55.730 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-06T23:52:55.730 INFO:teuthology.orchestra.run.vm08.stdout:Total download size: 2.3 M
2026-03-06T23:52:55.730 INFO:teuthology.orchestra.run.vm08.stdout:Installed size: 11 M
2026-03-06T23:52:55.730 INFO:teuthology.orchestra.run.vm08.stdout:Downloading Packages:
2026-03-06T23:52:55.746 INFO:teuthology.orchestra.run.vm03.stdout:Dependencies resolved.
2026-03-06T23:52:55.746 INFO:teuthology.orchestra.run.vm03.stdout:================================================================================
2026-03-06T23:52:55.746 INFO:teuthology.orchestra.run.vm03.stdout: Package Architecture Version Repository Size
2026-03-06T23:52:55.746 INFO:teuthology.orchestra.run.vm03.stdout:================================================================================
2026-03-06T23:52:55.746 INFO:teuthology.orchestra.run.vm03.stdout:Installing:
2026-03-06T23:52:55.746 INFO:teuthology.orchestra.run.vm03.stdout: nvme-cli x86_64 2.16-1.el9 baseos 1.2 M
2026-03-06T23:52:55.747 INFO:teuthology.orchestra.run.vm03.stdout: nvmetcli noarch 0.8-3.el9 baseos 44 k
2026-03-06T23:52:55.747 INFO:teuthology.orchestra.run.vm03.stdout:Installing dependencies:
2026-03-06T23:52:55.747 INFO:teuthology.orchestra.run.vm03.stdout: python3-configshell noarch 1:1.1.30-1.el9 baseos 72 k
2026-03-06T23:52:55.747 INFO:teuthology.orchestra.run.vm03.stdout: python3-kmod x86_64 0.9-32.el9 baseos 84 k
2026-03-06T23:52:55.747 INFO:teuthology.orchestra.run.vm03.stdout: python3-pyparsing noarch 2.4.7-9.el9 baseos 150 k
2026-03-06T23:52:55.747 INFO:teuthology.orchestra.run.vm03.stdout: python3-urwid x86_64 2.1.2-4.el9 baseos 837 k
2026-03-06T23:52:55.747 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-06T23:52:55.747 INFO:teuthology.orchestra.run.vm03.stdout:Transaction Summary
2026-03-06T23:52:55.747 INFO:teuthology.orchestra.run.vm03.stdout:================================================================================
2026-03-06T23:52:55.747 INFO:teuthology.orchestra.run.vm03.stdout:Install 6 Packages
2026-03-06T23:52:55.747 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-06T23:52:55.750 INFO:teuthology.orchestra.run.vm03.stdout:Total download size: 2.3 M
2026-03-06T23:52:55.750 INFO:teuthology.orchestra.run.vm03.stdout:Installed size: 11 M
2026-03-06T23:52:55.750 INFO:teuthology.orchestra.run.vm03.stdout:Downloading Packages:
2026-03-06T23:52:56.249 INFO:teuthology.orchestra.run.vm08.stdout:(1/6): nvmetcli-0.8-3.el9.noarch.rpm 788 kB/s | 44 kB 00:00
2026-03-06T23:52:56.258 INFO:teuthology.orchestra.run.vm08.stdout:(2/6): python3-configshell-1.1.30-1.el9.noarch. 1.1 MB/s | 72 kB 00:00
2026-03-06T23:52:56.281 INFO:teuthology.orchestra.run.vm08.stdout:(3/6): python3-kmod-0.9-32.el9.x86_64.rpm 2.6 MB/s | 84 kB 00:00
2026-03-06T23:52:56.295 INFO:teuthology.orchestra.run.vm08.stdout:(4/6): python3-pyparsing-2.4.7-9.el9.noarch.rpm 3.9 MB/s | 150 kB 00:00
2026-03-06T23:52:56.317 INFO:teuthology.orchestra.run.vm08.stdout:(5/6): nvme-cli-2.16-1.el9.x86_64.rpm 9.3 MB/s | 1.2 MB 00:00
2026-03-06T23:52:56.339 INFO:teuthology.orchestra.run.vm03.stdout:(1/6): python3-configshell-1.1.30-1.el9.noarch. 599 kB/s | 72 kB 00:00
2026-03-06T23:52:56.342 INFO:teuthology.orchestra.run.vm08.stdout:(6/6): python3-urwid-2.1.2-4.el9.x86_64.rpm 14 MB/s | 837 kB 00:00
2026-03-06T23:52:56.342 INFO:teuthology.orchestra.run.vm08.stdout:--------------------------------------------------------------------------------
2026-03-06T23:52:56.342 INFO:teuthology.orchestra.run.vm08.stdout:Total 3.8 MB/s | 2.3 MB 00:00
2026-03-06T23:52:56.362 INFO:teuthology.orchestra.run.vm03.stdout:(2/6): nvmetcli-0.8-3.el9.noarch.rpm 308 kB/s | 44 kB 00:00
2026-03-06T23:52:56.377 INFO:teuthology.orchestra.run.vm03.stdout:(3/6): python3-kmod-0.9-32.el9.x86_64.rpm 2.2 MB/s | 84 kB 00:00
2026-03-06T23:52:56.406 INFO:teuthology.orchestra.run.vm03.stdout:(4/6): python3-pyparsing-2.4.7-9.el9.noarch.rpm 3.3 MB/s | 150 kB 00:00
2026-03-06T23:52:56.416 INFO:teuthology.orchestra.run.vm08.stdout:Running transaction check
2026-03-06T23:52:56.417 INFO:teuthology.orchestra.run.vm03.stdout:(5/6): nvme-cli-2.16-1.el9.x86_64.rpm 5.8 MB/s | 1.2 MB 00:00
2026-03-06T23:52:56.421 INFO:teuthology.orchestra.run.vm08.stdout:Transaction check succeeded.
2026-03-06T23:52:56.421 INFO:teuthology.orchestra.run.vm08.stdout:Running transaction test
2026-03-06T23:52:56.457 INFO:teuthology.orchestra.run.vm03.stdout:(6/6): python3-urwid-2.1.2-4.el9.x86_64.rpm 10 MB/s | 837 kB 00:00
2026-03-06T23:52:56.459 INFO:teuthology.orchestra.run.vm03.stdout:--------------------------------------------------------------------------------
2026-03-06T23:52:56.459 INFO:teuthology.orchestra.run.vm03.stdout:Total 3.3 MB/s | 2.3 MB 00:00
2026-03-06T23:52:56.481 INFO:teuthology.orchestra.run.vm08.stdout:Transaction test succeeded.
2026-03-06T23:52:56.481 INFO:teuthology.orchestra.run.vm08.stdout:Running transaction
2026-03-06T23:52:56.524 INFO:teuthology.orchestra.run.vm03.stdout:Running transaction check
2026-03-06T23:52:56.533 INFO:teuthology.orchestra.run.vm03.stdout:Transaction check succeeded.
2026-03-06T23:52:56.533 INFO:teuthology.orchestra.run.vm03.stdout:Running transaction test
2026-03-06T23:52:56.594 INFO:teuthology.orchestra.run.vm03.stdout:Transaction test succeeded.
2026-03-06T23:52:56.595 INFO:teuthology.orchestra.run.vm03.stdout:Running transaction
2026-03-06T23:52:56.655 INFO:teuthology.orchestra.run.vm08.stdout: Preparing : 1/1
2026-03-06T23:52:56.669 INFO:teuthology.orchestra.run.vm08.stdout: Installing : python3-urwid-2.1.2-4.el9.x86_64 1/6
2026-03-06T23:52:56.683 INFO:teuthology.orchestra.run.vm08.stdout: Installing : python3-pyparsing-2.4.7-9.el9.noarch 2/6
2026-03-06T23:52:56.693 INFO:teuthology.orchestra.run.vm08.stdout: Installing : python3-configshell-1:1.1.30-1.el9.noarch 3/6
2026-03-06T23:52:56.703 INFO:teuthology.orchestra.run.vm08.stdout: Installing : python3-kmod-0.9-32.el9.x86_64 4/6
2026-03-06T23:52:56.705 INFO:teuthology.orchestra.run.vm08.stdout: Installing : nvmetcli-0.8-3.el9.noarch 5/6
2026-03-06T23:52:56.774 INFO:teuthology.orchestra.run.vm03.stdout: Preparing : 1/1
2026-03-06T23:52:56.784 INFO:teuthology.orchestra.run.vm03.stdout: Installing : python3-urwid-2.1.2-4.el9.x86_64 1/6
2026-03-06T23:52:56.798 INFO:teuthology.orchestra.run.vm03.stdout: Installing : python3-pyparsing-2.4.7-9.el9.noarch 2/6
2026-03-06T23:52:56.807 INFO:teuthology.orchestra.run.vm03.stdout: Installing : python3-configshell-1:1.1.30-1.el9.noarch 3/6
2026-03-06T23:52:56.818 INFO:teuthology.orchestra.run.vm03.stdout: Installing : python3-kmod-0.9-32.el9.x86_64 4/6
2026-03-06T23:52:56.820 INFO:teuthology.orchestra.run.vm03.stdout: Installing : nvmetcli-0.8-3.el9.noarch 5/6
2026-03-06T23:52:56.887 INFO:teuthology.orchestra.run.vm08.stdout: Running scriptlet: nvmetcli-0.8-3.el9.noarch 5/6
2026-03-06T23:52:56.893 INFO:teuthology.orchestra.run.vm08.stdout: Installing : nvme-cli-2.16-1.el9.x86_64 6/6
2026-03-06T23:52:56.999 INFO:teuthology.orchestra.run.vm03.stdout: Running scriptlet: nvmetcli-0.8-3.el9.noarch 5/6
2026-03-06T23:52:57.006 INFO:teuthology.orchestra.run.vm03.stdout: Installing : nvme-cli-2.16-1.el9.x86_64 6/6
2026-03-06T23:52:57.362 INFO:teuthology.orchestra.run.vm08.stdout: Running scriptlet: nvme-cli-2.16-1.el9.x86_64 6/6
2026-03-06T23:52:57.362 INFO:teuthology.orchestra.run.vm08.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /usr/lib/systemd/system/nvmefc-boot-connections.service.
2026-03-06T23:52:57.362 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-06T23:52:57.445 INFO:teuthology.orchestra.run.vm03.stdout: Running scriptlet: nvme-cli-2.16-1.el9.x86_64 6/6
2026-03-06T23:52:57.445 INFO:teuthology.orchestra.run.vm03.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /usr/lib/systemd/system/nvmefc-boot-connections.service.
2026-03-06T23:52:57.445 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-06T23:52:57.941 INFO:teuthology.orchestra.run.vm08.stdout: Verifying : nvme-cli-2.16-1.el9.x86_64 1/6
2026-03-06T23:52:57.941 INFO:teuthology.orchestra.run.vm08.stdout: Verifying : nvmetcli-0.8-3.el9.noarch 2/6
2026-03-06T23:52:57.941 INFO:teuthology.orchestra.run.vm08.stdout: Verifying : python3-configshell-1:1.1.30-1.el9.noarch 3/6
2026-03-06T23:52:57.941 INFO:teuthology.orchestra.run.vm08.stdout: Verifying : python3-kmod-0.9-32.el9.x86_64 4/6
2026-03-06T23:52:57.941 INFO:teuthology.orchestra.run.vm08.stdout: Verifying : python3-pyparsing-2.4.7-9.el9.noarch 5/6
2026-03-06T23:52:57.982 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : nvme-cli-2.16-1.el9.x86_64 1/6
2026-03-06T23:52:57.982 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : nvmetcli-0.8-3.el9.noarch 2/6
2026-03-06T23:52:57.982 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : python3-configshell-1:1.1.30-1.el9.noarch 3/6
2026-03-06T23:52:57.982 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : python3-kmod-0.9-32.el9.x86_64 4/6
2026-03-06T23:52:57.982 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : python3-pyparsing-2.4.7-9.el9.noarch 5/6
2026-03-06T23:52:58.040 INFO:teuthology.orchestra.run.vm08.stdout: Verifying : python3-urwid-2.1.2-4.el9.x86_64 6/6
2026-03-06T23:52:58.040 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-06T23:52:58.040 INFO:teuthology.orchestra.run.vm08.stdout:Installed:
2026-03-06T23:52:58.040 INFO:teuthology.orchestra.run.vm08.stdout: nvme-cli-2.16-1.el9.x86_64 nvmetcli-0.8-3.el9.noarch
2026-03-06T23:52:58.040 INFO:teuthology.orchestra.run.vm08.stdout: python3-configshell-1:1.1.30-1.el9.noarch python3-kmod-0.9-32.el9.x86_64
2026-03-06T23:52:58.040 INFO:teuthology.orchestra.run.vm08.stdout: python3-pyparsing-2.4.7-9.el9.noarch python3-urwid-2.1.2-4.el9.x86_64
2026-03-06T23:52:58.040 INFO:teuthology.orchestra.run.vm08.stdout:
2026-03-06T23:52:58.040 INFO:teuthology.orchestra.run.vm08.stdout:Complete!
2026-03-06T23:52:58.079 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : python3-urwid-2.1.2-4.el9.x86_64 6/6
2026-03-06T23:52:58.079 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-06T23:52:58.079 INFO:teuthology.orchestra.run.vm03.stdout:Installed:
2026-03-06T23:52:58.079 INFO:teuthology.orchestra.run.vm03.stdout: nvme-cli-2.16-1.el9.x86_64 nvmetcli-0.8-3.el9.noarch
2026-03-06T23:52:58.079 INFO:teuthology.orchestra.run.vm03.stdout: python3-configshell-1:1.1.30-1.el9.noarch python3-kmod-0.9-32.el9.x86_64
2026-03-06T23:52:58.079 INFO:teuthology.orchestra.run.vm03.stdout: python3-pyparsing-2.4.7-9.el9.noarch python3-urwid-2.1.2-4.el9.x86_64
2026-03-06T23:52:58.079 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-06T23:52:58.079 INFO:teuthology.orchestra.run.vm03.stdout:Complete!
2026-03-06T23:52:58.124 DEBUG:teuthology.parallel:result is None
2026-03-06T23:52:58.151 DEBUG:teuthology.parallel:result is None
2026-03-06T23:52:58.151 INFO:teuthology.run_tasks:Running task cephadm...
2026-03-06T23:52:58.194 INFO:tasks.cephadm:Config: {'roleless': True, 'conf': {'mgr': {'debug mgr': 20, 'debug ms': 1}, 'mon': {'debug mon': 20, 'debug ms': 1, 'debug paxos': 20}, 'osd': {'debug ms': 1, 'debug osd': 20, 'osd mclock iops capacity threshold hdd': 49000, 'osd shutdown pgref assert': True}}, 'flavor': 'default', 'log-ignorelist': ['\\(MDS_ALL_DOWN\\)', '\\(MDS_UP_LESS_THAN_MAX\\)', 'CEPHADM_DAEMON_PLACE_FAIL', 'CEPHADM_FAILED_DAEMON'], 'log-only-match': ['CEPHADM_'], 'sha1': '340d3c24fc6ae7529322dc7ccee6c6cb2589da0a', 'cephadm_binary_url': 'https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm', 'containers': {'image': 'harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5'}}
2026-03-06T23:52:58.194 INFO:tasks.cephadm:Provided image contains tag or digest, using it as is
2026-03-06T23:52:58.194 INFO:tasks.cephadm:Cluster image is harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5
2026-03-06T23:52:58.194 INFO:tasks.cephadm:Cluster fsid is 386eb88a-19af-11f1-876d-93c9c802cc09
2026-03-06T23:52:58.194 INFO:tasks.cephadm:Choosing monitor IPs and ports...
2026-03-06T23:52:58.194 INFO:tasks.cephadm:No mon roles; fabricating mons
2026-03-06T23:52:58.194 INFO:tasks.cephadm:Monitor IPs: {'mon.vm03': '192.168.123.103', 'mon.vm08': '192.168.123.108'}
2026-03-06T23:52:58.194 INFO:tasks.cephadm:Normalizing hostnames...
2026-03-06T23:52:58.194 DEBUG:teuthology.orchestra.run.vm03:> sudo hostname $(hostname -s)
2026-03-06T23:52:58.238 DEBUG:teuthology.orchestra.run.vm08:> sudo hostname $(hostname -s)
2026-03-06T23:52:58.287 INFO:tasks.cephadm:Downloading cephadm from url: https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm
2026-03-06T23:52:58.287 DEBUG:teuthology.orchestra.run.vm03:> curl --silent -L https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm
2026-03-06T23:52:59.423 INFO:teuthology.orchestra.run.vm03.stdout:-rw-r--r--. 1 ubuntu ubuntu 787672 Mar 6 23:52 /home/ubuntu/cephtest/cephadm
2026-03-06T23:52:59.423 DEBUG:teuthology.orchestra.run.vm08:> curl --silent -L https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm
2026-03-06T23:53:00.467 INFO:teuthology.orchestra.run.vm08.stdout:-rw-r--r--. 1 ubuntu ubuntu 787672 Mar 6 23:53 /home/ubuntu/cephtest/cephadm
2026-03-06T23:53:00.467 DEBUG:teuthology.orchestra.run.vm03:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm
2026-03-06T23:53:00.489 DEBUG:teuthology.orchestra.run.vm08:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm
2026-03-06T23:53:00.515 INFO:tasks.cephadm:Pulling image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 on all hosts...
2026-03-06T23:53:00.515 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 pull
2026-03-06T23:53:00.531 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 pull
2026-03-06T23:53:00.877 INFO:teuthology.orchestra.run.vm03.stderr:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5...
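Note: the cephadm task above fetches the standalone cephadm binary and only marks it executable after checking it is non-empty and larger than 1000 bytes, which guards against saving an HTML error page instead of the binary. A sketch of the equivalent fetch-and-check, assuming the same URL and test dir seen in this run:

# Sketch mirroring the curl/test/chmod sequence logged above.
import os
import stat
import urllib.request

url = 'https://download.ceph.com/rpm-19.2.3/el9/noarch/cephadm'
dest = '/home/ubuntu/cephtest/cephadm'

urllib.request.urlretrieve(url, dest)
# Same guard as `test -s ... && test $(stat -c%s ...) -gt 1000`.
assert os.path.getsize(dest) > 1000, 'cephadm download looks truncated'
os.chmod(dest, os.stat(dest).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)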
2026-03-06T23:53:00.981 INFO:teuthology.orchestra.run.vm08.stderr:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5...
2026-03-06T23:53:22.088 INFO:teuthology.orchestra.run.vm08.stdout:{
2026-03-06T23:53:22.088 INFO:teuthology.orchestra.run.vm08.stdout: "ceph_version": "ceph version 19.2.3-39-g340d3c24fc6 (340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) squid (stable)",
2026-03-06T23:53:22.088 INFO:teuthology.orchestra.run.vm08.stdout: "image_id": "8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1",
2026-03-06T23:53:22.088 INFO:teuthology.orchestra.run.vm08.stdout: "repo_digests": [
2026-03-06T23:53:22.088 INFO:teuthology.orchestra.run.vm08.stdout: "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0"
2026-03-06T23:53:22.088 INFO:teuthology.orchestra.run.vm08.stdout: ]
2026-03-06T23:53:22.088 INFO:teuthology.orchestra.run.vm08.stdout:}
2026-03-06T23:53:22.178 INFO:teuthology.orchestra.run.vm03.stdout:{
2026-03-06T23:53:22.178 INFO:teuthology.orchestra.run.vm03.stdout: "ceph_version": "ceph version 19.2.3-39-g340d3c24fc6 (340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) squid (stable)",
2026-03-06T23:53:22.178 INFO:teuthology.orchestra.run.vm03.stdout: "image_id": "8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1",
2026-03-06T23:53:22.178 INFO:teuthology.orchestra.run.vm03.stdout: "repo_digests": [
2026-03-06T23:53:22.178 INFO:teuthology.orchestra.run.vm03.stdout: "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0"
2026-03-06T23:53:22.178 INFO:teuthology.orchestra.run.vm03.stdout: ]
2026-03-06T23:53:22.178 INFO:teuthology.orchestra.run.vm03.stdout:}
2026-03-06T23:53:22.204 DEBUG:teuthology.orchestra.run.vm03:> sudo mkdir -p /etc/ceph
2026-03-06T23:53:22.238 DEBUG:teuthology.orchestra.run.vm08:> sudo mkdir -p /etc/ceph
2026-03-06T23:53:22.272 DEBUG:teuthology.orchestra.run.vm03:> sudo chmod 777 /etc/ceph
2026-03-06T23:53:22.306 DEBUG:teuthology.orchestra.run.vm08:> sudo chmod 777 /etc/ceph
2026-03-06T23:53:22.340 INFO:tasks.cephadm:Writing seed config...
2026-03-06T23:53:22.340 INFO:tasks.cephadm: override: [mgr] debug mgr = 20
2026-03-06T23:53:22.340 INFO:tasks.cephadm: override: [mgr] debug ms = 1
2026-03-06T23:53:22.340 INFO:tasks.cephadm: override: [mon] debug mon = 20
2026-03-06T23:53:22.340 INFO:tasks.cephadm: override: [mon] debug ms = 1
2026-03-06T23:53:22.340 INFO:tasks.cephadm: override: [mon] debug paxos = 20
2026-03-06T23:53:22.340 INFO:tasks.cephadm: override: [osd] debug ms = 1
2026-03-06T23:53:22.340 INFO:tasks.cephadm: override: [osd] debug osd = 20
2026-03-06T23:53:22.340 INFO:tasks.cephadm: override: [osd] osd mclock iops capacity threshold hdd = 49000
2026-03-06T23:53:22.340 INFO:tasks.cephadm: override: [osd] osd shutdown pgref assert = True
2026-03-06T23:53:22.340 DEBUG:teuthology.orchestra.run.vm03:> set -ex
2026-03-06T23:53:22.340 DEBUG:teuthology.orchestra.run.vm03:> dd of=/home/ubuntu/cephtest/seed.ceph.conf
2026-03-06T23:53:22.368 DEBUG:tasks.cephadm:Final config: [global]
# make logging friendly to teuthology
log_to_file = true
log_to_stderr = false
log to journald = false
mon cluster log to file = true
mon cluster log file level = debug
mon clock drift allowed = 1.000

# replicate across OSDs, not hosts
osd crush chooseleaf type = 0
#osd pool default size = 2
osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd

# enable some debugging
auth debug = true
ms die on old message = true
ms die on bug = true
debug asserts on shutdown = true

# adjust warnings
mon max pg per osd = 10000  # >= luminous
mon pg warn max object skew = 0
mon osd allow primary affinity = true
mon osd allow pg remap = true
mon warn on legacy crush tunables = false
mon warn on crush straw calc version zero = false
mon warn on no sortbitwise = false
mon warn on osd down out interval zero = false
mon warn on too few osds = false
mon_warn_on_pool_pg_num_not_power_of_two = false

# disable pg_autoscaler by default for new pools
osd_pool_default_pg_autoscale_mode = off

# tests delete pools
mon allow pool delete = true

fsid = 386eb88a-19af-11f1-876d-93c9c802cc09

[osd]
osd scrub load threshold = 5.0
osd scrub max interval = 600
osd mclock profile = high_recovery_ops
osd recover clone overlap = true
osd recovery max chunk = 1048576
osd deep scrub update digest min age = 30
osd map max advance = 10
osd memory target autotune = true

# debugging
osd debug shutdown = true
osd debug op order = true
osd debug verify stray on activate = true
osd debug pg log writeout = true
osd debug verify cached snaps = true
osd debug verify missing on start = true
osd debug misdirected ops = true
osd op queue = debug_random
osd op queue cut off = debug_random
osd shutdown pgref assert = True
bdev debug aio = true
osd sloppy crc = true
debug ms = 1
debug osd = 20
osd mclock iops capacity threshold hdd = 49000

[mgr]
mon reweight min pgs per osd = 4
mon reweight min bytes per osd = 10
mgr/telemetry/nag = false
debug mgr = 20
debug ms = 1

[mon]
mon data avail warn = 5
mon mgr mkfs grace = 240
mon reweight min pgs per osd = 4
mon osd reporter subtree level = osd
mon osd prime pg temp = true
mon reweight min bytes per osd = 10

# rotate auth tickets quickly to exercise renewal paths
auth mon ticket ttl = 660  # 11m
auth service ticket ttl = 240  # 4m

# don't complain about global id reclaim
mon_warn_on_insecure_global_id_reclaim = false
mon_warn_on_insecure_global_id_reclaim_allowed = false

debug mon = 20
debug ms = 1
debug paxos = 20

[client.rgw]
rgw cache enabled = true
rgw enable ops log = true
rgw enable usage log = true
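Note: the seed config above is plain INI-style ceph.conf, so it can be re-parsed to confirm the teuthology overrides landed before bootstrap consumes it. A sketch assuming the same file path from this run; configparser needs inline '#' comments stripped, and this is only a convenience check, not part of the job:

# Sketch: re-parse the seed ceph.conf and spot-check override values.
import configparser

cp = configparser.ConfigParser(strict=False, inline_comment_prefixes=('#',))
cp.read('/home/ubuntu/cephtest/seed.ceph.conf')

assert cp['mon']['debug paxos'] == '20'
assert cp['osd']['osd mclock iops capacity threshold hdd'] == '49000'
assert cp['global']['fsid'] == '386eb88a-19af-11f1-876d-93c9c802cc09'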
2026-03-06T23:53:22.369 DEBUG:teuthology.orchestra.run.vm03:mon.vm03> sudo journalctl -f -n 0 -u ceph-386eb88a-19af-11f1-876d-93c9c802cc09@mon.vm03.service
2026-03-06T23:53:22.411 INFO:tasks.cephadm:Bootstrapping...
2026-03-06T23:53:22.411 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 -v bootstrap --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 --config /home/ubuntu/cephtest/seed.ceph.conf --output-config /etc/ceph/ceph.conf --output-keyring /etc/ceph/ceph.client.admin.keyring --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub --mon-ip 192.168.123.103 --skip-admin-label && sudo chmod +r /etc/ceph/ceph.client.admin.keyring
2026-03-06T23:53:22.722 INFO:teuthology.orchestra.run.vm03.stdout:--------------------------------------------------------------------------------
2026-03-06T23:53:22.722 INFO:teuthology.orchestra.run.vm03.stdout:cephadm ['--image', 'harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5', '-v', 'bootstrap', '--fsid', '386eb88a-19af-11f1-876d-93c9c802cc09', '--config', '/home/ubuntu/cephtest/seed.ceph.conf', '--output-config', '/etc/ceph/ceph.conf', '--output-keyring', '/etc/ceph/ceph.client.admin.keyring', '--output-pub-ssh-key', '/home/ubuntu/cephtest/ceph.pub', '--mon-ip', '192.168.123.103', '--skip-admin-label']
2026-03-06T23:53:22.722 INFO:teuthology.orchestra.run.vm03.stderr:Specifying an fsid for your cluster offers no advantages and may increase the likelihood of fsid conflicts.
2026-03-06T23:53:22.722 INFO:teuthology.orchestra.run.vm03.stdout:Verifying podman|docker is present...
2026-03-06T23:53:22.750 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stdout 5.8.0
2026-03-06T23:53:22.750 INFO:teuthology.orchestra.run.vm03.stdout:Verifying lvm2 is present...
2026-03-06T23:53:22.750 INFO:teuthology.orchestra.run.vm03.stdout:Verifying time synchronization is in place...
2026-03-06T23:53:22.758 INFO:teuthology.orchestra.run.vm03.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service
2026-03-06T23:53:22.758 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory
2026-03-06T23:53:22.767 INFO:teuthology.orchestra.run.vm03.stdout:Non-zero exit code 3 from systemctl is-active chrony.service
2026-03-06T23:53:22.767 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stdout inactive
2026-03-06T23:53:22.776 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stdout enabled
2026-03-06T23:53:22.785 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stdout active
2026-03-06T23:53:22.785 INFO:teuthology.orchestra.run.vm03.stdout:Unit chronyd.service is enabled and running
2026-03-06T23:53:22.785 INFO:teuthology.orchestra.run.vm03.stdout:Repeating the final host check...
2026-03-06T23:53:22.811 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stdout 5.8.0
2026-03-06T23:53:22.811 INFO:teuthology.orchestra.run.vm03.stdout:podman (/bin/podman) version 5.8.0 is present
2026-03-06T23:53:22.811 INFO:teuthology.orchestra.run.vm03.stdout:systemctl is present
2026-03-06T23:53:22.811 INFO:teuthology.orchestra.run.vm03.stdout:lvcreate is present
2026-03-06T23:53:22.818 INFO:teuthology.orchestra.run.vm03.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service
2026-03-06T23:53:22.818 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory
2026-03-06T23:53:22.824 INFO:teuthology.orchestra.run.vm03.stdout:Non-zero exit code 3 from systemctl is-active chrony.service
2026-03-06T23:53:22.824 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stdout inactive
2026-03-06T23:53:22.830 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stdout enabled
2026-03-06T23:53:22.837 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stdout active
2026-03-06T23:53:22.837 INFO:teuthology.orchestra.run.vm03.stdout:Unit chronyd.service is enabled and running
2026-03-06T23:53:22.837 INFO:teuthology.orchestra.run.vm03.stdout:Host looks OK
2026-03-06T23:53:22.837 INFO:teuthology.orchestra.run.vm03.stdout:Cluster fsid: 386eb88a-19af-11f1-876d-93c9c802cc09
2026-03-06T23:53:22.837 INFO:teuthology.orchestra.run.vm03.stdout:Acquiring lock 139949820906944 on /run/cephadm/386eb88a-19af-11f1-876d-93c9c802cc09.lock
2026-03-06T23:53:22.837 INFO:teuthology.orchestra.run.vm03.stdout:Lock 139949820906944 acquired on /run/cephadm/386eb88a-19af-11f1-876d-93c9c802cc09.lock
2026-03-06T23:53:22.837 INFO:teuthology.orchestra.run.vm03.stdout:Verifying IP 192.168.123.103 port 3300 ...
2026-03-06T23:53:22.837 INFO:teuthology.orchestra.run.vm03.stdout:Verifying IP 192.168.123.103 port 6789 ...
2026-03-06T23:53:22.838 INFO:teuthology.orchestra.run.vm03.stdout:Base mon IP(s) is [192.168.123.103:3300, 192.168.123.103:6789], mon addrv is [v2:192.168.123.103:3300,v1:192.168.123.103:6789]
2026-03-06T23:53:22.841 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.103 metric 100
2026-03-06T23:53:22.841 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout 192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.103 metric 100
2026-03-06T23:53:22.844 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout ::1 dev lo proto kernel metric 256 pref medium
2026-03-06T23:53:22.844 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout fe80::/64 dev eth0 proto kernel metric 1024 pref medium
2026-03-06T23:53:22.847 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout 1: lo: mtu 65536 state UNKNOWN qlen 1000
2026-03-06T23:53:22.847 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout inet6 ::1/128 scope host
2026-03-06T23:53:22.847 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout valid_lft forever preferred_lft forever
2026-03-06T23:53:22.847 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout 2: eth0: mtu 1500 state UP qlen 1000
2026-03-06T23:53:22.847 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout inet6 fe80::5055:ff:fe00:3/64 scope link noprefixroute
2026-03-06T23:53:22.847 INFO:teuthology.orchestra.run.vm03.stdout:/sbin/ip: stdout valid_lft forever preferred_lft forever
2026-03-06T23:53:22.847 INFO:teuthology.orchestra.run.vm03.stdout:Mon IP `192.168.123.103` is in CIDR network `192.168.123.0/24`
2026-03-06T23:53:22.847 INFO:teuthology.orchestra.run.vm03.stdout:Mon IP `192.168.123.103` is in CIDR network `192.168.123.0/24`
2026-03-06T23:53:22.847 INFO:teuthology.orchestra.run.vm03.stdout:Inferred mon public CIDR from local network configuration ['192.168.123.0/24', '192.168.123.0/24']
2026-03-06T23:53:22.848 INFO:teuthology.orchestra.run.vm03.stdout:Internal network (--cluster-network) has not been provided, OSD replication will default to the public_network
2026-03-06T23:53:22.848 INFO:teuthology.orchestra.run.vm03.stdout:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5...
2026-03-06T23:53:23.505 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stdout 8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1
2026-03-06T23:53:23.505 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stderr Trying to pull harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5...
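Note: the repeated "Mon IP ... is in CIDR network ..." lines above appear because the mon IP matches the same /24 twice during network inference, hence the duplicated entry in the inferred CIDR list. The membership test itself is equivalent to this stdlib ipaddress sketch (an illustration, not cephadm's code):

# Sketch of the CIDR membership check behind the log lines above.
import ipaddress

mon_ip = ipaddress.ip_address('192.168.123.103')
for cidr in ('192.168.123.0/24',):
    net = ipaddress.ip_network(cidr)
    if mon_ip in net:
        print(f'Mon IP {mon_ip} is in CIDR network {net}')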
2026-03-06T23:53:23.505 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stderr Getting image source signatures
2026-03-06T23:53:23.505 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stderr Copying blob sha256:89f108f95c9b33ae21c5514f17c1bd5ca646e21d3c5e8ac1e117cf65bcd40261
2026-03-06T23:53:23.505 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stderr Copying config sha256:8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1
2026-03-06T23:53:23.505 INFO:teuthology.orchestra.run.vm03.stdout:/bin/podman: stderr Writing manifest to image destination
2026-03-06T23:53:23.755 INFO:teuthology.orchestra.run.vm03.stdout:ceph: stdout ceph version 19.2.3-39-g340d3c24fc6 (340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) squid (stable)
2026-03-06T23:53:23.755 INFO:teuthology.orchestra.run.vm03.stdout:Ceph version: ceph version 19.2.3-39-g340d3c24fc6 (340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) squid (stable)
2026-03-06T23:53:23.755 INFO:teuthology.orchestra.run.vm03.stdout:Extracting ceph user uid/gid from container image...
2026-03-06T23:53:23.859 INFO:teuthology.orchestra.run.vm03.stdout:stat: stdout 167 167
2026-03-06T23:53:23.859 INFO:teuthology.orchestra.run.vm03.stdout:Creating initial keys...
2026-03-06T23:53:23.959 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph-authtool: stdout AQDjWqtpff6INxAA4IuN/JgmUqIEr9TBlv5BdA==
2026-03-06T23:53:24.070 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph-authtool: stdout AQDkWqtpoxXhAhAAtUBfU8VnilciFJD9mYy6gQ==
2026-03-06T23:53:24.184 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph-authtool: stdout AQDkWqtpKfVsCRAAzZOrSNk0aO6R31UXbdVagg==
2026-03-06T23:53:24.184 INFO:teuthology.orchestra.run.vm03.stdout:Creating initial monmap...
2026-03-06T23:53:24.306 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: monmap file /tmp/monmap
2026-03-06T23:53:24.306 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/monmaptool: stdout setting min_mon_release = quincy
2026-03-06T23:53:24.306 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: set fsid to 386eb88a-19af-11f1-876d-93c9c802cc09
2026-03-06T23:53:24.306 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors)
2026-03-06T23:53:24.306 INFO:teuthology.orchestra.run.vm03.stdout:monmaptool for vm03 [v2:192.168.123.103:3300,v1:192.168.123.103:6789] on /usr/bin/monmaptool: monmap file /tmp/monmap
2026-03-06T23:53:24.306 INFO:teuthology.orchestra.run.vm03.stdout:setting min_mon_release = quincy
2026-03-06T23:53:24.306 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/monmaptool: set fsid to 386eb88a-19af-11f1-876d-93c9c802cc09
2026-03-06T23:53:24.306 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors)
2026-03-06T23:53:24.306 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-06T23:53:24.306 INFO:teuthology.orchestra.run.vm03.stdout:Creating mon...
2026-03-06T23:53:24.437 INFO:teuthology.orchestra.run.vm03.stdout:create mon.vm03 on
2026-03-06T23:53:24.785 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph.target → /etc/systemd/system/ceph.target.
2026-03-06T23:53:24.939 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph-386eb88a-19af-11f1-876d-93c9c802cc09.target → /etc/systemd/system/ceph-386eb88a-19af-11f1-876d-93c9c802cc09.target.
2026-03-06T23:53:24.939 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph.target.wants/ceph-386eb88a-19af-11f1-876d-93c9c802cc09.target → /etc/systemd/system/ceph-386eb88a-19af-11f1-876d-93c9c802cc09.target.
2026-03-06T23:53:25.128 INFO:teuthology.orchestra.run.vm03.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-386eb88a-19af-11f1-876d-93c9c802cc09@mon.vm03
2026-03-06T23:53:25.128 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Failed to reset failed state of unit ceph-386eb88a-19af-11f1-876d-93c9c802cc09@mon.vm03.service: Unit ceph-386eb88a-19af-11f1-876d-93c9c802cc09@mon.vm03.service not loaded.
2026-03-06T23:53:25.282 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-386eb88a-19af-11f1-876d-93c9c802cc09.target.wants/ceph-386eb88a-19af-11f1-876d-93c9c802cc09@mon.vm03.service → /etc/systemd/system/ceph-386eb88a-19af-11f1-876d-93c9c802cc09@.service.
2026-03-06T23:53:25.492 INFO:teuthology.orchestra.run.vm03.stdout:firewalld does not appear to be present
2026-03-06T23:53:25.492 INFO:teuthology.orchestra.run.vm03.stdout:Not possible to enable service . firewalld.service is not available
2026-03-06T23:53:25.492 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for mon to start...
2026-03-06T23:53:25.492 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for mon...
2026-03-06T23:53:25.815 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout cluster:
2026-03-06T23:53:25.815 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout id: 386eb88a-19af-11f1-876d-93c9c802cc09
2026-03-06T23:53:25.815 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout health: HEALTH_OK
2026-03-06T23:53:25.815 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout
2026-03-06T23:53:25.815 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout services:
2026-03-06T23:53:25.815 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon: 1 daemons, quorum vm03 (age 0.24026s)
2026-03-06T23:53:25.815 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mgr: no daemons active
2026-03-06T23:53:25.815 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd: 0 osds: 0 up, 0 in
2026-03-06T23:53:25.815 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout
2026-03-06T23:53:25.815 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout data:
2026-03-06T23:53:25.815 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout pools: 0 pools, 0 pgs
2026-03-06T23:53:25.815 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout objects: 0 objects, 0 B
2026-03-06T23:53:25.815 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout usage: 0 B used, 0 B / 0 B avail
2026-03-06T23:53:25.815 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout pgs:
2026-03-06T23:53:25.815 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout
2026-03-06T23:53:25.815 INFO:teuthology.orchestra.run.vm03.stdout:mon is available
2026-03-06T23:53:25.815 INFO:teuthology.orchestra.run.vm03.stdout:Assimilating anything we can from ceph.conf...
2026-03-06T23:53:26.110 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout
2026-03-06T23:53:26.110 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout [global]
2026-03-06T23:53:26.110 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout fsid = 386eb88a-19af-11f1-876d-93c9c802cc09
2026-03-06T23:53:26.110 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug
2026-03-06T23:53:26.111 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.103:3300,v1:192.168.123.103:6789]
2026-03-06T23:53:26.111 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true
2026-03-06T23:53:26.111 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true
2026-03-06T23:53:26.111 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false
2026-03-06T23:53:26.111 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0
2026-03-06T23:53:26.111 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout
2026-03-06T23:53:26.111 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout [mgr]
2026-03-06T23:53:26.111 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false
2026-03-06T23:53:26.111 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout
2026-03-06T23:53:26.111 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout [osd]
2026-03-06T23:53:26.111 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10
2026-03-06T23:53:26.111 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true
2026-03-06T23:53:26.111 INFO:teuthology.orchestra.run.vm03.stdout:Generating new minimal ceph.conf...
2026-03-06T23:53:26.416 INFO:teuthology.orchestra.run.vm03.stdout:Restarting the monitor...
2026-03-06T23:53:28.232 INFO:teuthology.orchestra.run.vm03.stdout:Setting public_network to 192.168.123.0/24 in mon config section
2026-03-06T23:53:28.263 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:28 vm03 ceph-mon[48028]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-vm03/store.db/000009.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:28 vm03 ceph-mon[48028]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x557b96fdae00
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:28 vm03 ceph-mon[48028]: rocksdb: DB pointer 0x557b970f6000
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:28 vm03 ceph-mon[48028]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:28 vm03 ceph-mon[48028]: rocksdb: [db/db_impl/db_impl.cc:1111]
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: ** DB Stats **
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Uptime(secs): 0.0 total, 0.0 interval
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout:
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: ** Compaction Stats [default] **
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: L0 2/0 72.52 KB 0.5 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 23.3 0.00 0.00 1 0.003 0 0 0.0 0.0
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Sum 2/0 72.52 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 23.3 0.00 0.00 1 0.003 0 0 0.0 0.0
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 23.3 0.00 0.00 1 0.003 0 0 0.0 0.0
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout:
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: ** Compaction Stats [default] **
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 23.3 0.00 0.00 1 0.003 0 0 0.0 0.0
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout:
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout:
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Uptime(secs): 0.0 total, 0.0 interval
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Flush(GB): cumulative 0.000, interval 0.000
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: AddFile(GB): cumulative 0.000, interval 0.000
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: AddFile(Total Files): cumulative 0, interval 0
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: AddFile(L0 Files): cumulative 0, interval 0
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: AddFile(Keys): cumulative 0, interval 0
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Cumulative compaction: 0.00 GB write, 2.48 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Interval compaction: 0.00 GB write, 2.48 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Block cache BinnedLRUCache@0x557b96fd91f0#2 capacity: 512.00 MB usage: 26.17 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 2.1e-05 secs_since: 0
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: Block cache entry stats(count,size,portion): DataBlock(3,25.11 KB,0.00478923%) FilterBlock(2,0.70 KB,0.00013411%) IndexBlock(2,0.36 KB,6.85453e-05%) Misc(1,0.00 KB,0%)
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout:
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout: ** File Read Latency Histogram By Level [default] **
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:28 vm03 ceph-mon[48028]: mon.vm03 is new leader, mons vm03 in quorum (ranks 0)
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:28 vm03 ceph-mon[48028]: monmap epoch 1
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:28 vm03 ceph-mon[48028]: fsid 386eb88a-19af-11f1-876d-93c9c802cc09
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:28 vm03 ceph-mon[48028]: last_changed 2026-03-06T22:53:24.279325+0000
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:28 vm03 ceph-mon[48028]: created 2026-03-06T22:53:24.279325+0000
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:28 vm03 ceph-mon[48028]: min_mon_release 19 (squid)
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:28 vm03 ceph-mon[48028]: election_strategy: 1
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:28 vm03 ceph-mon[48028]: 0: [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon.vm03
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:28 vm03 ceph-mon[48028]: fsmap
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:28 vm03 ceph-mon[48028]: osdmap e1: 0 total, 0 up, 0 in
2026-03-06T23:53:28.513 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:28 vm03 ceph-mon[48028]: mgrmap e1: no daemons active
2026-03-06T23:53:28.571 INFO:teuthology.orchestra.run.vm03.stdout:Wrote config to /etc/ceph/ceph.conf
2026-03-06T23:53:28.571 INFO:teuthology.orchestra.run.vm03.stdout:Wrote keyring to /etc/ceph/ceph.client.admin.keyring
2026-03-06T23:53:28.571 INFO:teuthology.orchestra.run.vm03.stdout:Creating mgr...
2026-03-06T23:53:28.572 INFO:teuthology.orchestra.run.vm03.stdout:Verifying port 0.0.0.0:9283 ...
2026-03-06T23:53:28.572 INFO:teuthology.orchestra.run.vm03.stdout:Verifying port 0.0.0.0:8765 ...
2026-03-06T23:53:28.572 INFO:teuthology.orchestra.run.vm03.stdout:Verifying port 0.0.0.0:8443 ...
2026-03-06T23:53:28.742 INFO:teuthology.orchestra.run.vm03.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-386eb88a-19af-11f1-876d-93c9c802cc09@mgr.vm03.xzkqce
2026-03-06T23:53:28.742 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Failed to reset failed state of unit ceph-386eb88a-19af-11f1-876d-93c9c802cc09@mgr.vm03.xzkqce.service: Unit ceph-386eb88a-19af-11f1-876d-93c9c802cc09@mgr.vm03.xzkqce.service not loaded.
2026-03-06T23:53:28.872 INFO:teuthology.orchestra.run.vm03.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-386eb88a-19af-11f1-876d-93c9c802cc09.target.wants/ceph-386eb88a-19af-11f1-876d-93c9c802cc09@mgr.vm03.xzkqce.service → /etc/systemd/system/ceph-386eb88a-19af-11f1-876d-93c9c802cc09@.service.
2026-03-06T23:53:29.059 INFO:teuthology.orchestra.run.vm03.stdout:firewalld does not appear to be present
2026-03-06T23:53:29.059 INFO:teuthology.orchestra.run.vm03.stdout:Not possible to enable service . firewalld.service is not available
2026-03-06T23:53:29.059 INFO:teuthology.orchestra.run.vm03.stdout:firewalld does not appear to be present
2026-03-06T23:53:29.059 INFO:teuthology.orchestra.run.vm03.stdout:Not possible to open ports <[9283, 8765, 8443]>. firewalld.service is not available
2026-03-06T23:53:29.059 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for mgr to start...
2026-03-06T23:53:29.059 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for mgr...
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout {
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsid": "386eb88a-19af-11f1-876d-93c9c802cc09",
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "health": {
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK",
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "checks": {},
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mutes": []
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "election_epoch": 5,
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum": [
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 0
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ],
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_names": [
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "vm03"
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ],
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_age": 1,
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "monmap": {
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid",
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_mons": 1
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osdmap": {
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_osds": 0,
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_up_osds": 0,
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_up_since": 0,
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_in_osds": 0,
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_in_since": 0,
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgmap": {
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgs_by_state": [],
2026-03-06T23:53:29.445 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pgs": 0,
2026-03-06T23:53:29.446 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pools": 0,
2026-03-06T23:53:29.446 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_objects": 0,
2026-03-06T23:53:29.446 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "data_bytes": 0,
2026-03-06T23:53:29.446 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_used": 0,
2026-03-06T23:53:29.446 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_avail": 0,
2026-03-06T23:53:29.446 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_total": 0
2026-03-06T23:53:29.446 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:29.446 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsmap": {
2026-03-06T23:53:29.446 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-06T23:53:29.446 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "btime": "2026-03-06T22:53:25:536236+0000",
2026-03-06T23:53:29.446 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "by_rank": [],
2026-03-06T23:53:29.446 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "up:standby": 0
2026-03-06T23:53:29.446 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:29.446 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mgrmap": {
2026-03-06T23:53:29.446 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "available": false,
2026-03-06T23:53:29.446 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_standbys": 0,
2026-03-06T23:53:29.446 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modules": [
2026-03-06T23:53:29.446 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "iostat",
2026-03-06T23:53:29.446 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "nfs",
2026-03-06T23:53:29.446 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "restful"
2026-03-06T23:53:29.446 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ],
2026-03-06T23:53:29.446 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {}
2026-03-06T23:53:29.447 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:29.447 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "servicemap": {
2026-03-06T23:53:29.447 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-06T23:53:29.447 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modified": "2026-03-06T22:53:25.536853+0000",
2026-03-06T23:53:29.447 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {}
2026-03-06T23:53:29.447 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:29.447 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "progress_events": {}
2026-03-06T23:53:29.447 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }
2026-03-06T23:53:29.447 INFO:teuthology.orchestra.run.vm03.stdout:mgr not available, waiting (1/15)...
2026-03-06T23:53:29.770 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:29 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/2910486978' entity='client.admin'
2026-03-06T23:53:29.770 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:29 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/1936176935' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
2026-03-06T23:53:31.853 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout
2026-03-06T23:53:31.853 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout {
2026-03-06T23:53:31.853 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsid": "386eb88a-19af-11f1-876d-93c9c802cc09",
2026-03-06T23:53:31.853 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "health": {
2026-03-06T23:53:31.853 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK",
2026-03-06T23:53:31.853 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "checks": {},
2026-03-06T23:53:31.853 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mutes": []
2026-03-06T23:53:31.853 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:31.853 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "election_epoch": 5,
2026-03-06T23:53:31.853 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum": [
2026-03-06T23:53:31.853 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 0
2026-03-06T23:53:31.853 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ],
2026-03-06T23:53:31.853 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_names": [
2026-03-06T23:53:31.853 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "vm03"
2026-03-06T23:53:31.853 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ],
2026-03-06T23:53:31.853 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_age": 3,
2026-03-06T23:53:31.853 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "monmap": {
2026-03-06T23:53:31.853 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-06T23:53:31.853 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid",
2026-03-06T23:53:31.853 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_mons": 1
2026-03-06T23:53:31.853 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:31.853 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osdmap": {
2026-03-06T23:53:31.853 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-06T23:53:31.853 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_osds": 0,
2026-03-06T23:53:31.853 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_up_osds": 0,
2026-03-06T23:53:31.853 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_up_since": 0,
2026-03-06T23:53:31.853 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_in_osds": 0,
2026-03-06T23:53:31.853 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_in_since": 0,
2026-03-06T23:53:31.853 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgmap": {
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgs_by_state": [],
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pgs": 0,
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pools": 0,
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_objects": 0,
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "data_bytes": 0,
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_used": 0,
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_avail": 0,
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_total": 0
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsmap": {
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "btime": "2026-03-06T22:53:25:536236+0000",
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "by_rank": [],
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "up:standby": 0
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mgrmap": {
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "available": false,
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_standbys": 0,
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modules": [
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "iostat",
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "nfs",
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "restful"
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ],
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {}
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "servicemap": {
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modified": "2026-03-06T22:53:25.536853+0000",
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {}
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "progress_events": {}
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }
2026-03-06T23:53:31.854 INFO:teuthology.orchestra.run.vm03.stdout:mgr not available, waiting (2/15)...
2026-03-06T23:53:32.042 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:31 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/3939074429' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
2026-03-06T23:53:34.421 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout
2026-03-06T23:53:34.421 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout {
2026-03-06T23:53:34.421 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsid": "386eb88a-19af-11f1-876d-93c9c802cc09",
2026-03-06T23:53:34.421 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "health": {
2026-03-06T23:53:34.421 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK",
2026-03-06T23:53:34.421 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "checks": {},
2026-03-06T23:53:34.421 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mutes": []
2026-03-06T23:53:34.421 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:34.421 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "election_epoch": 5,
2026-03-06T23:53:34.421 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum": [
2026-03-06T23:53:34.421 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 0
2026-03-06T23:53:34.421 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ],
2026-03-06T23:53:34.421 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_names": [
2026-03-06T23:53:34.421 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "vm03"
2026-03-06T23:53:34.421 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ],
2026-03-06T23:53:34.422 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_age": 6,
2026-03-06T23:53:34.422 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "monmap": {
2026-03-06T23:53:34.422 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-06T23:53:34.422 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid",
2026-03-06T23:53:34.422 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_mons": 1
2026-03-06T23:53:34.422 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:34.422 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osdmap": {
2026-03-06T23:53:34.422 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-06T23:53:34.422 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_osds": 0,
2026-03-06T23:53:34.422 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_up_osds": 0,
2026-03-06T23:53:34.422 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_up_since": 0,
2026-03-06T23:53:34.422 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_in_osds": 0,
2026-03-06T23:53:34.422 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_in_since": 0,
2026-03-06T23:53:34.422 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0
2026-03-06T23:53:34.422 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:34.422 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgmap": {
2026-03-06T23:53:34.422 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgs_by_state": [],
2026-03-06T23:53:34.422 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pgs": 0,
2026-03-06T23:53:34.422 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pools": 0,
2026-03-06T23:53:34.423 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_objects": 0,
2026-03-06T23:53:34.423 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "data_bytes": 0,
2026-03-06T23:53:34.423 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_used": 0,
2026-03-06T23:53:34.423 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_avail": 0,
2026-03-06T23:53:34.423 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_total": 0
2026-03-06T23:53:34.423 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:34.423 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsmap": {
2026-03-06T23:53:34.423 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-06T23:53:34.423 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "btime": "2026-03-06T22:53:25:536236+0000",
2026-03-06T23:53:34.423 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "by_rank": [],
2026-03-06T23:53:34.423 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "up:standby": 0
2026-03-06T23:53:34.423 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:34.423 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mgrmap": {
2026-03-06T23:53:34.423 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "available": false,
2026-03-06T23:53:34.423 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_standbys": 0,
2026-03-06T23:53:34.423 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modules": [
2026-03-06T23:53:34.423 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "iostat",
2026-03-06T23:53:34.423 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "nfs",
2026-03-06T23:53:34.423 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "restful"
2026-03-06T23:53:34.423 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ],
2026-03-06T23:53:34.423 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {}
2026-03-06T23:53:34.423 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:34.423 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "servicemap": {
2026-03-06T23:53:34.423 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-06T23:53:34.423 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modified": "2026-03-06T22:53:25.536853+0000",
2026-03-06T23:53:34.423 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {}
2026-03-06T23:53:34.423 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:34.423 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "progress_events": {}
2026-03-06T23:53:34.423 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }
2026-03-06T23:53:34.423 INFO:teuthology.orchestra.run.vm03.stdout:mgr not available, waiting (3/15)...
2026-03-06T23:53:34.601 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:34 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/1925853264' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
2026-03-06T23:53:36.864 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout
2026-03-06T23:53:36.864 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout {
2026-03-06T23:53:36.864 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsid": "386eb88a-19af-11f1-876d-93c9c802cc09",
2026-03-06T23:53:36.864 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "health": {
2026-03-06T23:53:36.864 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK",
2026-03-06T23:53:36.864 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "checks": {},
2026-03-06T23:53:36.864 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mutes": []
2026-03-06T23:53:36.864 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:36.864 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "election_epoch": 5,
2026-03-06T23:53:36.864 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum": [
2026-03-06T23:53:36.864 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 0
2026-03-06T23:53:36.864 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ],
2026-03-06T23:53:36.864 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_names": [
2026-03-06T23:53:36.864 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "vm03"
2026-03-06T23:53:36.864 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ],
2026-03-06T23:53:36.864 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_age": 8,
2026-03-06T23:53:36.864 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "monmap": {
2026-03-06T23:53:36.864 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-06T23:53:36.864 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid",
2026-03-06T23:53:36.864 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_mons": 1
2026-03-06T23:53:36.864 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:36.864 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osdmap": {
2026-03-06T23:53:36.864 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-06T23:53:36.864 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_osds": 0,
2026-03-06T23:53:36.864 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_up_osds": 0,
2026-03-06T23:53:36.864 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_up_since": 0,
2026-03-06T23:53:36.864 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_in_osds": 0,
2026-03-06T23:53:36.864 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_in_since": 0,
2026-03-06T23:53:36.864 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0
2026-03-06T23:53:36.866 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:36.866 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgmap": {
2026-03-06T23:53:36.866 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgs_by_state": [],
2026-03-06T23:53:36.866 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pgs": 0,
2026-03-06T23:53:36.866 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pools": 0,
2026-03-06T23:53:36.866 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_objects": 0,
2026-03-06T23:53:36.866 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "data_bytes": 0,
2026-03-06T23:53:36.866 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_used": 0,
2026-03-06T23:53:36.866 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_avail": 0,
2026-03-06T23:53:36.866 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_total": 0
2026-03-06T23:53:36.866 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:36.866 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsmap": {
2026-03-06T23:53:36.866 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-06T23:53:36.866 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "btime": "2026-03-06T22:53:25:536236+0000",
2026-03-06T23:53:36.866 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "by_rank": [],
2026-03-06T23:53:36.866 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "up:standby": 0
2026-03-06T23:53:36.866 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:36.866 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mgrmap": {
2026-03-06T23:53:36.866 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "available": false,
2026-03-06T23:53:36.866 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_standbys": 0,
2026-03-06T23:53:36.866 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modules": [
2026-03-06T23:53:36.866 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "iostat",
2026-03-06T23:53:36.866 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "nfs",
2026-03-06T23:53:36.866 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "restful"
2026-03-06T23:53:36.866 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ],
2026-03-06T23:53:36.866 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {}
2026-03-06T23:53:36.866 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:36.866 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "servicemap": {
2026-03-06T23:53:36.866 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-06T23:53:36.866 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modified": "2026-03-06T22:53:25.536853+0000",
2026-03-06T23:53:36.867 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {}
2026-03-06T23:53:36.867 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:36.867 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "progress_events": {}
2026-03-06T23:53:36.867 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }
2026-03-06T23:53:36.867 INFO:teuthology.orchestra.run.vm03.stdout:mgr not available, waiting (4/15)...
2026-03-06T23:53:37.120 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:36 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/174364035' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
2026-03-06T23:53:39.212 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout
2026-03-06T23:53:39.212 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout {
2026-03-06T23:53:39.212 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsid": "386eb88a-19af-11f1-876d-93c9c802cc09",
2026-03-06T23:53:39.212 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "health": {
2026-03-06T23:53:39.212 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK",
2026-03-06T23:53:39.212 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "checks": {},
2026-03-06T23:53:39.212 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mutes": []
2026-03-06T23:53:39.212 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:39.212 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "election_epoch": 5,
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum": [
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 0
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ],
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_names": [
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "vm03"
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ],
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_age": 10,
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "monmap": {
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid",
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_mons": 1
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osdmap": {
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_osds": 0,
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_up_osds": 0,
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_up_since": 0,
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_in_osds": 0,
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_in_since": 0,
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgmap": {
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgs_by_state": [],
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pgs": 0,
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pools": 0,
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_objects": 0,
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "data_bytes": 0,
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_used": 0,
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_avail": 0,
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_total": 0
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsmap": {
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "btime": "2026-03-06T22:53:25:536236+0000",
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "by_rank": [],
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "up:standby": 0
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mgrmap": {
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "available": false,
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_standbys": 0,
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modules": [
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "iostat",
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "nfs",
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "restful"
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ],
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {}
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "servicemap": {
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modified": "2026-03-06T22:53:25.536853+0000",
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {}
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "progress_events": {}
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }
2026-03-06T23:53:39.213 INFO:teuthology.orchestra.run.vm03.stdout:mgr not available, waiting (5/15)...
2026-03-06T23:53:39.430 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:39 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/1583060641' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
2026-03-06T23:53:40.450 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:40 vm03 ceph-mon[48028]: Activating manager daemon vm03.xzkqce
2026-03-06T23:53:40.450 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:40 vm03 ceph-mon[48028]: mgrmap e2: vm03.xzkqce(active, starting, since 0.00477912s)
2026-03-06T23:53:40.450 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:40 vm03 ceph-mon[48028]: from='mgr.14100 192.168.123.103:0/4141098752' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-06T23:53:40.450 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:40 vm03 ceph-mon[48028]: from='mgr.14100 192.168.123.103:0/4141098752' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-06T23:53:40.450 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:40 vm03 ceph-mon[48028]: from='mgr.14100 192.168.123.103:0/4141098752' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-06T23:53:40.450 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:40 vm03 ceph-mon[48028]: from='mgr.14100 192.168.123.103:0/4141098752' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch
2026-03-06T23:53:40.450 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:40 vm03 ceph-mon[48028]: from='mgr.14100 192.168.123.103:0/4141098752' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mgr metadata", "who": "vm03.xzkqce", "id": "vm03.xzkqce"}]: dispatch
2026-03-06T23:53:40.450 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:40 vm03 ceph-mon[48028]: Manager daemon vm03.xzkqce is now available
2026-03-06T23:53:40.450 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:40 vm03 ceph-mon[48028]: from='mgr.14100 192.168.123.103:0/4141098752' entity='mgr.vm03.xzkqce'
2026-03-06T23:53:40.450 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:40 vm03 ceph-mon[48028]: from='mgr.14100 192.168.123.103:0/4141098752' entity='mgr.vm03.xzkqce'
2026-03-06T23:53:40.450 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:40 vm03 ceph-mon[48028]: from='mgr.14100 192.168.123.103:0/4141098752' entity='mgr.vm03.xzkqce' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm03.xzkqce/mirror_snapshot_schedule"}]: dispatch
2026-03-06T23:53:40.450 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:40 vm03 ceph-mon[48028]: from='mgr.14100 192.168.123.103:0/4141098752' entity='mgr.vm03.xzkqce'
2026-03-06T23:53:40.450 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:40 vm03 ceph-mon[48028]: from='mgr.14100 192.168.123.103:0/4141098752' entity='mgr.vm03.xzkqce' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm03.xzkqce/trash_purge_schedule"}]: dispatch
2026-03-06T23:53:41.608 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout
2026-03-06T23:53:41.608 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout {
2026-03-06T23:53:41.608 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsid": "386eb88a-19af-11f1-876d-93c9c802cc09",
2026-03-06T23:53:41.608 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "health": {
2026-03-06T23:53:41.608 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK",
2026-03-06T23:53:41.608 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "checks": {},
2026-03-06T23:53:41.608 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mutes": []
2026-03-06T23:53:41.608 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:41.608 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "election_epoch": 5,
2026-03-06T23:53:41.608 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum": [
2026-03-06T23:53:41.608 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 0
2026-03-06T23:53:41.608 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ],
2026-03-06T23:53:41.608 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_names": [
2026-03-06T23:53:41.608 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "vm03"
2026-03-06T23:53:41.608 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ],
2026-03-06T23:53:41.608 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "quorum_age": 13,
2026-03-06T23:53:41.608 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "monmap": {
2026-03-06T23:53:41.608 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-06T23:53:41.608 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "squid",
2026-03-06T23:53:41.608 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_mons": 1
2026-03-06T23:53:41.608 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:41.608 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osdmap": {
2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_osds": 0,
2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_up_osds": 0,
2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_up_since": 0,
2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_in_osds": 0,
2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "osd_in_since": 0,
2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0
2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgmap": {
2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "pgs_by_state": [],
2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pgs": 0,
2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_pools": 0,
2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_objects": 0,
2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "data_bytes": 0,
2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_used": 0,
2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_avail": 0,
2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "bytes_total": 0
2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout },
2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "fsmap": {
2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "btime": "2026-03-06T22:53:25:536236+0000", 2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "available": true, 2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modules": [ 2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "iostat", 2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "nfs", 2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "restful" 2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ], 2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "modified": "2026-03-06T22:53:25.536853+0000", 2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "services": {} 2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout }, 2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout } 2026-03-06T23:53:41.609 INFO:teuthology.orchestra.run.vm03.stdout:mgr is available 2026-03-06T23:53:42.012 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-06T23:53:42.012 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout [global] 2026-03-06T23:53:42.012 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout fsid = 386eb88a-19af-11f1-876d-93c9c802cc09 2026-03-06T23:53:42.012 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug 2026-03-06T23:53:42.012 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.103:3300,v1:192.168.123.103:6789] 2026-03-06T23:53:42.012 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true 2026-03-06T23:53:42.012 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true 2026-03-06T23:53:42.012 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false 2026-03-06T23:53:42.012 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0 2026-03-06T23:53:42.012 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-06T23:53:42.012 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout [mgr] 2026-03-06T23:53:42.012 
INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false 2026-03-06T23:53:42.012 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 2026-03-06T23:53:42.012 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout [osd] 2026-03-06T23:53:42.012 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10 2026-03-06T23:53:42.012 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true 2026-03-06T23:53:42.012 INFO:teuthology.orchestra.run.vm03.stdout:Enabling cephadm module... 2026-03-06T23:53:42.218 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:42 vm03 ceph-mon[48028]: mgrmap e3: vm03.xzkqce(active, since 1.00848s) 2026-03-06T23:53:42.218 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:42 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/123931105' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-06T23:53:42.219 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:42 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/2181311764' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch 2026-03-06T23:53:43.348 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:43 vm03 ceph-mon[48028]: mgrmap e4: vm03.xzkqce(active, since 2s) 2026-03-06T23:53:43.348 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:43 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/1798496097' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "cephadm"}]: dispatch 2026-03-06T23:53:43.461 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout { 2026-03-06T23:53:43.462 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 5, 2026-03-06T23:53:43.462 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "available": true, 2026-03-06T23:53:43.462 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "active_name": "vm03.xzkqce", 2026-03-06T23:53:43.462 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_standby": 0 2026-03-06T23:53:43.462 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout } 2026-03-06T23:53:43.462 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for the mgr to restart... 2026-03-06T23:53:43.462 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for mgr epoch 5... 2026-03-06T23:53:44.537 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:44 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/1798496097' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished 2026-03-06T23:53:44.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:44 vm03 ceph-mon[48028]: mgrmap e5: vm03.xzkqce(active, since 3s) 2026-03-06T23:53:44.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:44 vm03 ceph-mon[48028]: from='client.? 
192.168.123.103:0/2293535818' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-06T23:53:53.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:52 vm03 ceph-mon[48028]: Active manager daemon vm03.xzkqce restarted 2026-03-06T23:53:53.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:52 vm03 ceph-mon[48028]: Activating manager daemon vm03.xzkqce 2026-03-06T23:53:53.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:52 vm03 ceph-mon[48028]: osdmap e2: 0 total, 0 up, 0 in 2026-03-06T23:53:53.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:52 vm03 ceph-mon[48028]: mgrmap e6: vm03.xzkqce(active, starting, since 0.00844969s) 2026-03-06T23:53:53.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:52 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch 2026-03-06T23:53:53.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:52 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mgr metadata", "who": "vm03.xzkqce", "id": "vm03.xzkqce"}]: dispatch 2026-03-06T23:53:53.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:52 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-06T23:53:53.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:52 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-06T23:53:53.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:52 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-06T23:53:53.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:52 vm03 ceph-mon[48028]: Manager daemon vm03.xzkqce is now available 2026-03-06T23:53:53.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:52 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' 2026-03-06T23:53:53.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:52 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' 2026-03-06T23:53:53.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:52 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T23:53:53.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:52 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T23:53:53.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:52 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm03.xzkqce/mirror_snapshot_schedule"}]: dispatch 2026-03-06T23:53:53.820 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout { 2026-03-06T23:53:53.820 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mgrmap_epoch": 7, 2026-03-06T23:53:53.820 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "initialized": true 2026-03-06T23:53:53.820 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout } 2026-03-06T23:53:53.820 INFO:teuthology.orchestra.run.vm03.stdout:mgr epoch 
5 is available 2026-03-06T23:53:53.820 INFO:teuthology.orchestra.run.vm03.stdout:Setting orchestrator backend to cephadm... 2026-03-06T23:53:54.037 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:53 vm03 ceph-mon[48028]: Found migration_current of "None". Setting to last migration. 2026-03-06T23:53:54.037 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:53 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm03.xzkqce/trash_purge_schedule"}]: dispatch 2026-03-06T23:53:54.037 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:53 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' 2026-03-06T23:53:54.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:53 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' 2026-03-06T23:53:54.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:53 vm03 ceph-mon[48028]: mgrmap e7: vm03.xzkqce(active, since 1.01265s) 2026-03-06T23:53:54.578 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout value unchanged 2026-03-06T23:53:54.578 INFO:teuthology.orchestra.run.vm03.stdout:Generating ssh key... 2026-03-06T23:53:54.823 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:54 vm03 ceph-mon[48028]: from='client.14128 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-03-06T23:53:54.823 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:54 vm03 ceph-mon[48028]: from='client.14128 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-03-06T23:53:54.823 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:54 vm03 ceph-mon[48028]: [06/Mar/2026:22:53:53] ENGINE Bus STARTING 2026-03-06T23:53:55.085 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:54 vm03 ceph-mon[48028]: [06/Mar/2026:22:53:53] ENGINE Serving on http://192.168.123.103:8765 2026-03-06T23:53:55.085 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:54 vm03 ceph-mon[48028]: [06/Mar/2026:22:53:54] ENGINE Serving on https://192.168.123.103:7150 2026-03-06T23:53:55.085 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:54 vm03 ceph-mon[48028]: [06/Mar/2026:22:53:54] ENGINE Bus STARTED 2026-03-06T23:53:55.085 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:54 vm03 ceph-mon[48028]: [06/Mar/2026:22:53:54] ENGINE Client ('192.168.123.103', 46132) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-06T23:53:55.085 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:54 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T23:53:55.085 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:54 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' 2026-03-06T23:53:55.085 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:54 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T23:53:55.389 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQDApv906jjBJn02hwcBj2dXpUecFh4PlXTGjC7jGwNKx1ch4d1yTfkJNiIYLjhJWJyesCpOD9nTTz8UXIz2ICXPJKIkKZMXLbCoS2vyoh0Ro36LLxGjM/jxxX8RvHXgvQ5S0NfeX/4mqukMtoqo6Wii3yRywWwJ5Jq4wsRK37fYUJyJ2EVQJ8SrPLBnQT2qMa3DlRKdomsTB50LLlIrIICWxIZViuUg572FMbdSCMtkVtOs6HkBtybxOJhHr1TjWIiIRRcPgY7v831PwIZme0hOW7O7wU503dFSxG6pbEv6lXMPy0Xv1jQqDoW9N/JSCwKmx3Jl88ijHQXYivcyh03Ie4GmVDuWGel1+E9LqyqKuBqflgRTDXJSPYQ6LPWEDzIricyeoUpBKi/MPxzoXpsEq6oAa++Ta8sTnsjnhv4Ik5AU4A/H8uH8pNoMU6hARTVJCJSFYYg5rBmUdPUEb7CueZfik4xyV7oyrIgHHtlTwHSROCnflnM+8Q7m/BxdyKs= ceph-386eb88a-19af-11f1-876d-93c9c802cc09 2026-03-06T23:53:55.389 INFO:teuthology.orchestra.run.vm03.stdout:Wrote public SSH key to /home/ubuntu/cephtest/ceph.pub 2026-03-06T23:53:55.389 INFO:teuthology.orchestra.run.vm03.stdout:Adding key to root@localhost authorized_keys... 2026-03-06T23:53:55.389 INFO:teuthology.orchestra.run.vm03.stdout:Adding host vm03... 2026-03-06T23:53:56.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:55 vm03 ceph-mon[48028]: from='client.14136 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:53:56.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:55 vm03 ceph-mon[48028]: from='client.14138 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "root", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:53:56.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:55 vm03 ceph-mon[48028]: from='client.14140 -' entity='client.admin' cmd=[{"prefix": "cephadm generate-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:53:56.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:55 vm03 ceph-mon[48028]: Generating ssh key... 2026-03-06T23:53:56.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:55 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' 2026-03-06T23:53:56.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:55 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' 2026-03-06T23:53:56.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:55 vm03 ceph-mon[48028]: mgrmap e8: vm03.xzkqce(active, since 2s) 2026-03-06T23:53:57.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:56 vm03 ceph-mon[48028]: from='client.14142 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:53:57.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:56 vm03 ceph-mon[48028]: from='client.14144 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm03", "addr": "192.168.123.103", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:53:58.098 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Added host 'vm03' with addr '192.168.123.103' 2026-03-06T23:53:58.098 INFO:teuthology.orchestra.run.vm03.stdout:Deploying mon service with default placement... 2026-03-06T23:53:58.287 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:57 vm03 ceph-mon[48028]: Deploying cephadm binary to vm03 2026-03-06T23:53:58.465 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Scheduled mon update... 2026-03-06T23:53:58.465 INFO:teuthology.orchestra.run.vm03.stdout:Deploying mgr service with default placement... 2026-03-06T23:53:58.835 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Scheduled mgr update... 
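The bootstrap output above shows cephadm wiring up its SSH identity and registering the first host before scheduling the core services. A minimal sketch of the same sequence run by hand from an admin shell, using only commands that appear in this run (host name and address taken from this job):

    # point the orchestrator at the cephadm backend
    ceph orch set backend cephadm
    # cephadm reaches hosts over SSH as this user
    ceph cephadm set-user root
    # create the cluster SSH key and fetch the public half
    ceph cephadm generate-key
    ceph cephadm get-pub-key > ceph.pub
    # after installing ceph.pub into root's authorized_keys on the target:
    ceph orch host add vm03 192.168.123.103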
2026-03-06T23:53:58.835 INFO:teuthology.orchestra.run.vm03.stdout:Deploying crash service with default placement... 2026-03-06T23:53:59.225 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Scheduled crash update... 2026-03-06T23:53:59.225 INFO:teuthology.orchestra.run.vm03.stdout:Deploying ceph-exporter service with default placement... 2026-03-06T23:53:59.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:59 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' 2026-03-06T23:53:59.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:59 vm03 ceph-mon[48028]: Added host vm03 2026-03-06T23:53:59.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:59 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T23:53:59.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:59 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' 2026-03-06T23:53:59.307 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:53:59 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' 2026-03-06T23:53:59.626 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Scheduled ceph-exporter update... 2026-03-06T23:53:59.626 INFO:teuthology.orchestra.run.vm03.stdout:Deploying prometheus service with default placement... 2026-03-06T23:54:00.017 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Scheduled prometheus update... 2026-03-06T23:54:00.017 INFO:teuthology.orchestra.run.vm03.stdout:Deploying grafana service with default placement... 2026-03-06T23:54:00.290 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:00 vm03 ceph-mon[48028]: from='client.14146 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:54:00.290 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:00 vm03 ceph-mon[48028]: Saving service mon spec with placement count:5 2026-03-06T23:54:00.290 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:00 vm03 ceph-mon[48028]: from='client.14148 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:54:00.290 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:00 vm03 ceph-mon[48028]: Saving service mgr spec with placement count:2 2026-03-06T23:54:00.290 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:00 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:00.290 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:00 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:00.290 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:00 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:00.290 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:00 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:00.290 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:00 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:00.456 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Scheduled grafana update... 
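Each "Deploying ... service with default placement" line corresponds to an "orch apply" call; the mon log records the saved specs (mon count:5, mgr count:2, crash and ceph-exporter on '*', prometheus count:1). A sketch of equivalent explicit applies; the placement arguments here mirror those saved specs rather than flags taken from this output:

    ceph orch apply mon --placement=5          # saved as count:5
    ceph orch apply mgr --placement=2          # saved as count:2
    ceph orch apply crash '*'                  # one crash agent per host
    ceph orch apply prometheus --placement=1
    ceph orch ls                               # review the resulting specs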
2026-03-06T23:54:00.456 INFO:teuthology.orchestra.run.vm03.stdout:Deploying node-exporter service with default placement... 2026-03-06T23:54:00.845 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Scheduled node-exporter update... 2026-03-06T23:54:00.845 INFO:teuthology.orchestra.run.vm03.stdout:Deploying alertmanager service with default placement... 2026-03-06T23:54:01.175 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:01 vm03 ceph-mon[48028]: from='client.14150 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "crash", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:54:01.175 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:01 vm03 ceph-mon[48028]: Saving service crash spec with placement * 2026-03-06T23:54:01.175 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:01 vm03 ceph-mon[48028]: from='client.14152 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "ceph-exporter", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:54:01.175 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:01 vm03 ceph-mon[48028]: Saving service ceph-exporter spec with placement * 2026-03-06T23:54:01.175 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:01 vm03 ceph-mon[48028]: from='client.14154 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:54:01.175 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:01 vm03 ceph-mon[48028]: Saving service prometheus spec with placement count:1 2026-03-06T23:54:01.175 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:01 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:01.175 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:01 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:01.175 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:01 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:01.208 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Scheduled alertmanager update... 2026-03-06T23:54:01.929 INFO:teuthology.orchestra.run.vm03.stdout:Enabling the dashboard module... 
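Enabling a mgr module (cephadm earlier, dashboard here) restarts the active mgr, which is why the task records the current mgrmap epoch first and then waits for a newer one, as the "Waiting for mgr epoch ..." lines show. A standalone sketch of that wait, assuming jq is available (the task itself parses the JSON output directly):

    before=$(ceph mgr stat | jq .epoch)
    ceph mgr module enable dashboard
    # the active mgr restarts; wait until the map moves past the old epoch
    until [ "$(ceph mgr stat | jq .epoch)" -gt "$before" ]; do
        sleep 1
    done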
2026-03-06T23:54:02.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:02 vm03 ceph-mon[48028]: from='client.14156 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:54:02.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:02 vm03 ceph-mon[48028]: Saving service grafana spec with placement count:1 2026-03-06T23:54:02.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:02 vm03 ceph-mon[48028]: from='client.14158 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "node-exporter", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:54:02.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:02 vm03 ceph-mon[48028]: Saving service node-exporter spec with placement * 2026-03-06T23:54:02.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:02 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:02.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:02 vm03 ceph-mon[48028]: from='mgr.14124 192.168.123.103:0/3264257391' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:02.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:02 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/2758499085' entity='client.admin' 2026-03-06T23:54:02.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:02 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/3695981814' entity='client.admin' 2026-03-06T23:54:03.378 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:03 vm03 ceph-mon[48028]: from='client.14160 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:54:03.378 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:03 vm03 ceph-mon[48028]: Saving service alertmanager spec with placement count:1 2026-03-06T23:54:03.378 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:03 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/561881641' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "dashboard"}]: dispatch 2026-03-06T23:54:03.556 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout { 2026-03-06T23:54:03.556 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "epoch": 9, 2026-03-06T23:54:03.556 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "available": true, 2026-03-06T23:54:03.556 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "active_name": "vm03.xzkqce", 2026-03-06T23:54:03.556 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "num_standby": 0 2026-03-06T23:54:03.556 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout } 2026-03-06T23:54:03.556 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for the mgr to restart... 2026-03-06T23:54:03.556 INFO:teuthology.orchestra.run.vm03.stdout:Waiting for mgr epoch 9... 2026-03-06T23:54:04.537 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:04 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/561881641' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "dashboard"}]': finished 2026-03-06T23:54:04.537 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:04 vm03 ceph-mon[48028]: mgrmap e9: vm03.xzkqce(active, since 10s) 2026-03-06T23:54:04.537 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:04 vm03 ceph-mon[48028]: from='client.? 
192.168.123.103:0/423802249' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-06T23:54:12.322 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:12 vm03 ceph-mon[48028]: Active manager daemon vm03.xzkqce restarted 2026-03-06T23:54:12.322 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:12 vm03 ceph-mon[48028]: Activating manager daemon vm03.xzkqce 2026-03-06T23:54:12.322 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:12 vm03 ceph-mon[48028]: osdmap e3: 0 total, 0 up, 0 in 2026-03-06T23:54:12.322 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:12 vm03 ceph-mon[48028]: mgrmap e10: vm03.xzkqce(active, starting, since 0.00708677s) 2026-03-06T23:54:12.322 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:12 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch 2026-03-06T23:54:12.322 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:12 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mgr metadata", "who": "vm03.xzkqce", "id": "vm03.xzkqce"}]: dispatch 2026-03-06T23:54:12.322 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:12 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-06T23:54:12.322 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:12 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-06T23:54:12.322 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:12 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-06T23:54:12.322 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:12 vm03 ceph-mon[48028]: Manager daemon vm03.xzkqce is now available 2026-03-06T23:54:12.322 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:12 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm03.xzkqce/mirror_snapshot_schedule"}]: dispatch 2026-03-06T23:54:12.322 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:12 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T23:54:12.322 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:12 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm03.xzkqce/trash_purge_schedule"}]: dispatch 2026-03-06T23:54:13.239 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout { 2026-03-06T23:54:13.240 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "mgrmap_epoch": 11, 2026-03-06T23:54:13.240 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout "initialized": true 2026-03-06T23:54:13.240 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout } 2026-03-06T23:54:13.240 INFO:teuthology.orchestra.run.vm03.stdout:mgr epoch 9 is available 2026-03-06T23:54:13.240 INFO:teuthology.orchestra.run.vm03.stdout:Generating a dashboard self-signed certificate... 
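With the mgr back, the task provisions the dashboard: a self-signed certificate, an initial admin account (the ac-user-create dispatch is visible below), and a lookup of the SSL port. A hand-run sketch; the password file is illustrative, and exact flag spellings may vary by release:

    ceph dashboard create-self-signed-cert
    echo 'secret' > /tmp/dash-pw.txt                       # illustrative password file
    ceph dashboard ac-user-create admin -i /tmp/dash-pw.txt administrator \
        --force-password --pwd-update-required
    ceph config get mgr mgr/dashboard/ssl_server_port      # returns 8443 in this run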
2026-03-06T23:54:13.675 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout Self-signed certificate created 2026-03-06T23:54:13.675 INFO:teuthology.orchestra.run.vm03.stdout:Creating initial admin user... 2026-03-06T23:54:14.208 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:13 vm03 ceph-mon[48028]: [06/Mar/2026:22:54:12] ENGINE Bus STARTING 2026-03-06T23:54:14.208 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:13 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:14.208 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:13 vm03 ceph-mon[48028]: [06/Mar/2026:22:54:13] ENGINE Serving on http://192.168.123.103:8765 2026-03-06T23:54:14.208 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:13 vm03 ceph-mon[48028]: [06/Mar/2026:22:54:13] ENGINE Serving on https://192.168.123.103:7150 2026-03-06T23:54:14.208 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:13 vm03 ceph-mon[48028]: [06/Mar/2026:22:54:13] ENGINE Bus STARTED 2026-03-06T23:54:14.208 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:13 vm03 ceph-mon[48028]: [06/Mar/2026:22:54:13] ENGINE Client ('192.168.123.103', 37822) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-06T23:54:14.208 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:13 vm03 ceph-mon[48028]: mgrmap e11: vm03.xzkqce(active, since 1.01063s) 2026-03-06T23:54:14.208 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:13 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:14.208 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:13 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:14.208 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:13 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:14.212 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout {"username": "admin", "password": "$2b$12$wBEAufxFSuNhoMAh2lMbAu/k8xUwl.t.CZaIaL445bA5cok64bwzy", "roles": ["administrator"], "name": null, "email": null, "lastUpdate": 1772837654, "enabled": true, "pwdExpirationDate": null, "pwdUpdateRequired": true} 2026-03-06T23:54:14.212 INFO:teuthology.orchestra.run.vm03.stdout:Fetching dashboard port number... 2026-03-06T23:54:14.550 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stdout 8443 2026-03-06T23:54:14.550 INFO:teuthology.orchestra.run.vm03.stdout:firewalld does not appear to be present 2026-03-06T23:54:14.550 INFO:teuthology.orchestra.run.vm03.stdout:Not possible to open ports <[8443]>. 
firewalld.service is not available 2026-03-06T23:54:14.552 INFO:teuthology.orchestra.run.vm03.stdout:Ceph Dashboard is now available at: 2026-03-06T23:54:14.552 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:54:14.552 INFO:teuthology.orchestra.run.vm03.stdout: URL: https://vm03.local:8443/ 2026-03-06T23:54:14.552 INFO:teuthology.orchestra.run.vm03.stdout: User: admin 2026-03-06T23:54:14.552 INFO:teuthology.orchestra.run.vm03.stdout: Password: ju0dkdjcls 2026-03-06T23:54:14.552 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:54:14.552 INFO:teuthology.orchestra.run.vm03.stdout:Saving cluster configuration to /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/config directory 2026-03-06T23:54:14.941 INFO:teuthology.orchestra.run.vm03.stdout:/usr/bin/ceph: stderr set mgr/dashboard/cluster/status 2026-03-06T23:54:14.942 INFO:teuthology.orchestra.run.vm03.stdout:You can access the Ceph CLI as following in case of multi-cluster or non-default config: 2026-03-06T23:54:14.942 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:54:14.942 INFO:teuthology.orchestra.run.vm03.stdout: sudo /home/ubuntu/cephtest/cephadm shell --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring 2026-03-06T23:54:14.942 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:54:14.942 INFO:teuthology.orchestra.run.vm03.stdout:Or, if you are only running a single cluster on this host: 2026-03-06T23:54:14.942 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:54:14.942 INFO:teuthology.orchestra.run.vm03.stdout: sudo /home/ubuntu/cephtest/cephadm shell 2026-03-06T23:54:14.942 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:54:14.942 INFO:teuthology.orchestra.run.vm03.stdout:Please consider enabling telemetry to help improve Ceph: 2026-03-06T23:54:14.942 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:54:14.942 INFO:teuthology.orchestra.run.vm03.stdout: ceph telemetry on 2026-03-06T23:54:14.942 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:54:14.942 INFO:teuthology.orchestra.run.vm03.stdout:For more information see: 2026-03-06T23:54:14.942 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:54:14.942 INFO:teuthology.orchestra.run.vm03.stdout: https://docs.ceph.com/en/latest/mgr/telemetry/ 2026-03-06T23:54:14.942 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:54:14.942 INFO:teuthology.orchestra.run.vm03.stdout:Bootstrap complete. 2026-03-06T23:54:14.975 INFO:tasks.cephadm:Fetching config... 2026-03-06T23:54:14.975 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-06T23:54:14.975 DEBUG:teuthology.orchestra.run.vm03:> dd if=/etc/ceph/ceph.conf of=/dev/stdout 2026-03-06T23:54:15.003 INFO:tasks.cephadm:Fetching client.admin keyring... 2026-03-06T23:54:15.003 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-06T23:54:15.003 DEBUG:teuthology.orchestra.run.vm03:> dd if=/etc/ceph/ceph.client.admin.keyring of=/dev/stdout 2026-03-06T23:54:15.073 INFO:tasks.cephadm:Fetching mon keyring... 2026-03-06T23:54:15.073 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-06T23:54:15.073 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/keyring of=/dev/stdout 2026-03-06T23:54:15.140 INFO:tasks.cephadm:Fetching pub ssh key... 
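Bootstrap is complete at this point, and the summary above shows the two ways into an admin shell. The steps that follow pull the generated conf and keyrings back with dd over SSH; interactively, the same access looks like this, using the paths and fsid from this run:

    # single cluster on the host: cephadm infers fsid and config
    sudo /home/ubuntu/cephtest/cephadm shell
    # explicit form, as used by the task for every orch call below
    sudo /home/ubuntu/cephtest/cephadm shell \
        --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 \
        -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        -- ceph orch host ls --format=json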
2026-03-06T23:54:15.140 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-06T23:54:15.140 DEBUG:teuthology.orchestra.run.vm03:> dd if=/home/ubuntu/cephtest/ceph.pub of=/dev/stdout 2026-03-06T23:54:15.197 INFO:tasks.cephadm:Installing pub ssh key for root users... 2026-03-06T23:54:15.198 DEBUG:teuthology.orchestra.run.vm03:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDApv906jjBJn02hwcBj2dXpUecFh4PlXTGjC7jGwNKx1ch4d1yTfkJNiIYLjhJWJyesCpOD9nTTz8UXIz2ICXPJKIkKZMXLbCoS2vyoh0Ro36LLxGjM/jxxX8RvHXgvQ5S0NfeX/4mqukMtoqo6Wii3yRywWwJ5Jq4wsRK37fYUJyJ2EVQJ8SrPLBnQT2qMa3DlRKdomsTB50LLlIrIICWxIZViuUg572FMbdSCMtkVtOs6HkBtybxOJhHr1TjWIiIRRcPgY7v831PwIZme0hOW7O7wU503dFSxG6pbEv6lXMPy0Xv1jQqDoW9N/JSCwKmx3Jl88ijHQXYivcyh03Ie4GmVDuWGel1+E9LqyqKuBqflgRTDXJSPYQ6LPWEDzIricyeoUpBKi/MPxzoXpsEq6oAa++Ta8sTnsjnhv4Ik5AU4A/H8uH8pNoMU6hARTVJCJSFYYg5rBmUdPUEb7CueZfik4xyV7oyrIgHHtlTwHSROCnflnM+8Q7m/BxdyKs= ceph-386eb88a-19af-11f1-876d-93c9c802cc09' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-03-06T23:54:15.260 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:14 vm03 ceph-mon[48028]: from='client.14172 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-03-06T23:54:15.260 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:14 vm03 ceph-mon[48028]: from='client.14172 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-03-06T23:54:15.260 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:14 vm03 ceph-mon[48028]: from='client.14180 -' entity='client.admin' cmd=[{"prefix": "dashboard create-self-signed-cert", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:54:15.260 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:14 vm03 ceph-mon[48028]: from='client.14182 -' entity='client.admin' cmd=[{"prefix": "dashboard ac-user-create", "username": "admin", "rolename": "administrator", "force_password": true, "pwd_update_required": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:54:15.260 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:14 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:15.260 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:14 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/3199525334' entity='client.admin' cmd=[{"prefix": "config get", "who": "mgr", "key": "mgr/dashboard/ssl_server_port"}]: dispatch 2026-03-06T23:54:15.260 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:14 vm03 ceph-mon[48028]: from='client.? 
192.168.123.103:0/1940046546' entity='client.admin' 2026-03-06T23:54:15.271 INFO:teuthology.orchestra.run.vm03.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDApv906jjBJn02hwcBj2dXpUecFh4PlXTGjC7jGwNKx1ch4d1yTfkJNiIYLjhJWJyesCpOD9nTTz8UXIz2ICXPJKIkKZMXLbCoS2vyoh0Ro36LLxGjM/jxxX8RvHXgvQ5S0NfeX/4mqukMtoqo6Wii3yRywWwJ5Jq4wsRK37fYUJyJ2EVQJ8SrPLBnQT2qMa3DlRKdomsTB50LLlIrIICWxIZViuUg572FMbdSCMtkVtOs6HkBtybxOJhHr1TjWIiIRRcPgY7v831PwIZme0hOW7O7wU503dFSxG6pbEv6lXMPy0Xv1jQqDoW9N/JSCwKmx3Jl88ijHQXYivcyh03Ie4GmVDuWGel1+E9LqyqKuBqflgRTDXJSPYQ6LPWEDzIricyeoUpBKi/MPxzoXpsEq6oAa++Ta8sTnsjnhv4Ik5AU4A/H8uH8pNoMU6hARTVJCJSFYYg5rBmUdPUEb7CueZfik4xyV7oyrIgHHtlTwHSROCnflnM+8Q7m/BxdyKs= ceph-386eb88a-19af-11f1-876d-93c9c802cc09 2026-03-06T23:54:15.281 DEBUG:teuthology.orchestra.run.vm08:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDApv906jjBJn02hwcBj2dXpUecFh4PlXTGjC7jGwNKx1ch4d1yTfkJNiIYLjhJWJyesCpOD9nTTz8UXIz2ICXPJKIkKZMXLbCoS2vyoh0Ro36LLxGjM/jxxX8RvHXgvQ5S0NfeX/4mqukMtoqo6Wii3yRywWwJ5Jq4wsRK37fYUJyJ2EVQJ8SrPLBnQT2qMa3DlRKdomsTB50LLlIrIICWxIZViuUg572FMbdSCMtkVtOs6HkBtybxOJhHr1TjWIiIRRcPgY7v831PwIZme0hOW7O7wU503dFSxG6pbEv6lXMPy0Xv1jQqDoW9N/JSCwKmx3Jl88ijHQXYivcyh03Ie4GmVDuWGel1+E9LqyqKuBqflgRTDXJSPYQ6LPWEDzIricyeoUpBKi/MPxzoXpsEq6oAa++Ta8sTnsjnhv4Ik5AU4A/H8uH8pNoMU6hARTVJCJSFYYg5rBmUdPUEb7CueZfik4xyV7oyrIgHHtlTwHSROCnflnM+8Q7m/BxdyKs= ceph-386eb88a-19af-11f1-876d-93c9c802cc09' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-03-06T23:54:15.311 INFO:teuthology.orchestra.run.vm08.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDApv906jjBJn02hwcBj2dXpUecFh4PlXTGjC7jGwNKx1ch4d1yTfkJNiIYLjhJWJyesCpOD9nTTz8UXIz2ICXPJKIkKZMXLbCoS2vyoh0Ro36LLxGjM/jxxX8RvHXgvQ5S0NfeX/4mqukMtoqo6Wii3yRywWwJ5Jq4wsRK37fYUJyJ2EVQJ8SrPLBnQT2qMa3DlRKdomsTB50LLlIrIICWxIZViuUg572FMbdSCMtkVtOs6HkBtybxOJhHr1TjWIiIRRcPgY7v831PwIZme0hOW7O7wU503dFSxG6pbEv6lXMPy0Xv1jQqDoW9N/JSCwKmx3Jl88ijHQXYivcyh03Ie4GmVDuWGel1+E9LqyqKuBqflgRTDXJSPYQ6LPWEDzIricyeoUpBKi/MPxzoXpsEq6oAa++Ta8sTnsjnhv4Ik5AU4A/H8uH8pNoMU6hARTVJCJSFYYg5rBmUdPUEb7CueZfik4xyV7oyrIgHHtlTwHSROCnflnM+8Q7m/BxdyKs= ceph-386eb88a-19af-11f1-876d-93c9c802cc09 2026-03-06T23:54:15.320 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph config set mgr mgr/cephadm/allow_ptrace true 2026-03-06T23:54:15.656 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:54:16.067 INFO:tasks.cephadm:Distributing conf and client.admin keyring to all hosts + 0755 2026-03-06T23:54:16.067 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph orch client-keyring set client.admin '*' --mode 0755 2026-03-06T23:54:16.264 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:16 vm03 ceph-mon[48028]: mgrmap e12: vm03.xzkqce(active, since 2s) 2026-03-06T23:54:16.264 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:16 vm03 ceph-mon[48028]: from='client.? 
192.168.123.103:0/2540088039' entity='client.admin' 2026-03-06T23:54:16.425 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:54:16.781 INFO:tasks.cephadm:Writing (initial) conf and keyring to vm08 2026-03-06T23:54:16.781 DEBUG:teuthology.orchestra.run.vm08:> set -ex 2026-03-06T23:54:16.781 DEBUG:teuthology.orchestra.run.vm08:> dd of=/etc/ceph/ceph.conf 2026-03-06T23:54:16.795 DEBUG:teuthology.orchestra.run.vm08:> set -ex 2026-03-06T23:54:16.795 DEBUG:teuthology.orchestra.run.vm08:> dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-06T23:54:16.849 INFO:tasks.cephadm:Adding host vm08 to orchestrator... 2026-03-06T23:54:16.849 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph orch host add vm08 2026-03-06T23:54:17.162 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:54:18.037 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:17 vm03 ceph-mon[48028]: from='client.14190 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:54:18.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:17 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:18.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:17 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:18.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:17 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:18.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:17 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-06T23:54:18.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:17 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:54:18.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:17 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T23:54:18.873 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:18 vm03 ceph-mon[48028]: from='client.14192 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm08", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:54:18.873 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:18 vm03 ceph-mon[48028]: Updating vm03:/etc/ceph/ceph.conf 2026-03-06T23:54:18.873 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:18 vm03 ceph-mon[48028]: Updating vm03:/var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/config/ceph.conf 2026-03-06T23:54:18.874 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:18 vm03 ceph-mon[48028]: Updating vm03:/etc/ceph/ceph.client.admin.keyring 2026-03-06T23:54:18.874 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:18 vm03 ceph-mon[48028]: 
Updating vm03:/var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/config/ceph.client.admin.keyring 2026-03-06T23:54:18.874 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:18 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:18.874 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:18 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:18.874 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:18 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:18.874 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:18 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm03", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch 2026-03-06T23:54:18.874 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:18 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm03", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished 2026-03-06T23:54:18.874 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:18 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:54:19.961 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:19 vm03 ceph-mon[48028]: Deploying daemon ceph-exporter.vm03 on vm03 2026-03-06T23:54:19.961 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:19 vm03 ceph-mon[48028]: Deploying cephadm binary to vm08 2026-03-06T23:54:19.961 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:19 vm03 ceph-mon[48028]: mgrmap e13: vm03.xzkqce(active, since 6s) 2026-03-06T23:54:19.961 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:19 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:19.961 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:19 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:19.961 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:19 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:19.961 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:19 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:19.961 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:19 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm03", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-06T23:54:19.961 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:19 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm03", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished 2026-03-06T23:54:19.961 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:19 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config 
generate-minimal-conf"}]: dispatch 2026-03-06T23:54:19.976 INFO:teuthology.orchestra.run.vm03.stdout:Added host 'vm08' with addr '192.168.123.108' 2026-03-06T23:54:20.065 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph orch host ls --format=json 2026-03-06T23:54:20.466 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:54:20.811 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:54:20.811 INFO:teuthology.orchestra.run.vm03.stdout:[{"addr": "192.168.123.103", "hostname": "vm03", "labels": [], "status": ""}, {"addr": "192.168.123.108", "hostname": "vm08", "labels": [], "status": ""}] 2026-03-06T23:54:20.945 INFO:tasks.cephadm:Setting crush tunables to default 2026-03-06T23:54:20.946 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd crush tunables default 2026-03-06T23:54:21.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:20 vm03 ceph-mon[48028]: Deploying daemon crash.vm03 on vm03 2026-03-06T23:54:21.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:20 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:21.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:20 vm03 ceph-mon[48028]: Added host vm08 2026-03-06T23:54:21.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:20 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:21.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:20 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:21.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:20 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:21.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:20 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:21.335 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:54:21.975 INFO:teuthology.orchestra.run.vm03.stderr:adjusted tunables profile to default 2026-03-06T23:54:22.024 INFO:tasks.cephadm:Adding mon.vm03 on vm03 2026-03-06T23:54:22.024 INFO:tasks.cephadm:Adding mon.vm08 on vm08 2026-03-06T23:54:22.024 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph orch apply mon '2;vm03:192.168.123.103=vm03;vm08:192.168.123.108=vm08' 2026-03-06T23:54:22.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:21 vm03 ceph-mon[48028]: Deploying daemon node-exporter.vm03 on vm03 2026-03-06T23:54:22.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:21 vm03 ceph-mon[48028]: from='client.14195 -' entity='client.admin' cmd=[{"prefix": "orch host ls", 
"target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T23:54:22.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:21 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/1420454496' entity='client.admin' cmd=[{"prefix": "osd crush tunables", "profile": "default"}]: dispatch 2026-03-06T23:54:22.315 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:22.346 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:22.654 INFO:teuthology.orchestra.run.vm08.stdout:Scheduled mon update... 2026-03-06T23:54:22.699 DEBUG:teuthology.orchestra.run.vm08:mon.vm08> sudo journalctl -f -n 0 -u ceph-386eb88a-19af-11f1-876d-93c9c802cc09@mon.vm08.service 2026-03-06T23:54:22.701 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-06T23:54:22.701 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph mon dump -f json 2026-03-06T23:54:23.034 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:23.068 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:23.287 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:22 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/1420454496' entity='client.admin' cmd='[{"prefix": "osd crush tunables", "profile": "default"}]': finished 2026-03-06T23:54:23.287 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:22 vm03 ceph-mon[48028]: osdmap e4: 0 total, 0 up, 0 in 2026-03-06T23:54:23.287 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:22 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:23.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:22 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:23.415 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-06T23:54:23.415 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"386eb88a-19af-11f1-876d-93c9c802cc09","modified":"2026-03-06T22:53:24.279325Z","created":"2026-03-06T22:53:24.279325Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T23:54:23.415 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-06T23:54:24.483 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-06T23:54:24.484 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph mon dump -f json 2026-03-06T23:54:24.537 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:24 vm03 ceph-mon[48028]: from='client.14199 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "placement": "2;vm03:192.168.123.103=vm03;vm08:192.168.123.108=vm08", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:54:24.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:24 vm03 ceph-mon[48028]: Saving service mon spec with placement vm03:192.168.123.103=vm03;vm08:192.168.123.108=vm08;count:2 2026-03-06T23:54:24.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:24 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:24.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:24 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:24.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:24 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:24.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:24 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:24.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:24 vm03 ceph-mon[48028]: from='client.? 192.168.123.108:0/2544451004' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T23:54:24.770 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:24.801 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:25.133 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-06T23:54:25.133 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"386eb88a-19af-11f1-876d-93c9c802cc09","modified":"2026-03-06T22:53:24.279325Z","created":"2026-03-06T22:53:24.279325Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T23:54:25.134 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-06T23:54:25.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:25 vm03 ceph-mon[48028]: Deploying daemon alertmanager.vm03 on vm03 2026-03-06T23:54:25.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:25 vm03 ceph-mon[48028]: from='client.? 192.168.123.108:0/3636801970' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T23:54:26.199 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-06T23:54:26.199 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph mon dump -f json 2026-03-06T23:54:26.513 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:26.548 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:26.882 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-06T23:54:26.882 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"386eb88a-19af-11f1-876d-93c9c802cc09","modified":"2026-03-06T22:53:24.279325Z","created":"2026-03-06T22:53:24.279325Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T23:54:26.882 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-06T23:54:27.947 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-06T23:54:27.947 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph mon dump -f json 2026-03-06T23:54:28.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:27 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:28.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:27 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:28.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:27 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:28.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:27 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:28.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:27 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:28.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:27 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:28.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:27 vm03 ceph-mon[48028]: Regenerating cephadm self-signed grafana TLS certificates 2026-03-06T23:54:28.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:27 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:28.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:27 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:28.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:27 vm03 
ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-06T23:54:28.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:27 vm03 ceph-mon[48028]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-06T23:54:28.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:27 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:28.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:27 vm03 ceph-mon[48028]: Deploying daemon grafana.vm03 on vm03 2026-03-06T23:54:28.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:27 vm03 ceph-mon[48028]: from='client.? 192.168.123.108:0/1393196489' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T23:54:28.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:27 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:28.253 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:28.287 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:28.623 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-06T23:54:28.623 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"386eb88a-19af-11f1-876d-93c9c802cc09","modified":"2026-03-06T22:53:24.279325Z","created":"2026-03-06T22:53:24.279325Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T23:54:28.623 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-06T23:54:29.037 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:28 vm03 ceph-mon[48028]: from='client.? 192.168.123.108:0/3592699392' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T23:54:29.687 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
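
Around 23:54:27 cephadm regenerates its self-signed Grafana TLS certificates and then disables certificate verification for the dashboard's Grafana API client; with a self-signed certificate, verification would otherwise fail when the dashboard proxies Grafana. The toggle is the same command dispatched in the log:

    import subprocess

    # Mirrors the "dashboard set-grafana-api-ssl-verify" dispatch above.
    subprocess.run(
        ["ceph", "dashboard", "set-grafana-api-ssl-verify", "false"],
        check=True,
    )
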
2026-03-06T23:54:29.687 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph mon dump -f json 2026-03-06T23:54:29.993 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:30.028 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:30.372 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-06T23:54:30.372 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"386eb88a-19af-11f1-876d-93c9c802cc09","modified":"2026-03-06T22:53:24.279325Z","created":"2026-03-06T22:53:24.279325Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T23:54:30.372 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-06T23:54:30.787 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:30 vm03 ceph-mon[48028]: from='client.? 192.168.123.108:0/374811450' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T23:54:31.446 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-06T23:54:31.447 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph mon dump -f json 2026-03-06T23:54:31.759 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:31.797 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:32.150 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-06T23:54:32.150 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"386eb88a-19af-11f1-876d-93c9c802cc09","modified":"2026-03-06T22:53:24.279325Z","created":"2026-03-06T22:53:24.279325Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T23:54:32.150 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-06T23:54:32.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:32 vm03 ceph-mon[48028]: from='client.? 
192.168.123.108:0/3329465412' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T23:54:33.280 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-06T23:54:33.281 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph mon dump -f json 2026-03-06T23:54:33.589 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:33.625 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:33.974 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-06T23:54:33.974 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"386eb88a-19af-11f1-876d-93c9c802cc09","modified":"2026-03-06T22:53:24.279325Z","created":"2026-03-06T22:53:24.279325Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T23:54:33.974 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-06T23:54:34.162 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:33 vm03 ceph-mon[48028]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T23:54:34.162 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:33 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:34.162 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:33 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:34.162 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:33 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:34.162 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:33 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:34.162 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:33 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:34.162 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:33 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:34.162 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:33 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:34.162 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:33 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:34.162 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:33 vm03 ceph-mon[48028]: Deploying daemon prometheus.vm03 on vm03 2026-03-06T23:54:35.026 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
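
By this point the orchestrator has queued alertmanager, grafana, and prometheus daemons on vm03. A generic way to see what cephadm has actually deployed and in what state (standard orchestrator CLI, not part of this run; run it inside "cephadm shell" or on a host with an admin keyring):

    import subprocess

    # "ceph orch ps" lists every cephadm-managed daemon with its status.
    subprocess.run(["ceph", "orch", "ps"], check=True)
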
2026-03-06T23:54:35.026 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph mon dump -f json 2026-03-06T23:54:35.287 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:34 vm03 ceph-mon[48028]: from='client.? 192.168.123.108:0/1302711595' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T23:54:35.325 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:35.364 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:35.715 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-06T23:54:35.715 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"386eb88a-19af-11f1-876d-93c9c802cc09","modified":"2026-03-06T22:53:24.279325Z","created":"2026-03-06T22:53:24.279325Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T23:54:35.715 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-06T23:54:36.287 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:35 vm03 ceph-mon[48028]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T23:54:36.287 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:35 vm03 ceph-mon[48028]: from='client.? 192.168.123.108:0/45079981' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T23:54:36.783 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-06T23:54:36.783 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph mon dump -f json 2026-03-06T23:54:37.096 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:37.133 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:37.646 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-06T23:54:37.646 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"386eb88a-19af-11f1-876d-93c9c802cc09","modified":"2026-03-06T22:53:24.279325Z","created":"2026-03-06T22:53:24.279325Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T23:54:37.647 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-06T23:54:37.787 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:37 vm03 ceph-mon[48028]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T23:54:37.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:37 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:38.503 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:38 vm03 ceph-mon[48028]: from='client.? 192.168.123.108:0/2308780394' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T23:54:38.715 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-06T23:54:38.716 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph mon dump -f json 2026-03-06T23:54:39.017 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:39.047 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:39.375 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-06T23:54:39.375 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"386eb88a-19af-11f1-876d-93c9c802cc09","modified":"2026-03-06T22:53:24.279325Z","created":"2026-03-06T22:53:24.279325Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T23:54:39.375 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-06T23:54:39.823 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:39 vm03 ceph-mon[48028]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T23:54:39.823 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:39 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:39.823 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:39 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:39.823 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:39 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:39.823 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:39 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mgr module enable", "module": "prometheus"}]: dispatch 2026-03-06T23:54:39.823 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:39 vm03 ceph-mon[48028]: from='client.? 192.168.123.108:0/1408237822' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T23:54:40.439 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-06T23:54:40.439 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph mon dump -f json 2026-03-06T23:54:40.740 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:40.772 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:41.026 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:40 vm03 ceph-mon[48028]: from='mgr.14168 192.168.123.103:0/2722471058' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished 2026-03-06T23:54:41.026 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:40 vm03 ceph-mon[48028]: mgrmap e14: vm03.xzkqce(active, since 27s) 2026-03-06T23:54:41.135 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-06T23:54:41.135 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"386eb88a-19af-11f1-876d-93c9c802cc09","modified":"2026-03-06T22:53:24.279325Z","created":"2026-03-06T22:53:24.279325Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T23:54:41.135 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-06T23:54:41.842 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:41 vm03 ceph-mon[48028]: from='client.? 192.168.123.108:0/3407490169' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T23:54:42.200 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
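
The "mgr module enable prometheus" command finishes at 23:54:40 and the mgrmap ticks to e14. A quick check that the module actually landed (generic ceph CLI, not from this run; the JSON keys are those of the standard `mgr module ls` output):

    import json
    import subprocess

    # "ceph mgr module ls -f json" reports enabled/disabled mgr modules.
    out = subprocess.check_output(["ceph", "mgr", "module", "ls", "-f", "json"])
    modules = json.loads(out)
    assert "prometheus" in modules["enabled_modules"]
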
2026-03-06T23:54:42.200 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph mon dump -f json 2026-03-06T23:54:42.504 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:42.536 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:42.877 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-06T23:54:42.877 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"386eb88a-19af-11f1-876d-93c9c802cc09","modified":"2026-03-06T22:53:24.279325Z","created":"2026-03-06T22:53:24.279325Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T23:54:42.877 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-06T23:54:43.287 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:42 vm03 ceph-mon[48028]: from='client.? 192.168.123.108:0/2208750192' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T23:54:43.944 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-06T23:54:43.944 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph mon dump -f json 2026-03-06T23:54:44.256 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:44.293 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:44.653 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-06T23:54:44.654 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"386eb88a-19af-11f1-876d-93c9c802cc09","modified":"2026-03-06T22:53:24.279325Z","created":"2026-03-06T22:53:24.279325Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T23:54:44.654 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-06T23:54:44.835 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:44 vm03 ceph-mon[48028]: from='client.? 
192.168.123.108:0/3792573797' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T23:54:45.721 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-06T23:54:45.722 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph mon dump -f json 2026-03-06T23:54:46.017 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:46.051 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:46.388 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-06T23:54:46.389 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"386eb88a-19af-11f1-876d-93c9c802cc09","modified":"2026-03-06T22:53:24.279325Z","created":"2026-03-06T22:53:24.279325Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T23:54:46.389 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-06T23:54:46.787 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:46 vm03 ceph-mon[48028]: from='client.? 192.168.123.108:0/3365222766' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T23:54:47.436 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-06T23:54:47.436 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph mon dump -f json 2026-03-06T23:54:47.740 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:47.773 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:48.108 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-06T23:54:48.108 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"386eb88a-19af-11f1-876d-93c9c802cc09","modified":"2026-03-06T22:53:24.279325Z","created":"2026-03-06T22:53:24.279325Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T23:54:48.108 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-06T23:54:48.414 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:48 vm03 ceph-mon[48028]: from='client.? 192.168.123.108:0/3538790731' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T23:54:49.155 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-06T23:54:49.156 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph mon dump -f json 2026-03-06T23:54:49.481 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:49.522 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:49.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:49 vm03 ceph-mon[48028]: Active manager daemon vm03.xzkqce restarted 2026-03-06T23:54:49.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:49 vm03 ceph-mon[48028]: Activating manager daemon vm03.xzkqce 2026-03-06T23:54:49.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:49 vm03 ceph-mon[48028]: osdmap e5: 0 total, 0 up, 0 in 2026-03-06T23:54:49.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:49 vm03 ceph-mon[48028]: mgrmap e15: vm03.xzkqce(active, starting, since 0.00380253s) 2026-03-06T23:54:49.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:49 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch 2026-03-06T23:54:49.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:49 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mgr metadata", "who": "vm03.xzkqce", "id": "vm03.xzkqce"}]: dispatch 2026-03-06T23:54:49.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:49 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-06T23:54:49.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:49 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-06T23:54:49.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:49 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-06T23:54:49.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:49 vm03 ceph-mon[48028]: Manager daemon vm03.xzkqce is now available 2026-03-06T23:54:49.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:49 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:49.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:49 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T23:54:49.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:49 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-06T23:54:49.539 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:49 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm03.xzkqce/mirror_snapshot_schedule"}]: dispatch 2026-03-06T23:54:49.539 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:49 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix":"config 
rm","who":"mgr","name":"mgr/rbd_support/vm03.xzkqce/trash_purge_schedule"}]: dispatch 2026-03-06T23:54:49.909 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-06T23:54:49.909 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"386eb88a-19af-11f1-876d-93c9c802cc09","modified":"2026-03-06T22:53:24.279325Z","created":"2026-03-06T22:53:24.279325Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T23:54:49.909 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-06T23:54:50.976 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-06T23:54:50.976 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph mon dump -f json 2026-03-06T23:54:51.037 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:50 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:51.037 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:50 vm03 ceph-mon[48028]: [06/Mar/2026:22:54:49] ENGINE Bus STARTING 2026-03-06T23:54:51.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:50 vm03 ceph-mon[48028]: [06/Mar/2026:22:54:49] ENGINE Serving on http://192.168.123.103:8765 2026-03-06T23:54:51.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:50 vm03 ceph-mon[48028]: mgrmap e16: vm03.xzkqce(active, since 1.00739s) 2026-03-06T23:54:51.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:50 vm03 ceph-mon[48028]: from='client.? 
192.168.123.108:0/2537956765' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T23:54:51.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:50 vm03 ceph-mon[48028]: [06/Mar/2026:22:54:49] ENGINE Serving on https://192.168.123.103:7150 2026-03-06T23:54:51.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:50 vm03 ceph-mon[48028]: [06/Mar/2026:22:54:49] ENGINE Bus STARTED 2026-03-06T23:54:51.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:50 vm03 ceph-mon[48028]: [06/Mar/2026:22:54:49] ENGINE Client ('192.168.123.103', 57806) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-06T23:54:51.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:50 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:51.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:50 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:51.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:50 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:51.336 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:51.377 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /etc/ceph/ceph.conf 2026-03-06T23:54:51.709 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-06T23:54:51.709 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"386eb88a-19af-11f1-876d-93c9c802cc09","modified":"2026-03-06T22:53:24.279325Z","created":"2026-03-06T22:53:24.279325Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T23:54:51.709 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-06T23:54:52.287 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:51 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:52.287 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:51 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:52.287 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:51 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:52.287 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:51 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch 2026-03-06T23:54:52.287 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:51 vm03 ceph-mon[48028]: from='client.? 192.168.123.108:0/124436538' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T23:54:52.760 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-06T23:54:52.760 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph mon dump -f json 2026-03-06T23:54:53.086 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/config/ceph.conf 2026-03-06T23:54:53.287 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:52 vm03 ceph-mon[48028]: mgrmap e17: vm03.xzkqce(active, since 2s) 2026-03-06T23:54:53.287 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:52 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:53.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:52 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:53.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:52 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:53.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:52 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:53.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:52 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-06T23:54:53.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:52 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:54:53.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:52 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T23:54:53.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:52 vm03 ceph-mon[48028]: Updating vm03:/etc/ceph/ceph.conf 2026-03-06T23:54:53.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:52 vm03 ceph-mon[48028]: Updating vm08:/etc/ceph/ceph.conf 2026-03-06T23:54:53.471 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-06T23:54:53.471 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"386eb88a-19af-11f1-876d-93c9c802cc09","modified":"2026-03-06T22:53:24.279325Z","created":"2026-03-06T22:53:24.279325Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T23:54:53.471 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-06T23:54:54.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:54 vm03 ceph-mon[48028]: Updating vm08:/var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/config/ceph.conf 2026-03-06T23:54:54.288 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:54 vm03 ceph-mon[48028]: Updating vm03:/var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/config/ceph.conf 2026-03-06T23:54:54.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:54 vm03 ceph-mon[48028]: Updating vm08:/etc/ceph/ceph.client.admin.keyring 2026-03-06T23:54:54.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:54 vm03 ceph-mon[48028]: Updating vm03:/etc/ceph/ceph.client.admin.keyring 2026-03-06T23:54:54.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:54 vm03 ceph-mon[48028]: Updating vm08:/var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/config/ceph.client.admin.keyring 2026-03-06T23:54:54.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:54 vm03 ceph-mon[48028]: Updating vm03:/var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/config/ceph.client.admin.keyring 2026-03-06T23:54:54.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:54 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:54.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:54 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:54.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:54 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:54.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:54 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:54.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:54 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:54.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:54 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm08", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch 2026-03-06T23:54:54.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:54 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm08", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished 2026-03-06T23:54:54.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:54 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:54:54.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:54 vm03 ceph-mon[48028]: Deploying daemon ceph-exporter.vm08 on vm08 2026-03-06T23:54:54.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:54 vm03 ceph-mon[48028]: from='client.? 192.168.123.108:0/117015692' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T23:54:54.560 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
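
The repeated "config generate-minimal-conf" dispatches above produce the stripped-down ceph.conf (essentially the fsid plus a mon_host line) that cephadm then writes to /etc/ceph/ceph.conf and /var/lib/ceph/<fsid>/config/ceph.conf on each managed host, as the "Updating vm03:/etc/ceph/ceph.conf" lines show. Fetching it directly looks like this (the expected content sketched in the comment is assumed from the monmap addresses logged in this run):

    import subprocess

    # Returns a minimal client config -- for this cluster roughly:
    #   [global]
    #   fsid = 386eb88a-19af-11f1-876d-93c9c802cc09
    #   mon_host = [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0]
    conf = subprocess.check_output(["ceph", "config", "generate-minimal-conf"])
    print(conf.decode())
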
2026-03-06T23:54:54.560 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph mon dump -f json 2026-03-06T23:54:54.918 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/config/ceph.conf 2026-03-06T23:54:55.327 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-06T23:54:55.327 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"386eb88a-19af-11f1-876d-93c9c802cc09","modified":"2026-03-06T22:53:24.279325Z","created":"2026-03-06T22:53:24.279325Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T23:54:55.328 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-06T23:54:55.787 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:55 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:55.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:55 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:55.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:55 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:55.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:55 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:55.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:55 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm08", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-06T23:54:55.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:55 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm08", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished 2026-03-06T23:54:55.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:55 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:54:55.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:55 vm03 ceph-mon[48028]: Deploying daemon crash.vm08 on vm08 2026-03-06T23:54:55.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:55 vm03 ceph-mon[48028]: from='client.? 
192.168.123.108:0/1805065402' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T23:54:55.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:55 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:55.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:55 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:55.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:55 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:55.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:55 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:56.403 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-06T23:54:56.403 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph mon dump -f json 2026-03-06T23:54:56.703 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/config/ceph.conf 2026-03-06T23:54:56.787 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:56 vm03 ceph-mon[48028]: Deploying daemon node-exporter.vm08 on vm08 2026-03-06T23:54:57.043 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-06T23:54:57.044 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"386eb88a-19af-11f1-876d-93c9c802cc09","modified":"2026-03-06T22:53:24.279325Z","created":"2026-03-06T22:53:24.279325Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T23:54:57.044 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-06T23:54:57.787 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:57 vm03 ceph-mon[48028]: from='client.? 192.168.123.108:0/3051547309' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T23:54:58.107 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-03-06T23:54:58.107 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph mon dump -f json 2026-03-06T23:54:58.425 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/config/ceph.conf 2026-03-06T23:54:58.823 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-06T23:54:58.823 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":1,"fsid":"386eb88a-19af-11f1-876d-93c9c802cc09","modified":"2026-03-06T22:53:24.279325Z","created":"2026-03-06T22:53:24.279325Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-06T23:54:58.823 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 1 2026-03-06T23:54:59.537 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:59 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:59.537 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:59 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:59.537 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:59 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:59.537 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:59 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:59.537 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:59 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm08.bnopnr", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-06T23:54:59.537 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:59 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.vm08.bnopnr", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished 2026-03-06T23:54:59.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:59 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-06T23:54:59.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:59 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:54:59.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:59 vm03 ceph-mon[48028]: Deploying daemon mgr.vm08.bnopnr on vm08 2026-03-06T23:54:59.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:59 vm03 ceph-mon[48028]: from='client.? 
192.168.123.108:0/2395473051' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T23:54:59.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:59 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:59.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:59 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:59.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:59 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:59.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:59 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:59.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:59 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:54:59.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:59 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-06T23:54:59.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:54:59 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:54:59.919 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-03-06T23:54:59.920 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph mon dump -f json 2026-03-06T23:55:00.244 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 systemd[1]: Starting Ceph mon.vm08 for 386eb88a-19af-11f1-876d-93c9c802cc09... 
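
At 23:55:00 systemd on vm08 starts the second monitor. cephadm names the unit after the cluster fsid and daemon (ceph-<fsid>@<daemon>.service), which is exactly the unit the harness tails elsewhere in this log. Following it by hand (unit name and journalctl flags verbatim from this run):

    import subprocess

    # Tail the cephadm-managed mon unit on vm08; "-n 0" skips the backlog,
    # matching teuthology's own journalctl invocation.
    subprocess.run([
        "sudo", "journalctl", "-f", "-n", "0",
        "-u", "ceph-386eb88a-19af-11f1-876d-93c9c802cc09@mon.vm08.service",
    ])
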
2026-03-06T23:55:00.384 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm08/config
2026-03-06T23:55:00.537 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:00 vm03 ceph-mon[48028]: Deploying daemon mon.vm08 on vm08
2026-03-06T23:55:00.747 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 podman[55982]: 2026-03-06 23:55:00.411454313 +0100 CET m=+0.060087405 container create f4a7f049da3e84db210984b273391def91d1eec5d56d7e3d34e21c7c08f32a91 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-386eb88a-19af-11f1-876d-93c9c802cc09-mon-vm08, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a)
2026-03-06T23:55:00.747 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 podman[55982]: 2026-03-06 23:55:00.465529112 +0100 CET m=+0.114162204 container init f4a7f049da3e84db210984b273391def91d1eec5d56d7e3d34e21c7c08f32a91 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-386eb88a-19af-11f1-876d-93c9c802cc09-mon-vm08, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6)
2026-03-06T23:55:00.747 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 podman[55982]: 2026-03-06 23:55:00.468121574 +0100 CET m=+0.116754656 container start f4a7f049da3e84db210984b273391def91d1eec5d56d7e3d34e21c7c08f32a91 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-386eb88a-19af-11f1-876d-93c9c802cc09-mon-vm08, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8)
2026-03-06T23:55:00.747 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 bash[55982]: f4a7f049da3e84db210984b273391def91d1eec5d56d7e3d34e21c7c08f32a91
2026-03-06T23:55:00.747 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 podman[55982]: 2026-03-06 23:55:00.389187402 +0100 CET m=+0.037820505 image pull 8bccc98d839aa18345ec1336292d0452ca331737e49f12524f635044dcabcfe1 harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0
2026-03-06T23:55:00.747 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 systemd[1]: Started Ceph mon.vm08 for 386eb88a-19af-11f1-876d-93c9c802cc09.
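The podman create/init/start events above are cephadm's standard deployment shape: each daemon is a systemd unit (here "Ceph mon.vm08 for <fsid>") whose start script launches a podman container named ceph-<fsid>-<daemon>. A sketch for spotting that container from the host; the container name is taken from the create event above, the rest is illustrative:

    import subprocess

    FSID = "386eb88a-19af-11f1-876d-93c9c802cc09"

    # Filter "podman ps" down to the mon container the unit just started;
    # the name format matches the "container create" event in the journal.
    subprocess.run([
        "sudo", "podman", "ps",
        "--filter", f"name=ceph-{FSID}-mon-vm08",
        "--format", "{{.ID}} {{.Image}} {{.Status}}",
    ], check=True)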
2026-03-06T23:55:00.747 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: set uid:gid to 167:167 (ceph:ceph) 2026-03-06T23:55:00.747 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: ceph version 19.2.3-39-g340d3c24fc6 (340d3c24fc6ae7529322dc7ccee6c6cb2589da0a) squid (stable), process ceph-mon, pid 2 2026-03-06T23:55:00.747 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: pidfile_write: ignore empty --pid-file 2026-03-06T23:55:00.747 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: load: jerasure load: lrc 2026-03-06T23:55:00.747 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: RocksDB version: 7.9.2 2026-03-06T23:55:00.747 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Git sha 0 2026-03-06T23:55:00.747 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Compile date 2026-03-06 13:52:12 2026-03-06T23:55:00.747 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: DB SUMMARY 2026-03-06T23:55:00.747 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: DB Session ID: D8P1NH3PP196O4UQF59V 2026-03-06T23:55:00.747 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: CURRENT file: CURRENT 2026-03-06T23:55:00.747 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: IDENTITY file: IDENTITY 2026-03-06T23:55:00.747 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: MANIFEST file: MANIFEST-000005 size: 59 Bytes 2026-03-06T23:55:00.747 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: SST files in /var/lib/ceph/mon/ceph-vm08/store.db dir, Total Num: 0, files: 2026-03-06T23:55:00.747 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-vm08/store.db: 000004.log size: 511 ; 2026-03-06T23:55:00.747 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.error_if_exists: 0 2026-03-06T23:55:00.747 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.create_if_missing: 0 2026-03-06T23:55:00.747 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.paranoid_checks: 1 2026-03-06T23:55:00.747 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.flush_verify_memtable_count: 1 2026-03-06T23:55:00.747 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-06T23:55:00.747 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-03-06T23:55:00.747 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.env: 0x55f0156ceca0 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.fs: PosixFileSystem 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.info_log: 0x55f016035820 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: 
rocksdb: Options.max_file_opening_threads: 16 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.statistics: (nil) 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.use_fsync: 0 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.max_log_file_size: 0 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.keep_log_file_num: 1000 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.recycle_log_file_num: 0 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.allow_fallocate: 1 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.allow_mmap_reads: 0 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.allow_mmap_writes: 0 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.use_direct_reads: 0 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.create_missing_column_families: 0 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.db_log_dir: 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.wal_dir: 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.advise_random_on_open: 1 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.db_write_buffer_size: 0 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.write_buffer_manager: 0x55f016039900 
2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.rate_limiter: (nil) 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.wal_recovery_mode: 2 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.enable_thread_tracking: 0 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.enable_pipelined_write: 0 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.unordered_write: 0 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.row_cache: None 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.wal_filter: None 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.allow_ingest_behind: 0 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.two_write_queues: 0 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.manual_wal_flush: 0 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.wal_compression: 0 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.atomic_flush: 0 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.write_dbid_to_manifest: 0 
2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.log_readahead_size: 0 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.best_efforts_recovery: 0 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.allow_data_in_errors: 0 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.db_host_id: __hostname__ 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.enforce_single_del_contracts: true 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.max_background_jobs: 2 2026-03-06T23:55:00.748 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.max_background_compactions: -1 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.max_subcompactions: 1 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.max_total_wal_size: 0 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.max_open_files: -1 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.bytes_per_sync: 0 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: 
rocksdb: Options.compaction_readahead_size: 0 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.max_background_flushes: -1 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Compression algorithms supported: 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: kZSTD supported: 0 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: kXpressCompression supported: 0 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: kBZip2Compression supported: 0 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: kLZ4Compression supported: 1 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: kZlibCompression supported: 1 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: kLZ4HCCompression supported: 1 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: kSnappyCompression supported: 1 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: DMutex implementation: pthread_mutex_t 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-vm08/store.db/MANIFEST-000005 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.merge_operator: 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.compaction_filter: None 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.compaction_filter_factory: None 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.sst_partitioner_factory: None 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x55f016035460) 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout: 
cache_index_and_filter_blocks: 1 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout: pin_top_level_index_and_filter: 1 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout: index_type: 0 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout: data_block_index_type: 0 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout: index_shortening: 1 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout: checksum: 4 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout: no_block_cache: 0 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout: block_cache: 0x55f016059350 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout: block_cache_name: BinnedLRUCache 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout: block_cache_options: 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout: capacity : 536870912 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout: num_shard_bits : 4 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout: strict_capacity_limit : 0 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout: high_pri_pool_ratio: 0.000 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout: block_cache_compressed: (nil) 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout: persistent_cache: (nil) 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout: block_size: 4096 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout: block_size_deviation: 10 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout: block_restart_interval: 16 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout: index_block_restart_interval: 1 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout: metadata_block_size: 4096 2026-03-06T23:55:00.749 INFO:journalctl@ceph.mon.vm08.vm08.stdout: partition_filters: 0 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout: use_delta_encoding: 1 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout: filter_policy: bloomfilter 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout: whole_key_filtering: 1 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout: verify_compression: 0 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout: read_amp_bytes_per_bit: 0 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout: format_version: 5 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout: enable_index_compression: 1 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout: block_align: 0 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout: max_auto_readahead_size: 262144 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout: prepopulate_block_cache: 0 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout: initial_auto_readahead_size: 8192 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout: num_file_reads_for_auto_readahead: 2 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.write_buffer_size: 33554432 
2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.max_write_buffer_number: 2 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.compression: NoCompression 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.bottommost_compression: Disabled 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.prefix_extractor: nullptr 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.num_levels: 7 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.compression_opts.level: 32767 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.compression_opts.strategy: 0 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-06T23:55:00.750 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.compression_opts.enabled: false 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.target_file_size_base: 67108864 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-06T23:55:00.750 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-06T23:55:00.751 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.arena_block_size: 1048576 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.disable_auto_compactions: 0 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.inplace_update_support: 0 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-06T23:55:00.751 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.bloom_locality: 0 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.max_successive_merges: 0 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.paranoid_file_checks: 0 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.force_consistency_checks: 1 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.report_bg_io_stats: 0 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.ttl: 2592000 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.preclude_last_level_data_seconds: 0 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.preserve_internal_time_seconds: 0 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.enable_blob_files: false 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.min_blob_size: 0 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.blob_file_size: 268435456 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.blob_compaction_readahead_size: 0 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.blob_file_starting_level: 0 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-vm08/store.db/MANIFEST-000005 succeeded,manifest_file_number is 5, next_file_number is 7, last_sequence is 0, log_number is 0,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 
0), log number is 0 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 76e46db3-674e-4132-a146-058ecdc1d7fa 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: EVENT_LOG_v1 {"time_micros": 1772837700519552, "job": 1, "event": "recovery_started", "wal_files": [4]} 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #4 mode 2 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: EVENT_LOG_v1 {"time_micros": 1772837700526993, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 8, "file_size": 1643, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 1, "largest_seqno": 5, "table_properties": {"data_size": 523, "index_size": 31, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 69, "raw_key_size": 115, "raw_average_key_size": 23, "raw_value_size": 401, "raw_average_value_size": 80, "num_data_blocks": 1, "num_entries": 5, "num_filter_entries": 5, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1772837700, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "76e46db3-674e-4132-a146-058ecdc1d7fa", "db_session_id": "D8P1NH3PP196O4UQF59V", "orig_file_number": 8, "seqno_to_time_mapping": "N/A"}} 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: EVENT_LOG_v1 {"time_micros": 1772837700527063, "job": 1, "event": "recovery_finished"} 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: [db/version_set.cc:5047] Creating manifest 10 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-vm08/store.db/000004.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x55f01605ae00 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: DB pointer 0x55f01606e000 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: mon.vm08 does not exist in monmap, will attempt to join an existing cluster 2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: using public_addr v2:192.168.123.108:0/0 -> [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] 
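The long run of "rocksdb: Options.<name>: <value>" entries above is the mon dumping its store configuration one option per journal line; folding them back into a dict makes it easier to diff a mon's RocksDB settings between runs. A hypothetical parsing sketch (the regex and helper are illustrative tooling, not part of teuthology or Ceph):

    import re

    # Matches the "rocksdb: Options.<name>: <value>" entries relayed by
    # journalctl above; option names may contain dots and [N] suffixes.
    OPT = re.compile(r"rocksdb:\s+Options\.([\w.\[\]]+)\s*:\s*(.*)$")

    def parse_rocksdb_options(journal_lines):
        opts = {}
        for line in journal_lines:
            m = OPT.search(line)
            if m:
                opts[m.group(1)] = m.group(2).strip()
        return opts

    # e.g. parse_rocksdb_options(journal) might yield
    # {"max_background_jobs": "2", "max_open_files": "-1", ...}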
2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: rocksdb: [db/db_impl/db_impl.cc:1111]
2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout: ** DB Stats **
2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Uptime(secs): 0.0 total, 0.0 interval
2026-03-06T23:55:00.751 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout:
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: ** Compaction Stats [default] **
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: L0 1/0 1.60 KB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.2 0.01 0.00 1 0.007 0 0 0.0 0.0
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Sum 1/0 1.60 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.2 0.01 0.00 1 0.007 0 0 0.0 0.0
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.2 0.01 0.00 1 0.007 0 0 0.0 0.0
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout:
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: ** Compaction Stats [default] **
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.2 0.01 0.00 1 0.007 0 0 0.0 0.0
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout:
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout:
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Uptime(secs): 0.0 total, 0.0 interval
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Flush(GB): cumulative 0.000, interval 0.000
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: AddFile(GB): cumulative 0.000, interval 0.000
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: AddFile(Total Files): cumulative 0, interval 0
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: AddFile(L0 Files): cumulative 0, interval 0
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: AddFile(Keys): cumulative 0, interval 0
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Cumulative compaction: 0.00 GB write, 0.09 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Interval compaction: 0.00 GB write, 0.09 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Block cache BinnedLRUCache@0x55f016059350#2 capacity: 512.00 MB usage: 0.86 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 1.6e-05 secs_since: 0
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: Block cache entry stats(count,size,portion): DataBlock(1,0.64 KB,0.00012219%) FilterBlock(1,0.11 KB,2.08616e-05%) IndexBlock(1,0.11 KB,2.08616e-05%) Misc(1,0.00 KB,0%)
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout:
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: ** File Read Latency Histogram By Level [default] **
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: starting mon.vm08 rank -1 at public addrs [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] at bind addrs [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] mon_data /var/lib/ceph/mon/ceph-vm08 fsid 386eb88a-19af-11f1-876d-93c9c802cc09
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: mon.vm08@-1(???) e0 preinit fsid 386eb88a-19af-11f1-876d-93c9c802cc09
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: mon.vm08@-1(synchronizing).mds e1 new map
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: mon.vm08@-1(synchronizing).mds e1 print_map
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: e1
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: btime 2026-03-06T22:53:25:536236+0000
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: enable_multiple, ever_enabled_multiple: 1,1
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2,11=minor log segments,12=quiesce subvolumes}
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: legacy client fscid: -1
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout:
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout: No filesystems configured
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: mon.vm08@-1(synchronizing).osd e0 _set_cache_ratios kv ratio 0.25 inc ratio 0.375 full ratio 0.375
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: mon.vm08@-1(synchronizing).osd e0 register_cache_with_pcm pcm target: 2147483648 pcm max: 1020054732 pcm min: 134217728 inc_osd_cache size: 1
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: mon.vm08@-1(synchronizing).osd e1 e1: 0 total, 0 up, 0 in
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: mon.vm08@-1(synchronizing).osd e2 e2: 0 total, 0 up, 0 in
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: mon.vm08@-1(synchronizing).osd e3 e3: 0 total, 0 up, 0 in
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: mon.vm08@-1(synchronizing).osd e4 e4: 0 total, 0 up, 0 in
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: mon.vm08@-1(synchronizing).osd e5 e5: 0 total, 0 up, 0 in
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: mon.vm08@-1(synchronizing).osd e5 crush map has features 3314932999778484224, adjusting msgr requires
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: mon.vm08@-1(synchronizing).osd e5 crush map has features 288514050185494528, adjusting msgr requires
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: mon.vm08@-1(synchronizing).osd e5 crush map has features 288514050185494528, adjusting msgr requires
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: mon.vm08@-1(synchronizing).osd e5 crush map has features 288514050185494528, adjusting msgr requires
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='client.? 192.168.123.108:0/3538790731' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: Active manager daemon vm03.xzkqce restarted
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: Activating manager daemon vm03.xzkqce
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: osdmap e5: 0 total, 0 up, 0 in
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: mgrmap e15: vm03.xzkqce(active, starting, since 0.00380253s)
2026-03-06T23:55:00.752 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mgr metadata", "who": "vm03.xzkqce", "id": "vm03.xzkqce"}]: dispatch
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mds metadata"}]: dispatch
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata"}]: dispatch
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata"}]: dispatch
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: Manager daemon vm03.xzkqce is now available
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm03.xzkqce/mirror_snapshot_schedule"}]: dispatch
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm03.xzkqce/trash_purge_schedule"}]: dispatch
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: [06/Mar/2026:22:54:49] ENGINE Bus STARTING
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: [06/Mar/2026:22:54:49] ENGINE Serving on http://192.168.123.103:8765
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: mgrmap e16: vm03.xzkqce(active, since 1.00739s)
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='client.? 192.168.123.108:0/2537956765' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: [06/Mar/2026:22:54:49] ENGINE Serving on https://192.168.123.103:7150
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: [06/Mar/2026:22:54:49] ENGINE Bus STARTED
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: [06/Mar/2026:22:54:49] ENGINE Client ('192.168.123.103', 57806) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='client.? 192.168.123.108:0/124436538' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: mgrmap e17: vm03.xzkqce(active, since 2s)
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: Updating vm03:/etc/ceph/ceph.conf
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: Updating vm08:/etc/ceph/ceph.conf
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: Updating vm08:/var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/config/ceph.conf
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: Updating vm03:/var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/config/ceph.conf
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: Updating vm08:/etc/ceph/ceph.client.admin.keyring
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: Updating vm03:/etc/ceph/ceph.client.admin.keyring
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: Updating vm08:/var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/config/ceph.client.admin.keyring
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: Updating vm03:/var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/config/ceph.client.admin.keyring
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm08", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm08", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: Deploying daemon ceph-exporter.vm08 on vm08
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='client.? 192.168.123.108:0/117015692' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm08", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm08", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: Deploying daemon crash.vm08 on vm08
2026-03-06T23:55:00.753 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='client.?
192.168.123.108:0/1805065402' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T23:55:00.754 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:00.754 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:00.754 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:00.754 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:00.754 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: Deploying daemon node-exporter.vm08 on vm08 2026-03-06T23:55:00.754 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='client.? 192.168.123.108:0/3051547309' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T23:55:00.754 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:00.754 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:00.754 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:00.754 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:00.754 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm08.bnopnr", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-06T23:55:00.754 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.vm08.bnopnr", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished 2026-03-06T23:55:00.754 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-06T23:55:00.754 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:55:00.754 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: Deploying daemon mgr.vm08.bnopnr on vm08 2026-03-06T23:55:00.754 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='client.? 
192.168.123.108:0/2395473051' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T23:55:00.754 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:00.754 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:00.754 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:00.754 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:00.754 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:00.754 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-06T23:55:00.754 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:55:00.754 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: Deploying daemon mon.vm08 on vm08 2026-03-06T23:55:00.754 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:00 vm08 ceph-mon[56019]: mon.vm08@-1(synchronizing).paxosservice(auth 1..8) refresh upgraded, format 0 -> 3 2026-03-06T23:55:05.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:05 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch 2026-03-06T23:55:05.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:05 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-06T23:55:05.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:05 vm08 ceph-mon[56019]: mon.vm03 calling monitor election 2026-03-06T23:55:05.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:05 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-06T23:55:05.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:05 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-06T23:55:05.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:05 vm08 ceph-mon[56019]: mon.vm08 calling monitor election 2026-03-06T23:55:05.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:05 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-06T23:55:05.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:05 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-06T23:55:05.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:05 vm08 ceph-mon[56019]: 
from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-06T23:55:05.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:05 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-06T23:55:05.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:05 vm08 ceph-mon[56019]: mon.vm03 is new leader, mons vm03,vm08 in quorum (ranks 0,1) 2026-03-06T23:55:05.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:05 vm08 ceph-mon[56019]: monmap epoch 2 2026-03-06T23:55:05.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:05 vm08 ceph-mon[56019]: fsid 386eb88a-19af-11f1-876d-93c9c802cc09 2026-03-06T23:55:05.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:05 vm08 ceph-mon[56019]: last_changed 2026-03-06T22:55:00.595976+0000 2026-03-06T23:55:05.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:05 vm08 ceph-mon[56019]: created 2026-03-06T22:53:24.279325+0000 2026-03-06T23:55:05.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:05 vm08 ceph-mon[56019]: min_mon_release 19 (squid) 2026-03-06T23:55:05.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:05 vm08 ceph-mon[56019]: election_strategy: 1 2026-03-06T23:55:05.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:05 vm08 ceph-mon[56019]: 0: [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon.vm03 2026-03-06T23:55:05.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:05 vm08 ceph-mon[56019]: 1: [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] mon.vm08 2026-03-06T23:55:05.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:05 vm08 ceph-mon[56019]: fsmap 2026-03-06T23:55:05.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:05 vm08 ceph-mon[56019]: osdmap e5: 0 total, 0 up, 0 in 2026-03-06T23:55:05.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:05 vm08 ceph-mon[56019]: mgrmap e17: vm03.xzkqce(active, since 16s) 2026-03-06T23:55:05.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:05 vm08 ceph-mon[56019]: overall HEALTH_OK 2026-03-06T23:55:05.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:05 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:05.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:05 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:05.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:05 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:05.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:05 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:55:05.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:05 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T23:55:06.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:05 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch 2026-03-06T23:55:06.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:05 vm03 ceph-mon[48028]: from='mgr.14221 
192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-06T23:55:06.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:05 vm03 ceph-mon[48028]: mon.vm03 calling monitor election 2026-03-06T23:55:06.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:05 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-06T23:55:06.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:05 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-06T23:55:06.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:05 vm03 ceph-mon[48028]: mon.vm08 calling monitor election 2026-03-06T23:55:06.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:05 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-06T23:55:06.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:05 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-06T23:55:06.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:05 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-06T23:55:06.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:05 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-06T23:55:06.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:05 vm03 ceph-mon[48028]: mon.vm03 is new leader, mons vm03,vm08 in quorum (ranks 0,1) 2026-03-06T23:55:06.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:05 vm03 ceph-mon[48028]: monmap epoch 2 2026-03-06T23:55:06.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:05 vm03 ceph-mon[48028]: fsid 386eb88a-19af-11f1-876d-93c9c802cc09 2026-03-06T23:55:06.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:05 vm03 ceph-mon[48028]: last_changed 2026-03-06T22:55:00.595976+0000 2026-03-06T23:55:06.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:05 vm03 ceph-mon[48028]: created 2026-03-06T22:53:24.279325+0000 2026-03-06T23:55:06.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:05 vm03 ceph-mon[48028]: min_mon_release 19 (squid) 2026-03-06T23:55:06.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:05 vm03 ceph-mon[48028]: election_strategy: 1 2026-03-06T23:55:06.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:05 vm03 ceph-mon[48028]: 0: [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] mon.vm03 2026-03-06T23:55:06.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:05 vm03 ceph-mon[48028]: 1: [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] mon.vm08 2026-03-06T23:55:06.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:05 vm03 ceph-mon[48028]: fsmap 2026-03-06T23:55:06.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:05 vm03 ceph-mon[48028]: osdmap e5: 0 total, 0 up, 0 in 2026-03-06T23:55:06.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:05 vm03 ceph-mon[48028]: mgrmap e17: vm03.xzkqce(active, since 16s) 2026-03-06T23:55:06.038 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:05 vm03 ceph-mon[48028]: overall HEALTH_OK 2026-03-06T23:55:06.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:05 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:06.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:05 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:06.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:05 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:06.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:05 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:55:06.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:05 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T23:55:06.910 INFO:teuthology.orchestra.run.vm08.stdout: 2026-03-06T23:55:06.910 INFO:teuthology.orchestra.run.vm08.stdout:{"epoch":2,"fsid":"386eb88a-19af-11f1-876d-93c9c802cc09","modified":"2026-03-06T22:55:00.595976Z","created":"2026-03-06T22:53:24.279325Z","min_mon_release":19,"min_mon_release_name":"squid","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid"],"optional":[]},"mons":[{"rank":0,"name":"vm03","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:3300","nonce":0},{"type":"v1","addr":"192.168.123.103:6789","nonce":0}]},"addr":"192.168.123.103:6789/0","public_addr":"192.168.123.103:6789/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":1,"name":"vm08","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:3300","nonce":0},{"type":"v1","addr":"192.168.123.108:6789","nonce":0}]},"addr":"192.168.123.108:6789/0","public_addr":"192.168.123.108:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0,1]} 2026-03-06T23:55:06.911 INFO:teuthology.orchestra.run.vm08.stderr:dumped monmap epoch 2 2026-03-06T23:55:06.980 INFO:tasks.cephadm:Generating final ceph.conf file... 
2026-03-06T23:55:06.980 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph config generate-minimal-conf 2026-03-06T23:55:07.177 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:06 vm08 ceph-mon[56019]: Updating vm03:/etc/ceph/ceph.conf 2026-03-06T23:55:07.178 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:06 vm08 ceph-mon[56019]: Updating vm08:/etc/ceph/ceph.conf 2026-03-06T23:55:07.178 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:06 vm08 ceph-mon[56019]: Updating vm03:/var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/config/ceph.conf 2026-03-06T23:55:07.178 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:06 vm08 ceph-mon[56019]: Updating vm08:/var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/config/ceph.conf 2026-03-06T23:55:07.178 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:06 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:07.178 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:06 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:07.178 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:06 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:07.178 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:06 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:07.178 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:06 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:07.178 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:06 vm08 ceph-mon[56019]: Reconfiguring mon.vm03 (unknown last config time)... 2026-03-06T23:55:07.178 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:06 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-06T23:55:07.178 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:06 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-06T23:55:07.178 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:06 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:55:07.178 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:06 vm08 ceph-mon[56019]: Reconfiguring daemon mon.vm03 on vm03 2026-03-06T23:55:07.178 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:06 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:07.178 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:06 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:07.178 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:06 vm08 ceph-mon[56019]: Reconfiguring mgr.vm03.xzkqce (unknown last config time)... 
2026-03-06T23:55:07.178 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:06 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm03.xzkqce", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-06T23:55:07.178 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:06 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-06T23:55:07.178 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:06 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:55:07.178 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:06 vm08 ceph-mon[56019]: Reconfiguring daemon mgr.vm03.xzkqce on vm03 2026-03-06T23:55:07.178 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:06 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-06T23:55:07.178 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:06 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:07.178 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:06 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:07.178 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:06 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm03", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch 2026-03-06T23:55:07.178 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:06 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:55:07.178 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:06 vm08 ceph-mon[56019]: from='client.? 
192.168.123.108:0/1746744126' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T23:55:07.238 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:06 vm03 ceph-mon[48028]: Updating vm03:/etc/ceph/ceph.conf 2026-03-06T23:55:07.238 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:06 vm03 ceph-mon[48028]: Updating vm08:/etc/ceph/ceph.conf 2026-03-06T23:55:07.238 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:06 vm03 ceph-mon[48028]: Updating vm03:/var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/config/ceph.conf 2026-03-06T23:55:07.239 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:06 vm03 ceph-mon[48028]: Updating vm08:/var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/config/ceph.conf 2026-03-06T23:55:07.239 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:06 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:07.239 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:06 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:07.239 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:06 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:07.239 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:06 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:07.239 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:06 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:07.239 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:06 vm03 ceph-mon[48028]: Reconfiguring mon.vm03 (unknown last config time)... 2026-03-06T23:55:07.239 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:06 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-06T23:55:07.239 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:06 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-06T23:55:07.239 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:06 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:55:07.239 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:06 vm03 ceph-mon[48028]: Reconfiguring daemon mon.vm03 on vm03 2026-03-06T23:55:07.239 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:06 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:07.239 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:06 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:07.239 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:06 vm03 ceph-mon[48028]: Reconfiguring mgr.vm03.xzkqce (unknown last config time)... 
2026-03-06T23:55:07.239 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:06 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm03.xzkqce", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-06T23:55:07.239 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:06 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-06T23:55:07.239 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:06 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:55:07.239 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:06 vm03 ceph-mon[48028]: Reconfiguring daemon mgr.vm03.xzkqce on vm03 2026-03-06T23:55:07.239 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:06 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch 2026-03-06T23:55:07.239 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:06 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:07.239 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:06 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:07.239 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:06 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm03", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch 2026-03-06T23:55:07.239 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:06 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:55:07.239 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:06 vm03 ceph-mon[48028]: from='client.? 192.168.123.108:0/1746744126' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-06T23:55:07.368 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:55:07.701 INFO:teuthology.orchestra.run.vm03.stdout:# minimal ceph.conf for 386eb88a-19af-11f1-876d-93c9c802cc09 2026-03-06T23:55:07.701 INFO:teuthology.orchestra.run.vm03.stdout:[global] 2026-03-06T23:55:07.701 INFO:teuthology.orchestra.run.vm03.stdout: fsid = 386eb88a-19af-11f1-876d-93c9c802cc09 2026-03-06T23:55:07.701 INFO:teuthology.orchestra.run.vm03.stdout: mon_host = [v2:192.168.123.103:3300/0,v1:192.168.123.103:6789/0] [v2:192.168.123.108:3300/0,v1:192.168.123.108:6789/0] 2026-03-06T23:55:07.777 INFO:tasks.cephadm:Distributing (final) config and client.admin keyring... 
2026-03-06T23:55:07.778 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-06T23:55:07.778 DEBUG:teuthology.orchestra.run.vm03:> sudo dd of=/etc/ceph/ceph.conf 2026-03-06T23:55:07.835 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-06T23:55:07.835 DEBUG:teuthology.orchestra.run.vm03:> sudo dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-06T23:55:07.919 DEBUG:teuthology.orchestra.run.vm08:> set -ex 2026-03-06T23:55:07.919 DEBUG:teuthology.orchestra.run.vm08:> sudo dd of=/etc/ceph/ceph.conf 2026-03-06T23:55:07.949 DEBUG:teuthology.orchestra.run.vm08:> set -ex 2026-03-06T23:55:07.949 DEBUG:teuthology.orchestra.run.vm08:> sudo dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-06T23:55:08.015 INFO:tasks.cephadm:Deploying OSDs... 2026-03-06T23:55:08.016 DEBUG:teuthology.orchestra.run.vm03:> set -ex 2026-03-06T23:55:08.016 DEBUG:teuthology.orchestra.run.vm03:> dd if=/scratch_devs of=/dev/stdout 2026-03-06T23:55:08.030 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-06T23:55:08.038 DEBUG:teuthology.orchestra.run.vm03:> ls /dev/[sv]d? 2026-03-06T23:55:08.092 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vda 2026-03-06T23:55:08.092 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vdb 2026-03-06T23:55:08.092 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vdc 2026-03-06T23:55:08.092 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vdd 2026-03-06T23:55:08.092 INFO:teuthology.orchestra.run.vm03.stdout:/dev/vde 2026-03-06T23:55:08.092 WARNING:teuthology.misc:Removing root device: /dev/vda from device list 2026-03-06T23:55:08.092 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'] 2026-03-06T23:55:08.092 DEBUG:teuthology.orchestra.run.vm03:> stat /dev/vdb 2026-03-06T23:55:08.148 INFO:teuthology.orchestra.run.vm03.stdout: File: /dev/vdb 2026-03-06T23:55:08.148 INFO:teuthology.orchestra.run.vm03.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-06T23:55:08.148 INFO:teuthology.orchestra.run.vm03.stdout:Device: 6h/6d Inode: 244 Links: 1 Device type: fc,10 2026-03-06T23:55:08.148 INFO:teuthology.orchestra.run.vm03.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-06T23:55:08.148 INFO:teuthology.orchestra.run.vm03.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-06T23:55:08.148 INFO:teuthology.orchestra.run.vm03.stdout:Access: 2026-03-06 23:54:17.512292546 +0100 2026-03-06T23:55:08.148 INFO:teuthology.orchestra.run.vm03.stdout:Modify: 2026-03-06 23:52:57.495316616 +0100 2026-03-06T23:55:08.148 INFO:teuthology.orchestra.run.vm03.stdout:Change: 2026-03-06 23:52:57.495316616 +0100 2026-03-06T23:55:08.148 INFO:teuthology.orchestra.run.vm03.stdout: Birth: 2026-03-06 23:49:59.237000000 +0100 2026-03-06T23:55:08.149 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/dev/vdb of=/dev/null count=1 2026-03-06T23:55:08.211 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records in 2026-03-06T23:55:08.211 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records out 2026-03-06T23:55:08.211 INFO:teuthology.orchestra.run.vm03.stderr:512 bytes copied, 0.000107872 s, 4.7 MB/s 2026-03-06T23:55:08.212 DEBUG:teuthology.orchestra.run.vm03:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdb 2026-03-06T23:55:08.284 DEBUG:teuthology.orchestra.run.vm03:> stat /dev/vdc 2026-03-06T23:55:08.347 INFO:teuthology.orchestra.run.vm03.stdout: File: /dev/vdc 2026-03-06T23:55:08.348 INFO:teuthology.orchestra.run.vm03.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-06T23:55:08.348 INFO:teuthology.orchestra.run.vm03.stdout:Device: 6h/6d Inode: 255 Links: 1 Device type: fc,20 2026-03-06T23:55:08.348 INFO:teuthology.orchestra.run.vm03.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-06T23:55:08.348 INFO:teuthology.orchestra.run.vm03.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-06T23:55:08.348 INFO:teuthology.orchestra.run.vm03.stdout:Access: 2026-03-06 23:54:17.554292540 +0100 2026-03-06T23:55:08.348 INFO:teuthology.orchestra.run.vm03.stdout:Modify: 2026-03-06 23:52:57.531316620 +0100 2026-03-06T23:55:08.348 INFO:teuthology.orchestra.run.vm03.stdout:Change: 2026-03-06 23:52:57.531316620 +0100 2026-03-06T23:55:08.348 INFO:teuthology.orchestra.run.vm03.stdout: Birth: 2026-03-06 23:49:59.243000000 +0100 2026-03-06T23:55:08.348 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-06T23:55:08.411 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records in 2026-03-06T23:55:08.411 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records out 2026-03-06T23:55:08.411 INFO:teuthology.orchestra.run.vm03.stderr:512 bytes copied, 0.000136195 s, 3.8 MB/s 2026-03-06T23:55:08.412 DEBUG:teuthology.orchestra.run.vm03:> ! mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-06T23:55:08.470 DEBUG:teuthology.orchestra.run.vm03:> stat /dev/vdd 2026-03-06T23:55:08.527 INFO:teuthology.orchestra.run.vm03.stdout: File: /dev/vdd 2026-03-06T23:55:08.527 INFO:teuthology.orchestra.run.vm03.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-06T23:55:08.527 INFO:teuthology.orchestra.run.vm03.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30 2026-03-06T23:55:08.527 INFO:teuthology.orchestra.run.vm03.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-06T23:55:08.527 INFO:teuthology.orchestra.run.vm03.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-06T23:55:08.527 INFO:teuthology.orchestra.run.vm03.stdout:Access: 2026-03-06 23:54:17.586292536 +0100 2026-03-06T23:55:08.527 INFO:teuthology.orchestra.run.vm03.stdout:Modify: 2026-03-06 23:52:57.509316617 +0100 2026-03-06T23:55:08.527 INFO:teuthology.orchestra.run.vm03.stdout:Change: 2026-03-06 23:52:57.509316617 +0100 2026-03-06T23:55:08.527 INFO:teuthology.orchestra.run.vm03.stdout: Birth: 2026-03-06 23:49:59.244000000 +0100 2026-03-06T23:55:08.527 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-06T23:55:08.537 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:08 vm03 ceph-mon[48028]: Reconfiguring ceph-exporter.vm03 (monmap changed)... 
2026-03-06T23:55:08.537 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:08 vm03 ceph-mon[48028]: Reconfiguring daemon ceph-exporter.vm03 on vm03 2026-03-06T23:55:08.537 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:08 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:08.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:08 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:08.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:08 vm03 ceph-mon[48028]: Reconfiguring crash.vm03 (monmap changed)... 2026-03-06T23:55:08.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:08 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm03", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-06T23:55:08.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:08 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:55:08.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:08 vm03 ceph-mon[48028]: Reconfiguring daemon crash.vm03 on vm03 2026-03-06T23:55:08.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:08 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/309117987' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:55:08.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:08 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:08.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:08 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:08.560 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records in 2026-03-06T23:55:08.560 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records out 2026-03-06T23:55:08.560 INFO:teuthology.orchestra.run.vm03.stderr:512 bytes copied, 0.000113361 s, 4.5 MB/s 2026-03-06T23:55:08.561 DEBUG:teuthology.orchestra.run.vm03:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-06T23:55:08.628 DEBUG:teuthology.orchestra.run.vm03:> stat /dev/vde 2026-03-06T23:55:08.708 INFO:teuthology.orchestra.run.vm03.stdout: File: /dev/vde 2026-03-06T23:55:08.708 INFO:teuthology.orchestra.run.vm03.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-06T23:55:08.708 INFO:teuthology.orchestra.run.vm03.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40 2026-03-06T23:55:08.708 INFO:teuthology.orchestra.run.vm03.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-06T23:55:08.708 INFO:teuthology.orchestra.run.vm03.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-06T23:55:08.708 INFO:teuthology.orchestra.run.vm03.stdout:Access: 2026-03-06 23:54:17.615292532 +0100 2026-03-06T23:55:08.708 INFO:teuthology.orchestra.run.vm03.stdout:Modify: 2026-03-06 23:52:57.508316617 +0100 2026-03-06T23:55:08.708 INFO:teuthology.orchestra.run.vm03.stdout:Change: 2026-03-06 23:52:57.508316617 +0100 2026-03-06T23:55:08.708 INFO:teuthology.orchestra.run.vm03.stdout: Birth: 2026-03-06 23:49:59.299000000 +0100 2026-03-06T23:55:08.708 DEBUG:teuthology.orchestra.run.vm03:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-06T23:55:08.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:08 vm08 ceph-mon[56019]: Reconfiguring ceph-exporter.vm03 (monmap changed)... 2026-03-06T23:55:08.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:08 vm08 ceph-mon[56019]: Reconfiguring daemon ceph-exporter.vm03 on vm03 2026-03-06T23:55:08.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:08 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:08.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:08 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:08.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:08 vm08 ceph-mon[56019]: Reconfiguring crash.vm03 (monmap changed)... 2026-03-06T23:55:08.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:08 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm03", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-06T23:55:08.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:08 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:55:08.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:08 vm08 ceph-mon[56019]: Reconfiguring daemon crash.vm03 on vm03 2026-03-06T23:55:08.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:08 vm08 ceph-mon[56019]: from='client.? 
192.168.123.103:0/309117987' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:55:08.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:08 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:08.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:08 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:08.781 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records in 2026-03-06T23:55:08.781 INFO:teuthology.orchestra.run.vm03.stderr:1+0 records out 2026-03-06T23:55:08.781 INFO:teuthology.orchestra.run.vm03.stderr:512 bytes copied, 0.000219471 s, 2.3 MB/s 2026-03-06T23:55:08.782 DEBUG:teuthology.orchestra.run.vm03:> ! mount | grep -v devtmpfs | grep -q /dev/vde 2026-03-06T23:55:08.840 DEBUG:teuthology.orchestra.run.vm08:> set -ex 2026-03-06T23:55:08.840 DEBUG:teuthology.orchestra.run.vm08:> dd if=/scratch_devs of=/dev/stdout 2026-03-06T23:55:08.862 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-06T23:55:08.862 DEBUG:teuthology.orchestra.run.vm08:> ls /dev/[sv]d? 2026-03-06T23:55:08.921 INFO:teuthology.orchestra.run.vm08.stdout:/dev/vda 2026-03-06T23:55:08.921 INFO:teuthology.orchestra.run.vm08.stdout:/dev/vdb 2026-03-06T23:55:08.921 INFO:teuthology.orchestra.run.vm08.stdout:/dev/vdc 2026-03-06T23:55:08.921 INFO:teuthology.orchestra.run.vm08.stdout:/dev/vdd 2026-03-06T23:55:08.921 INFO:teuthology.orchestra.run.vm08.stdout:/dev/vde 2026-03-06T23:55:08.921 WARNING:teuthology.misc:Removing root device: /dev/vda from device list 2026-03-06T23:55:08.921 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'] 2026-03-06T23:55:08.921 DEBUG:teuthology.orchestra.run.vm08:> stat /dev/vdb 2026-03-06T23:55:08.983 INFO:teuthology.orchestra.run.vm08.stdout: File: /dev/vdb 2026-03-06T23:55:08.983 INFO:teuthology.orchestra.run.vm08.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-06T23:55:08.983 INFO:teuthology.orchestra.run.vm08.stdout:Device: 6h/6d Inode: 254 Links: 1 Device type: fc,10 2026-03-06T23:55:08.983 INFO:teuthology.orchestra.run.vm08.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-06T23:55:08.984 INFO:teuthology.orchestra.run.vm08.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-06T23:55:08.984 INFO:teuthology.orchestra.run.vm08.stdout:Access: 2026-03-06 23:54:52.364368602 +0100 2026-03-06T23:55:08.984 INFO:teuthology.orchestra.run.vm08.stdout:Modify: 2026-03-06 23:52:57.417936078 +0100 2026-03-06T23:55:08.984 INFO:teuthology.orchestra.run.vm08.stdout:Change: 2026-03-06 23:52:57.417936078 +0100 2026-03-06T23:55:08.984 INFO:teuthology.orchestra.run.vm08.stdout: Birth: 2026-03-06 23:50:24.252000000 +0100 2026-03-06T23:55:08.984 DEBUG:teuthology.orchestra.run.vm08:> sudo dd if=/dev/vdb of=/dev/null count=1 2026-03-06T23:55:09.051 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records in 2026-03-06T23:55:09.051 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records out 2026-03-06T23:55:09.051 INFO:teuthology.orchestra.run.vm08.stderr:512 bytes copied, 0.000179446 s, 2.9 MB/s 2026-03-06T23:55:09.052 DEBUG:teuthology.orchestra.run.vm08:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdb 2026-03-06T23:55:09.113 DEBUG:teuthology.orchestra.run.vm08:> stat /dev/vdc 2026-03-06T23:55:09.169 INFO:teuthology.orchestra.run.vm08.stdout: File: /dev/vdc 2026-03-06T23:55:09.169 INFO:teuthology.orchestra.run.vm08.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-06T23:55:09.169 INFO:teuthology.orchestra.run.vm08.stdout:Device: 6h/6d Inode: 255 Links: 1 Device type: fc,20 2026-03-06T23:55:09.169 INFO:teuthology.orchestra.run.vm08.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-06T23:55:09.169 INFO:teuthology.orchestra.run.vm08.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-06T23:55:09.169 INFO:teuthology.orchestra.run.vm08.stdout:Access: 2026-03-06 23:54:52.394368614 +0100 2026-03-06T23:55:09.169 INFO:teuthology.orchestra.run.vm08.stdout:Modify: 2026-03-06 23:52:57.448936088 +0100 2026-03-06T23:55:09.169 INFO:teuthology.orchestra.run.vm08.stdout:Change: 2026-03-06 23:52:57.448936088 +0100 2026-03-06T23:55:09.169 INFO:teuthology.orchestra.run.vm08.stdout: Birth: 2026-03-06 23:50:24.257000000 +0100 2026-03-06T23:55:09.169 DEBUG:teuthology.orchestra.run.vm08:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-06T23:55:09.231 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records in 2026-03-06T23:55:09.231 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records out 2026-03-06T23:55:09.231 INFO:teuthology.orchestra.run.vm08.stderr:512 bytes copied, 0.000191588 s, 2.7 MB/s 2026-03-06T23:55:09.232 DEBUG:teuthology.orchestra.run.vm08:> ! mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-06T23:55:09.290 DEBUG:teuthology.orchestra.run.vm08:> stat /dev/vdd 2026-03-06T23:55:09.346 INFO:teuthology.orchestra.run.vm08.stdout: File: /dev/vdd 2026-03-06T23:55:09.346 INFO:teuthology.orchestra.run.vm08.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-06T23:55:09.346 INFO:teuthology.orchestra.run.vm08.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30 2026-03-06T23:55:09.346 INFO:teuthology.orchestra.run.vm08.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-06T23:55:09.347 INFO:teuthology.orchestra.run.vm08.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-06T23:55:09.347 INFO:teuthology.orchestra.run.vm08.stdout:Access: 2026-03-06 23:54:52.421368624 +0100 2026-03-06T23:55:09.347 INFO:teuthology.orchestra.run.vm08.stdout:Modify: 2026-03-06 23:52:57.434936083 +0100 2026-03-06T23:55:09.347 INFO:teuthology.orchestra.run.vm08.stdout:Change: 2026-03-06 23:52:57.434936083 +0100 2026-03-06T23:55:09.347 INFO:teuthology.orchestra.run.vm08.stdout: Birth: 2026-03-06 23:50:24.262000000 +0100 2026-03-06T23:55:09.347 DEBUG:teuthology.orchestra.run.vm08:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-06T23:55:09.408 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:09 vm08 ceph-mon[56019]: Reconfiguring alertmanager.vm03 (dependencies changed)... 2026-03-06T23:55:09.408 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:09 vm08 ceph-mon[56019]: Reconfiguring daemon alertmanager.vm03 on vm03 2026-03-06T23:55:09.408 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:09 vm08 ceph-mon[56019]: Standby manager daemon vm08.bnopnr started 2026-03-06T23:55:09.408 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:09 vm08 ceph-mon[56019]: from='mgr.? 
192.168.123.108:0/2381935868' entity='mgr.vm08.bnopnr' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm08.bnopnr/crt"}]: dispatch 2026-03-06T23:55:09.408 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:09 vm08 ceph-mon[56019]: from='mgr.? 192.168.123.108:0/2381935868' entity='mgr.vm08.bnopnr' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-06T23:55:09.408 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:09 vm08 ceph-mon[56019]: from='mgr.? 192.168.123.108:0/2381935868' entity='mgr.vm08.bnopnr' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm08.bnopnr/key"}]: dispatch 2026-03-06T23:55:09.409 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:09 vm08 ceph-mon[56019]: from='mgr.? 192.168.123.108:0/2381935868' entity='mgr.vm08.bnopnr' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-06T23:55:09.409 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:09 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:09.409 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:09 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:09.411 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records in 2026-03-06T23:55:09.411 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records out 2026-03-06T23:55:09.411 INFO:teuthology.orchestra.run.vm08.stderr:512 bytes copied, 0.000153848 s, 3.3 MB/s 2026-03-06T23:55:09.412 DEBUG:teuthology.orchestra.run.vm08:> ! mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-06T23:55:09.468 DEBUG:teuthology.orchestra.run.vm08:> stat /dev/vde 2026-03-06T23:55:09.525 INFO:teuthology.orchestra.run.vm08.stdout: File: /dev/vde 2026-03-06T23:55:09.525 INFO:teuthology.orchestra.run.vm08.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-06T23:55:09.525 INFO:teuthology.orchestra.run.vm08.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40 2026-03-06T23:55:09.525 INFO:teuthology.orchestra.run.vm08.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-06T23:55:09.525 INFO:teuthology.orchestra.run.vm08.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-06T23:55:09.525 INFO:teuthology.orchestra.run.vm08.stdout:Access: 2026-03-06 23:54:52.447368635 +0100 2026-03-06T23:55:09.525 INFO:teuthology.orchestra.run.vm08.stdout:Modify: 2026-03-06 23:52:57.447936088 +0100 2026-03-06T23:55:09.525 INFO:teuthology.orchestra.run.vm08.stdout:Change: 2026-03-06 23:52:57.447936088 +0100 2026-03-06T23:55:09.525 INFO:teuthology.orchestra.run.vm08.stdout: Birth: 2026-03-06 23:50:24.268000000 +0100 2026-03-06T23:55:09.525 DEBUG:teuthology.orchestra.run.vm08:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-06T23:55:09.590 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records in 2026-03-06T23:55:09.590 INFO:teuthology.orchestra.run.vm08.stderr:1+0 records out 2026-03-06T23:55:09.590 INFO:teuthology.orchestra.run.vm08.stderr:512 bytes copied, 0.000153528 s, 3.3 MB/s 2026-03-06T23:55:09.591 DEBUG:teuthology.orchestra.run.vm08:> ! mount | grep -v devtmpfs | grep -q /dev/vde 2026-03-06T23:55:09.635 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:09 vm03 ceph-mon[48028]: Reconfiguring alertmanager.vm03 (dependencies changed)... 
2026-03-06T23:55:09.635 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:09 vm03 ceph-mon[48028]: Reconfiguring daemon alertmanager.vm03 on vm03 2026-03-06T23:55:09.635 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:09 vm03 ceph-mon[48028]: Standby manager daemon vm08.bnopnr started 2026-03-06T23:55:09.635 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:09 vm03 ceph-mon[48028]: from='mgr.? 192.168.123.108:0/2381935868' entity='mgr.vm08.bnopnr' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm08.bnopnr/crt"}]: dispatch 2026-03-06T23:55:09.635 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:09 vm03 ceph-mon[48028]: from='mgr.? 192.168.123.108:0/2381935868' entity='mgr.vm08.bnopnr' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-06T23:55:09.635 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:09 vm03 ceph-mon[48028]: from='mgr.? 192.168.123.108:0/2381935868' entity='mgr.vm08.bnopnr' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/vm08.bnopnr/key"}]: dispatch 2026-03-06T23:55:09.635 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:09 vm03 ceph-mon[48028]: from='mgr.? 192.168.123.108:0/2381935868' entity='mgr.vm08.bnopnr' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-06T23:55:09.635 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:09 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:09.635 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:09 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:09.650 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph orch apply osd --all-available-devices 2026-03-06T23:55:10.007 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm08/config 2026-03-06T23:55:10.348 INFO:teuthology.orchestra.run.vm08.stdout:Scheduled osd.all-available-devices update... 2026-03-06T23:55:10.403 INFO:tasks.cephadm:Waiting for 8 OSDs to come up... 2026-03-06T23:55:10.404 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd stat -f json 2026-03-06T23:55:10.427 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:10 vm03 ceph-mon[48028]: pgmap v3: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T23:55:10.427 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:10 vm03 ceph-mon[48028]: Reconfiguring grafana.vm03 (dependencies changed)... 
2026-03-06T23:55:10.427 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:10 vm03 ceph-mon[48028]: Reconfiguring daemon grafana.vm03 on vm03 2026-03-06T23:55:10.427 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:10 vm03 ceph-mon[48028]: mgrmap e18: vm03.xzkqce(active, since 20s), standbys: vm08.bnopnr 2026-03-06T23:55:10.427 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:10 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mgr metadata", "who": "vm08.bnopnr", "id": "vm08.bnopnr"}]: dispatch 2026-03-06T23:55:10.427 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:10 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:10.428 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:10 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:10.428 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:10 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:10.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:10 vm08 ceph-mon[56019]: pgmap v3: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T23:55:10.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:10 vm08 ceph-mon[56019]: Reconfiguring grafana.vm03 (dependencies changed)... 2026-03-06T23:55:10.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:10 vm08 ceph-mon[56019]: Reconfiguring daemon grafana.vm03 on vm03 2026-03-06T23:55:10.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:10 vm08 ceph-mon[56019]: mgrmap e18: vm03.xzkqce(active, since 20s), standbys: vm08.bnopnr 2026-03-06T23:55:10.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:10 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mgr metadata", "who": "vm08.bnopnr", "id": "vm08.bnopnr"}]: dispatch 2026-03-06T23:55:10.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:10 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:10.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:10 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:10.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:10 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:10.843 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:55:11.209 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:55:11.254 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0} 2026-03-06T23:55:11.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:11 vm08 ceph-mon[56019]: Reconfiguring prometheus.vm03 (dependencies changed)... 
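[editor's note] After "Waiting for 8 OSDs to come up...", the harness repeatedly runs `ceph osd stat -f json` and inspects the payload shown above. A minimal sketch of that wait loop, assuming a hypothetical run_osd_stat() helper that returns the JSON string (field names match the log output):

    import json
    import time

    def wait_for_osds(run_osd_stat, want: int, timeout: float = 900.0) -> None:
        # Poll until num_up_osds reaches the expected count (8 in this run),
        # matching the repeated "ceph osd stat -f json" calls in the log.
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            stat = json.loads(run_osd_stat())  # '{"epoch":5,"num_osds":0,...}'
            if stat["num_up_osds"] >= want:
                return
            time.sleep(1)
        raise TimeoutError(f"only {stat['num_up_osds']}/{want} OSDs came up")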
2026-03-06T23:55:11.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:11 vm08 ceph-mon[56019]: Reconfiguring daemon prometheus.vm03 on vm03 2026-03-06T23:55:11.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:11 vm08 ceph-mon[56019]: from='client.14264 -' entity='client.admin' cmd=[{"prefix": "orch apply osd", "all_available_devices": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:55:11.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:11 vm08 ceph-mon[56019]: Marking host: vm03 for OSDSpec preview refresh. 2026-03-06T23:55:11.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:11 vm08 ceph-mon[56019]: Marking host: vm08 for OSDSpec preview refresh. 2026-03-06T23:55:11.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:11 vm08 ceph-mon[56019]: Saving service osd.all-available-devices spec with placement * 2026-03-06T23:55:11.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:11 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:11.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:11 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:11.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:11 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm08", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch 2026-03-06T23:55:11.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:11 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:55:11.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:11 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/1765778388' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T23:55:12.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:11 vm03 ceph-mon[48028]: Reconfiguring prometheus.vm03 (dependencies changed)... 2026-03-06T23:55:12.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:11 vm03 ceph-mon[48028]: Reconfiguring daemon prometheus.vm03 on vm03 2026-03-06T23:55:12.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:11 vm03 ceph-mon[48028]: from='client.14264 -' entity='client.admin' cmd=[{"prefix": "orch apply osd", "all_available_devices": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:55:12.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:11 vm03 ceph-mon[48028]: Marking host: vm03 for OSDSpec preview refresh. 2026-03-06T23:55:12.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:11 vm03 ceph-mon[48028]: Marking host: vm08 for OSDSpec preview refresh. 
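[editor's note] The mon audit trail above shows the orchestrator persisting an osd.all-available-devices service spec with placement "*". Expressed as data, the saved spec is roughly the following; this is a sketch of the equivalent structure (the real stored object is a cephadm DriveGroupSpec, so field layout here is illustrative only):

    # Approximate shape of the spec saved by
    # "ceph orch apply osd --all-available-devices"
    osd_spec = {
        "service_type": "osd",
        "service_id": "all-available-devices",
        "placement": {"host_pattern": "*"},        # "placement *" in the log
        "spec": {"data_devices": {"all": True}},   # consume every free device
    }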
2026-03-06T23:55:12.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:11 vm03 ceph-mon[48028]: Saving service osd.all-available-devices spec with placement * 2026-03-06T23:55:12.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:11 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:12.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:11 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:12.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:11 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm08", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]: dispatch 2026-03-06T23:55:12.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:11 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:55:12.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:11 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/1765778388' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T23:55:12.255 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd stat -f json 2026-03-06T23:55:12.574 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:12 vm08 ceph-mon[56019]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T23:55:12.575 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:12 vm08 ceph-mon[56019]: Reconfiguring ceph-exporter.vm08 (monmap changed)... 2026-03-06T23:55:12.575 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:12 vm08 ceph-mon[56019]: Reconfiguring daemon ceph-exporter.vm08 on vm08 2026-03-06T23:55:12.575 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:12 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:12.590 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:55:12.937 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:55:12.937 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:12 vm03 ceph-mon[48028]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T23:55:12.937 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:12 vm03 ceph-mon[48028]: Reconfiguring ceph-exporter.vm08 (monmap changed)... 2026-03-06T23:55:12.938 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:12 vm03 ceph-mon[48028]: Reconfiguring daemon ceph-exporter.vm08 on vm08 2026-03-06T23:55:12.938 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:12 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:12.938 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:12 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:12.938 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:12 vm03 ceph-mon[48028]: Reconfiguring crash.vm08 (monmap changed)... 
2026-03-06T23:55:12.938 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:12 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm08", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-06T23:55:12.938 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:12 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:55:12.938 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:12 vm03 ceph-mon[48028]: Reconfiguring daemon crash.vm08 on vm08 2026-03-06T23:55:12.938 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:12 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:12.938 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:12 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:12.938 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:12 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm08.bnopnr", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-06T23:55:12.938 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:12 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-06T23:55:12.938 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:12 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:55:12.938 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:12 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:12.938 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:12 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:12.938 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:12 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-06T23:55:12.938 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:12 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-06T23:55:12.938 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:12 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:55:12.989 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0} 2026-03-06T23:55:12.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:12 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:12.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:12 vm08 ceph-mon[56019]: Reconfiguring crash.vm08 (monmap changed)... 
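[editor's note] Each "Reconfiguring daemon ..." pass re-mints the daemon's keyring with `auth get-or-create`, passing capabilities as alternating entity-type/cap pairs (note the ceph-exporter request earlier carries two separate mon caps, which is why "mon" appears twice in its cap list). A hedged sketch of the same call from a script:

    import subprocess

    def get_or_create_keyring(entity: str, caps: dict[str, str]) -> str:
        # caps maps daemon type -> capability string, e.g.
        # {"mon": "profile crash", "mgr": "profile crash"} for client.crash.*
        cmd = ["ceph", "auth", "get-or-create", entity]
        for who, cap in caps.items():
            cmd += [who, cap]
        return subprocess.run(cmd, capture_output=True, text=True,
                              check=True).stdout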
2026-03-06T23:55:12.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:12 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get-or-create", "entity": "client.crash.vm08", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]: dispatch 2026-03-06T23:55:12.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:12 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:55:12.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:12 vm08 ceph-mon[56019]: Reconfiguring daemon crash.vm08 on vm08 2026-03-06T23:55:12.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:12 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:12.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:12 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:12.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:12 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.vm08.bnopnr", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-06T23:55:12.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:12 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-06T23:55:12.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:12 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:55:12.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:12 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:12.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:12 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:12.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:12 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-06T23:55:12.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:12 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-06T23:55:12.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:12 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:55:13.577 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:13 vm03 ceph-mon[48028]: Reconfiguring mgr.vm08.bnopnr (monmap changed)... 2026-03-06T23:55:13.577 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:13 vm03 ceph-mon[48028]: Reconfiguring daemon mgr.vm08.bnopnr on vm08 2026-03-06T23:55:13.577 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:13 vm03 ceph-mon[48028]: Reconfiguring mon.vm08 (monmap changed)... 
2026-03-06T23:55:13.577 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:13 vm03 ceph-mon[48028]: Reconfiguring daemon mon.vm08 on vm08 2026-03-06T23:55:13.577 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:13 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:13.577 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:13 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:13.577 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:13 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-06T23:55:13.577 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:13 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm03.local:9093"}]: dispatch 2026-03-06T23:55:13.577 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:13 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:13.577 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:13 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-06T23:55:13.577 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:13 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm03.local:3000"}]: dispatch 2026-03-06T23:55:13.577 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:13 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:13.577 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:13 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-06T23:55:13.578 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:13 vm08 ceph-mon[56019]: Reconfiguring mgr.vm08.bnopnr (monmap changed)... 2026-03-06T23:55:13.578 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:13 vm08 ceph-mon[56019]: Reconfiguring daemon mgr.vm08.bnopnr on vm08 2026-03-06T23:55:13.578 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:13 vm08 ceph-mon[56019]: Reconfiguring mon.vm08 (monmap changed)... 
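[editor's note] The get/set pairs above are the mgr wiring the dashboard to the freshly reconfigured monitoring stack: read each endpoint setting, then rewrite it to the current alertmanager/grafana/prometheus location. A sketch of the same read-then-write update, assuming a hypothetical run_ceph() helper that returns command output:

    def set_if_changed(run_ceph, get_cmd: list[str], set_cmd: list[str],
                       value: str) -> None:
        # Mirrors the "dashboard get-X" / "dashboard set-X" pairs in the log:
        # only rewrite the setting when the stored value differs.
        current = run_ceph(get_cmd).strip()
        if current != value:
            run_ceph(set_cmd + [value])

    # e.g. set_if_changed(run_ceph,
    #                     ["dashboard", "get-grafana-api-url"],
    #                     ["dashboard", "set-grafana-api-url"],
    #                     "https://vm03.local:3000")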
2026-03-06T23:55:13.578 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:13 vm08 ceph-mon[56019]: Reconfiguring daemon mon.vm08 on vm08 2026-03-06T23:55:13.578 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:13 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:13.578 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:13 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:13.578 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:13 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-06T23:55:13.578 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:13 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm03.local:9093"}]: dispatch 2026-03-06T23:55:13.578 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:13 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:13.578 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:13 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-06T23:55:13.578 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:13 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm03.local:3000"}]: dispatch 2026-03-06T23:55:13.578 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:13 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:13.578 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:13 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-06T23:55:13.578 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:13 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm03.local:9095"}]: dispatch 2026-03-06T23:55:13.578 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:13 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:13.578 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:13 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T23:55:13.578 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:13 vm08 ceph-mon[56019]: from='client.? 
192.168.123.103:0/117222536' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T23:55:13.877 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:13 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm03.local:9095"}]: dispatch 2026-03-06T23:55:13.877 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:13 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:13.877 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:13 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T23:55:13.877 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:13 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/117222536' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T23:55:13.989 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd stat -f json 2026-03-06T23:55:14.364 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:55:14.626 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:14 vm08 ceph-mon[56019]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-06T23:55:14.626 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:14 vm08 ceph-mon[56019]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm03.local:9093"}]: dispatch 2026-03-06T23:55:14.626 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:14 vm08 ceph-mon[56019]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-06T23:55:14.626 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:14 vm08 ceph-mon[56019]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm03.local:3000"}]: dispatch 2026-03-06T23:55:14.626 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:14 vm08 ceph-mon[56019]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-06T23:55:14.626 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:14 vm08 ceph-mon[56019]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm03.local:9095"}]: dispatch 2026-03-06T23:55:14.626 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:14 vm08 ceph-mon[56019]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T23:55:14.626 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:14 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:14.626 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:14 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:14.626 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:14 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:14.626 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:14 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:14.626 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:14 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:14.626 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:14 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:14.626 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:14 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:14.626 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:14 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:14.626 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:14 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:55:14.626 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:14 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T23:55:14.626 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:14 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:14.626 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:14 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T23:55:14.626 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:14 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-06T23:55:14.626 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:14 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:55:14.626 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:14 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-06T23:55:14.626 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:14 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:55:14.712 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:14 vm03 ceph-mon[48028]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-06T23:55:14.712 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:14 vm03 ceph-mon[48028]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm03.local:9093"}]: dispatch 2026-03-06T23:55:14.712 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:14 vm03 ceph-mon[48028]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-06T23:55:14.712 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:14 vm03 ceph-mon[48028]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm03.local:3000"}]: dispatch 2026-03-06T23:55:14.712 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:14 vm03 ceph-mon[48028]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-06T23:55:14.712 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:14 vm03 ceph-mon[48028]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm03.local:9095"}]: dispatch 2026-03-06T23:55:14.712 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:14 vm03 ceph-mon[48028]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T23:55:14.712 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:14 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:14.712 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:14 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:14.712 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:14 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:14.712 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:14 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:14.712 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:14 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:14.712 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:14 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:14.712 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:14 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:14.712 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:14 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:14.712 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:14 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:55:14.712 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:14 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T23:55:14.712 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:14 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:14.712 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:14 vm03 
ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T23:55:14.712 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:14 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-06T23:55:14.712 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:14 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:55:14.712 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:14 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-06T23:55:14.712 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:14 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:55:14.781 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:55:14.856 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0} 2026-03-06T23:55:15.657 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:15 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/959649153' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T23:55:15.657 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:15 vm08 ceph-mon[56019]: from='client.? 192.168.123.108:0/2812831687' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "701b2fe5-c723-48b2-a1c5-ad56099eb19c"}]: dispatch 2026-03-06T23:55:15.658 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:15 vm08 ceph-mon[56019]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "701b2fe5-c723-48b2-a1c5-ad56099eb19c"}]: dispatch 2026-03-06T23:55:15.658 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:15 vm08 ceph-mon[56019]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "701b2fe5-c723-48b2-a1c5-ad56099eb19c"}]': finished 2026-03-06T23:55:15.658 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:15 vm08 ceph-mon[56019]: osdmap e6: 1 total, 0 up, 1 in 2026-03-06T23:55:15.658 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:15 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-06T23:55:15.800 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:15 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/959649153' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T23:55:15.800 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:15 vm03 ceph-mon[48028]: from='client.? 192.168.123.108:0/2812831687' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "701b2fe5-c723-48b2-a1c5-ad56099eb19c"}]: dispatch 2026-03-06T23:55:15.800 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:15 vm03 ceph-mon[48028]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "701b2fe5-c723-48b2-a1c5-ad56099eb19c"}]: dispatch 2026-03-06T23:55:15.800 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:15 vm03 ceph-mon[48028]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "701b2fe5-c723-48b2-a1c5-ad56099eb19c"}]': finished 2026-03-06T23:55:15.800 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:15 vm03 ceph-mon[48028]: osdmap e6: 1 total, 0 up, 1 in 2026-03-06T23:55:15.800 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:15 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-06T23:55:15.857 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd stat -f json 2026-03-06T23:55:16.222 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:55:16.552 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:55:16.624 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":7,"num_osds":2,"num_up_osds":0,"osd_up_since":0,"num_in_osds":2,"osd_in_since":1772837715,"num_remapped_pgs":0} 2026-03-06T23:55:16.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:16 vm08 ceph-mon[56019]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T23:55:16.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:16 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/1769432420' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "95b3b875-de30-48a2-9299-b98c1021c33f"}]: dispatch 2026-03-06T23:55:16.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:16 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/1769432420' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "95b3b875-de30-48a2-9299-b98c1021c33f"}]': finished 2026-03-06T23:55:16.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:16 vm08 ceph-mon[56019]: osdmap e7: 2 total, 0 up, 2 in 2026-03-06T23:55:16.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:16 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-06T23:55:16.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:16 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-06T23:55:16.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:16 vm08 ceph-mon[56019]: from='client.? 192.168.123.108:0/3230069501' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-06T23:55:16.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:16 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/2339358894' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-06T23:55:16.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:16 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/2891508380' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T23:55:17.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:16 vm03 ceph-mon[48028]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T23:55:17.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:16 vm03 ceph-mon[48028]: from='client.? 
192.168.123.103:0/1769432420' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "95b3b875-de30-48a2-9299-b98c1021c33f"}]: dispatch 2026-03-06T23:55:17.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:16 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/1769432420' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "95b3b875-de30-48a2-9299-b98c1021c33f"}]': finished 2026-03-06T23:55:17.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:16 vm03 ceph-mon[48028]: osdmap e7: 2 total, 0 up, 2 in 2026-03-06T23:55:17.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:16 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-06T23:55:17.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:16 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-06T23:55:17.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:16 vm03 ceph-mon[48028]: from='client.? 192.168.123.108:0/3230069501' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-06T23:55:17.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:16 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/2339358894' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-06T23:55:17.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:16 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/2891508380' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T23:55:17.625 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd stat -f json 2026-03-06T23:55:17.942 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:55:17.963 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:17 vm03 ceph-mon[48028]: pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T23:55:17.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:17 vm08 ceph-mon[56019]: pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T23:55:18.251 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:55:18.308 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":7,"num_osds":2,"num_up_osds":0,"osd_up_since":0,"num_in_osds":2,"osd_in_since":1772837715,"num_remapped_pgs":0} 2026-03-06T23:55:18.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:18 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/3017515251' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T23:55:18.973 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:18 vm03 ceph-mon[48028]: from='client.? 
192.168.123.103:0/3017515251' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T23:55:19.310 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd stat -f json 2026-03-06T23:55:19.653 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:55:19.698 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:19 vm08 ceph-mon[56019]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T23:55:19.698 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:19 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-06T23:55:19.699 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:19 vm08 ceph-mon[56019]: from='client.? 192.168.123.108:0/1064449144' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "043996d9-da37-4bab-ab77-3dc0b3e20036"}]: dispatch 2026-03-06T23:55:19.699 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:19 vm08 ceph-mon[56019]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "043996d9-da37-4bab-ab77-3dc0b3e20036"}]: dispatch 2026-03-06T23:55:19.699 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:19 vm08 ceph-mon[56019]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "043996d9-da37-4bab-ab77-3dc0b3e20036"}]': finished 2026-03-06T23:55:19.789 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:19 vm03 ceph-mon[48028]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T23:55:19.789 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:19 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-06T23:55:19.789 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:19 vm03 ceph-mon[48028]: from='client.? 192.168.123.108:0/1064449144' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "043996d9-da37-4bab-ab77-3dc0b3e20036"}]: dispatch 2026-03-06T23:55:19.789 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:19 vm03 ceph-mon[48028]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "043996d9-da37-4bab-ab77-3dc0b3e20036"}]: dispatch 2026-03-06T23:55:19.789 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:19 vm03 ceph-mon[48028]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "043996d9-da37-4bab-ab77-3dc0b3e20036"}]': finished 2026-03-06T23:55:19.789 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:19 vm03 ceph-mon[48028]: osdmap e8: 3 total, 0 up, 3 in 2026-03-06T23:55:19.789 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:19 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-06T23:55:19.789 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:19 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-06T23:55:19.789 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:19 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-06T23:55:19.789 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:19 vm03 ceph-mon[48028]: from='client.? 192.168.123.108:0/1337440102' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-06T23:55:19.789 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:19 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/532557998' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "b2c4b60c-dcce-4d36-b5bb-ca54018766c4"}]: dispatch 2026-03-06T23:55:19.789 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:19 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/532557998' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "b2c4b60c-dcce-4d36-b5bb-ca54018766c4"}]': finished 2026-03-06T23:55:19.789 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:19 vm03 ceph-mon[48028]: osdmap e9: 4 total, 0 up, 4 in 2026-03-06T23:55:19.789 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:19 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-06T23:55:19.789 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:19 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-06T23:55:19.789 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:19 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-06T23:55:19.789 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:19 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-06T23:55:19.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:19 vm08 ceph-mon[56019]: osdmap e8: 3 total, 0 up, 3 in 2026-03-06T23:55:19.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:19 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-06T23:55:19.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:19 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-06T23:55:19.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:19 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 
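[editor's note] Each `osd new <uuid>` accepted from client.bootstrap-osd allocates the next OSD id and commits a new osdmap epoch, which is why the journal shows e6 through e11 each adding one or two OSDs to "total"/"in" while "up" stays 0 until the daemons actually start. A small sketch for pulling that progression out of a captured journal:

    import re

    OSDMAP_RE = re.compile(r"osdmap e(\d+): (\d+) total, (\d+) up, (\d+) in")

    def osdmap_progress(journal_text: str) -> list[tuple[int, int, int, int]]:
        # Returns (epoch, total, up, in) tuples,
        # e.g. (8, 3, 0, 3) ... (11, 6, 0, 6) for this run.
        return [tuple(map(int, m.groups()))
                for m in OSDMAP_RE.finditer(journal_text)]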
2026-03-06T23:55:19.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:19 vm08 ceph-mon[56019]: from='client.? 192.168.123.108:0/1337440102' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-06T23:55:19.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:19 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/532557998' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "b2c4b60c-dcce-4d36-b5bb-ca54018766c4"}]: dispatch 2026-03-06T23:55:19.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:19 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/532557998' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "b2c4b60c-dcce-4d36-b5bb-ca54018766c4"}]': finished 2026-03-06T23:55:19.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:19 vm08 ceph-mon[56019]: osdmap e9: 4 total, 0 up, 4 in 2026-03-06T23:55:19.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:19 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-06T23:55:19.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:19 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-06T23:55:19.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:19 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-06T23:55:19.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:19 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-06T23:55:20.008 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:55:20.090 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":9,"num_osds":4,"num_up_osds":0,"osd_up_since":0,"num_in_osds":4,"osd_in_since":1772837719,"num_remapped_pgs":0} 2026-03-06T23:55:20.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:20 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/3602034794' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-06T23:55:20.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:20 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/71464148' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T23:55:21.037 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:20 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/3602034794' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-06T23:55:21.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:20 vm03 ceph-mon[48028]: from='client.? 
192.168.123.103:0/71464148' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T23:55:21.091 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd stat -f json 2026-03-06T23:55:21.407 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:55:21.720 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:55:21.802 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":9,"num_osds":4,"num_up_osds":0,"osd_up_since":0,"num_in_osds":4,"osd_in_since":1772837719,"num_remapped_pgs":0} 2026-03-06T23:55:21.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:21 vm08 ceph-mon[56019]: pgmap v13: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T23:55:22.037 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:21 vm03 ceph-mon[48028]: pgmap v13: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-06T23:55:22.803 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd stat -f json 2026-03-06T23:55:22.825 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:22 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/3402962930' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T23:55:22.826 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:22 vm03 ceph-mon[48028]: from='client.? 192.168.123.108:0/265023251' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "bd90deb9-db37-45e1-b727-12ab786acc3e"}]: dispatch 2026-03-06T23:55:22.826 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:22 vm03 ceph-mon[48028]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "bd90deb9-db37-45e1-b727-12ab786acc3e"}]: dispatch 2026-03-06T23:55:22.826 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:22 vm03 ceph-mon[48028]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "bd90deb9-db37-45e1-b727-12ab786acc3e"}]': finished
2026-03-06T23:55:22.826 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:22 vm03 ceph-mon[48028]: osdmap e10: 5 total, 0 up, 5 in
2026-03-06T23:55:22.826 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:22 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T23:55:22.826 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:22 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T23:55:22.826 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:22 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T23:55:22.826 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:22 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T23:55:22.826 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:22 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:22.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:22 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/3402962930' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T23:55:22.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:22 vm08 ceph-mon[56019]: from='client.? 192.168.123.108:0/265023251' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "bd90deb9-db37-45e1-b727-12ab786acc3e"}]: dispatch
2026-03-06T23:55:22.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:22 vm08 ceph-mon[56019]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "bd90deb9-db37-45e1-b727-12ab786acc3e"}]: dispatch
2026-03-06T23:55:22.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:22 vm08 ceph-mon[56019]: from='client.?
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "bd90deb9-db37-45e1-b727-12ab786acc3e"}]': finished
2026-03-06T23:55:22.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:22 vm08 ceph-mon[56019]: osdmap e10: 5 total, 0 up, 5 in
2026-03-06T23:55:22.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:22 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T23:55:22.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:22 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T23:55:22.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:22 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T23:55:22.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:22 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T23:55:22.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:22 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:23.143 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config
2026-03-06T23:55:23.496 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-06T23:55:23.581 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":11,"num_osds":6,"num_up_osds":0,"osd_up_since":0,"num_in_osds":6,"osd_in_since":1772837723,"num_remapped_pgs":0}
2026-03-06T23:55:23.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:23 vm03 ceph-mon[48028]: pgmap v15: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T23:55:23.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:23 vm03 ceph-mon[48028]: from='client.? 192.168.123.108:0/1664572957' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-06T23:55:23.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:23 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/4209614525' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "13033dde-57fa-42c6-a95b-9cab813c93a3"}]: dispatch
2026-03-06T23:55:23.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:23 vm03 ceph-mon[48028]: from='client.?
192.168.123.103:0/4209614525' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "13033dde-57fa-42c6-a95b-9cab813c93a3"}]': finished
2026-03-06T23:55:23.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:23 vm03 ceph-mon[48028]: osdmap e11: 6 total, 0 up, 6 in
2026-03-06T23:55:23.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:23 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T23:55:23.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:23 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T23:55:23.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:23 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T23:55:23.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:23 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T23:55:23.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:23 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:23.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:23 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:23.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:23 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/2348182622' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T23:55:23.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:23 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/441028367' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-06T23:55:23.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:23 vm08 ceph-mon[56019]: pgmap v15: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T23:55:23.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:23 vm08 ceph-mon[56019]: from='client.? 192.168.123.108:0/1664572957' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-06T23:55:23.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:23 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/4209614525' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "13033dde-57fa-42c6-a95b-9cab813c93a3"}]: dispatch
2026-03-06T23:55:23.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:23 vm08 ceph-mon[56019]: from='client.?
192.168.123.103:0/4209614525' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "13033dde-57fa-42c6-a95b-9cab813c93a3"}]': finished
2026-03-06T23:55:23.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:23 vm08 ceph-mon[56019]: osdmap e11: 6 total, 0 up, 6 in
2026-03-06T23:55:23.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:23 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T23:55:23.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:23 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T23:55:23.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:23 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T23:55:23.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:23 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T23:55:23.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:23 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:23.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:23 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:23.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:23 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/2348182622' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T23:55:23.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:23 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/441028367' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-06T23:55:24.582 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd stat -f json
2026-03-06T23:55:24.915 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config
2026-03-06T23:55:25.248 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-06T23:55:25.302 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":11,"num_osds":6,"num_up_osds":0,"osd_up_since":0,"num_in_osds":6,"osd_in_since":1772837723,"num_remapped_pgs":0}
2026-03-06T23:55:26.048 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:25 vm08 ceph-mon[56019]: pgmap v17: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T23:55:26.048 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:25 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/1354150436' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T23:55:26.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:25 vm03 ceph-mon[48028]: pgmap v17: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T23:55:26.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:25 vm03 ceph-mon[48028]: from='client.?
192.168.123.103:0/1354150436' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T23:55:26.303 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd stat -f json
2026-03-06T23:55:26.683 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config
2026-03-06T23:55:26.955 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:26 vm03 ceph-mon[48028]: from='client.? 192.168.123.108:0/3758720719' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "bc010e6f-0018-4ea9-a094-62c7f3721283"}]: dispatch
2026-03-06T23:55:26.955 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:26 vm03 ceph-mon[48028]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "bc010e6f-0018-4ea9-a094-62c7f3721283"}]: dispatch
2026-03-06T23:55:26.955 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:26 vm03 ceph-mon[48028]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "bc010e6f-0018-4ea9-a094-62c7f3721283"}]': finished
2026-03-06T23:55:26.955 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:26 vm03 ceph-mon[48028]: osdmap e12: 7 total, 0 up, 7 in
2026-03-06T23:55:26.956 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:26 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T23:55:27.061 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-06T23:55:27.132 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772837726,"num_remapped_pgs":0}
2026-03-06T23:55:27.244 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:26 vm08 ceph-mon[56019]: from='client.? 192.168.123.108:0/3758720719' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "bc010e6f-0018-4ea9-a094-62c7f3721283"}]: dispatch
2026-03-06T23:55:27.244 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:26 vm08 ceph-mon[56019]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "bc010e6f-0018-4ea9-a094-62c7f3721283"}]: dispatch
2026-03-06T23:55:27.244 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:26 vm08 ceph-mon[56019]: from='client.?
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "bc010e6f-0018-4ea9-a094-62c7f3721283"}]': finished
2026-03-06T23:55:27.244 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:26 vm08 ceph-mon[56019]: osdmap e12: 7 total, 0 up, 7 in
2026-03-06T23:55:27.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:26 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T23:55:27.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:26 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T23:55:27.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:26 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T23:55:27.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:26 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T23:55:27.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:26 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:27.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:26 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:27.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:26 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T23:55:27.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:26 vm08 ceph-mon[56019]: from='client.? 192.168.123.108:0/3105558281' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-06T23:55:27.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:26 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/470992226' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "dcbdaaaf-999f-4d53-884e-1b10c94faa7e"}]: dispatch
2026-03-06T23:55:27.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:26 vm08 ceph-mon[56019]: from='client.?
192.168.123.103:0/470992226' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "dcbdaaaf-999f-4d53-884e-1b10c94faa7e"}]': finished
2026-03-06T23:55:27.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:26 vm08 ceph-mon[56019]: osdmap e13: 8 total, 0 up, 8 in
2026-03-06T23:55:27.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:26 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T23:55:27.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:26 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T23:55:27.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:26 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T23:55:27.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:26 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T23:55:27.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:26 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:27.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:26 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:27.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:26 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T23:55:27.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:26 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T23:55:27.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:26 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T23:55:27.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:26 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T23:55:27.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:26 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T23:55:27.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:26 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:27.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:26 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:27.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:26 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T23:55:27.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:26 vm03 ceph-mon[48028]: from='client.? 192.168.123.108:0/3105558281' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-06T23:55:27.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:26 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/470992226' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "dcbdaaaf-999f-4d53-884e-1b10c94faa7e"}]: dispatch
2026-03-06T23:55:27.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:26 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/470992226' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "dcbdaaaf-999f-4d53-884e-1b10c94faa7e"}]': finished
2026-03-06T23:55:27.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:26 vm03 ceph-mon[48028]: osdmap e13: 8 total, 0 up, 8 in
2026-03-06T23:55:27.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:26 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T23:55:27.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:26 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T23:55:27.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:26 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T23:55:27.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:26 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T23:55:27.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:26 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:27.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:26 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:27.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:26 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T23:55:27.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:26 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T23:55:28.133 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd stat -f json
2026-03-06T23:55:28.244 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:27 vm08 ceph-mon[56019]: pgmap v20: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T23:55:28.244 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:27 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/2305500105' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-06T23:55:28.244 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:27 vm08 ceph-mon[56019]: from='client.?
192.168.123.103:0/4119795257' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T23:55:28.287 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:27 vm03 ceph-mon[48028]: pgmap v20: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T23:55:28.287 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:27 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/2305500105' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch
2026-03-06T23:55:28.287 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:27 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/4119795257' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T23:55:28.462 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config
2026-03-06T23:55:28.774 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-06T23:55:28.836 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772837726,"num_remapped_pgs":0}
2026-03-06T23:55:29.037 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:28 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/2766550411' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T23:55:29.211 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:28 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/2766550411' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T23:55:29.837 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd stat -f json
2026-03-06T23:55:30.181 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config
2026-03-06T23:55:30.219 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:29 vm03 ceph-mon[48028]: pgmap v21: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T23:55:30.248 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:29 vm08 ceph-mon[56019]: pgmap v21: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T23:55:30.574 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-06T23:55:30.633 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772837726,"num_remapped_pgs":0}
2026-03-06T23:55:31.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:30 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch
2026-03-06T23:55:31.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:30 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T23:55:31.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:30 vm08 ceph-mon[56019]: Deploying daemon osd.0 on vm08
2026-03-06T23:55:31.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:30 vm08 ceph-mon[56019]: from='client.?
192.168.123.103:0/1247557300' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T23:55:31.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:30 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch
2026-03-06T23:55:31.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:30 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T23:55:31.287 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:30 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch
2026-03-06T23:55:31.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:30 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T23:55:31.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:30 vm03 ceph-mon[48028]: Deploying daemon osd.0 on vm08
2026-03-06T23:55:31.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:30 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/1247557300' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T23:55:31.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:30 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch
2026-03-06T23:55:31.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:30 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T23:55:31.634 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd stat -f json
2026-03-06T23:55:32.037 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config
2026-03-06T23:55:32.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:31 vm08 ceph-mon[56019]: pgmap v22: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T23:55:32.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:31 vm08 ceph-mon[56019]: Deploying daemon osd.1 on vm03
2026-03-06T23:55:32.260 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:31 vm03 ceph-mon[48028]: pgmap v22: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T23:55:32.260 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:31 vm03 ceph-mon[48028]: Deploying daemon osd.1 on vm03
2026-03-06T23:55:32.466 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-06T23:55:32.562 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772837726,"num_remapped_pgs":0}
2026-03-06T23:55:32.987 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:32 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/2491176340' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T23:55:33.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:32 vm03 ceph-mon[48028]: from='client.?
192.168.123.103:0/2491176340' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T23:55:33.563 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd stat -f json
2026-03-06T23:55:33.886 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config
2026-03-06T23:55:34.295 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:34 vm08 ceph-mon[56019]: pgmap v23: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T23:55:34.295 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:34 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:34.295 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:34 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:34.295 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:34 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch
2026-03-06T23:55:34.295 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:34 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T23:55:34.295 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:34 vm08 ceph-mon[56019]: Deploying daemon osd.2 on vm08
2026-03-06T23:55:34.295 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:34 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-06T23:55:34.295 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:34 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:34.295 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:34 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:34.295 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:34 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch
2026-03-06T23:55:34.295 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:34 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T23:55:34.304 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-06T23:55:34.366 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772837726,"num_remapped_pgs":0}
2026-03-06T23:55:34.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:34 vm03 ceph-mon[48028]: pgmap v23: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T23:55:34.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:34 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:34.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:34 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:34.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:34 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch
2026-03-06T23:55:34.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:34 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T23:55:34.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:34 vm03 ceph-mon[48028]: Deploying daemon osd.2 on vm08
2026-03-06T23:55:34.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:34 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-06T23:55:34.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:34 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:34.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:34 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:34.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:34 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch
2026-03-06T23:55:34.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:34 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T23:55:35.227 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:35 vm03 ceph-mon[48028]: Deploying daemon osd.3 on vm03
2026-03-06T23:55:35.367 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd stat -f json
2026-03-06T23:55:35.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:35 vm08 ceph-mon[56019]: Deploying daemon osd.3 on vm03
2026-03-06T23:55:35.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:35 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/4222390749' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T23:55:35.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:35 vm08 ceph-mon[56019]: from='osd.0 [v2:192.168.123.108:6800/2743874051,v1:192.168.123.108:6801/2743874051]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-06T23:55:35.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:35 vm08 ceph-mon[56019]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-06T23:55:35.498 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:35 vm03 ceph-mon[48028]: from='client.?
192.168.123.103:0/4222390749' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T23:55:35.498 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:35 vm03 ceph-mon[48028]: from='osd.0 [v2:192.168.123.108:6800/2743874051,v1:192.168.123.108:6801/2743874051]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-06T23:55:35.498 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:35 vm03 ceph-mon[48028]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch
2026-03-06T23:55:35.802 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config
2026-03-06T23:55:36.127 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-06T23:55:36.211 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":14,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1772837726,"num_remapped_pgs":0}
2026-03-06T23:55:36.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:36 vm08 ceph-mon[56019]: pgmap v24: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T23:55:36.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:36 vm08 ceph-mon[56019]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished
2026-03-06T23:55:36.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:36 vm08 ceph-mon[56019]: from='osd.0 [v2:192.168.123.108:6800/2743874051,v1:192.168.123.108:6801/2743874051]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch
2026-03-06T23:55:36.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:36 vm08 ceph-mon[56019]: osdmap e14: 8 total, 0 up, 8 in
2026-03-06T23:55:36.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:36 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T23:55:36.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:36 vm08 ceph-mon[56019]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch
2026-03-06T23:55:36.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:36 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T23:55:36.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:36 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T23:55:36.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:36 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T23:55:36.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:36 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:36.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:36 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:36.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:36 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T23:55:36.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:36 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T23:55:36.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:36 vm08 ceph-mon[56019]: from='osd.1 [v2:192.168.123.103:6802/3895163815,v1:192.168.123.103:6803/3895163815]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch
2026-03-06T23:55:36.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:36 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/4273795937' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T23:55:36.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:36 vm03 ceph-mon[48028]: pgmap v24: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T23:55:36.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:36 vm03 ceph-mon[48028]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished
2026-03-06T23:55:36.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:36 vm03 ceph-mon[48028]: from='osd.0 [v2:192.168.123.108:6800/2743874051,v1:192.168.123.108:6801/2743874051]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch
2026-03-06T23:55:36.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:36 vm03 ceph-mon[48028]: osdmap e14: 8 total, 0 up, 8 in
2026-03-06T23:55:36.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:36 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T23:55:36.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:36 vm03 ceph-mon[48028]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch
2026-03-06T23:55:36.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:36 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T23:55:36.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:36 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T23:55:36.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:36 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T23:55:36.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:36 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:36.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:36 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:36.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:36 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T23:55:36.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:36 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T23:55:36.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:36 vm03 ceph-mon[48028]: from='osd.1 [v2:192.168.123.103:6802/3895163815,v1:192.168.123.103:6803/3895163815]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch
2026-03-06T23:55:36.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:36 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/4273795937' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T23:55:37.212 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd stat -f json
2026-03-06T23:55:37.496 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:37 vm08 ceph-mon[56019]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished
2026-03-06T23:55:37.496 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:37 vm08 ceph-mon[56019]: from='osd.1 [v2:192.168.123.103:6802/3895163815,v1:192.168.123.103:6803/3895163815]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished
2026-03-06T23:55:37.496 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:37 vm08 ceph-mon[56019]: osdmap e15: 8 total, 0 up, 8 in
2026-03-06T23:55:37.496 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:37 vm08 ceph-mon[56019]: from='osd.1 [v2:192.168.123.103:6802/3895163815,v1:192.168.123.103:6803/3895163815]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-06T23:55:37.496 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:37 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T23:55:37.496 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:37 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T23:55:37.496 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:37 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T23:55:37.496 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:37 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T23:55:37.496 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:37 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:37.496 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:37 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:37.496 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:37 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T23:55:37.496 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:37 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T23:55:37.496 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:37 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T23:55:37.496 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:37 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:37.496 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:37 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:37.496 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:37 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch
2026-03-06T23:55:37.496 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:37 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T23:55:37.496 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:37 vm08 ceph-mon[56019]: Deploying daemon osd.4 on vm08
2026-03-06T23:55:37.496 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:37 vm08 ceph-mon[56019]: from='osd.1 [v2:192.168.123.103:6802/3895163815,v1:192.168.123.103:6803/3895163815]' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished
2026-03-06T23:55:37.496 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:37 vm08 ceph-mon[56019]: osd.0 [v2:192.168.123.108:6800/2743874051,v1:192.168.123.108:6801/2743874051] boot
2026-03-06T23:55:37.496 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:37 vm08 ceph-mon[56019]: osdmap e16: 8 total, 1 up, 8 in
2026-03-06T23:55:37.496 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:37 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T23:55:37.496 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:37 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T23:55:37.496 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:37 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T23:55:37.496 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:37 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T23:55:37.496 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:37 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:37.496 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:37 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:37.496 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:37 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T23:55:37.496 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:37 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T23:55:37.509 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:37 vm03 ceph-mon[48028]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished
2026-03-06T23:55:37.509 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:37 vm03 ceph-mon[48028]: from='osd.1 [v2:192.168.123.103:6802/3895163815,v1:192.168.123.103:6803/3895163815]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished
2026-03-06T23:55:37.509 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:37 vm03 ceph-mon[48028]: osdmap e15: 8 total, 0 up, 8 in
2026-03-06T23:55:37.509 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:37 vm03 ceph-mon[48028]: from='osd.1 [v2:192.168.123.103:6802/3895163815,v1:192.168.123.103:6803/3895163815]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-06T23:55:37.509 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:37 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T23:55:37.509 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:37 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T23:55:37.509 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:37 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T23:55:37.509 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:37 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T23:55:37.509 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:37 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:37.509 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:37 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:37.509 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:37 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T23:55:37.509 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:37 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T23:55:37.509 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:37 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T23:55:37.509 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:37 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:37.509 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:37 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:37.509 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:37 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch
2026-03-06T23:55:37.509 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:37 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T23:55:37.509 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:37 vm03 ceph-mon[48028]: Deploying daemon osd.4 on vm08
2026-03-06T23:55:37.509 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:37 vm03 ceph-mon[48028]: from='osd.1 [v2:192.168.123.103:6802/3895163815,v1:192.168.123.103:6803/3895163815]' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished
2026-03-06T23:55:37.509 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:37 vm03 ceph-mon[48028]: osd.0 [v2:192.168.123.108:6800/2743874051,v1:192.168.123.108:6801/2743874051] boot
2026-03-06T23:55:37.509 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:37 vm03 ceph-mon[48028]: osdmap e16: 8 total, 1 up, 8 in
2026-03-06T23:55:37.509 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:37 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch
2026-03-06T23:55:37.509 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:37 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T23:55:37.509 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:37 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T23:55:37.509 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:37 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T23:55:37.509 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:37 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:37.509 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:37 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:37.509 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:37 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T23:55:37.509 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:37 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T23:55:37.631 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config
2026-03-06T23:55:38.003 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-06T23:55:38.067 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":16,"num_osds":8,"num_up_osds":1,"osd_up_since":1772837737,"num_in_osds":8,"osd_in_since":1772837726,"num_remapped_pgs":0}
2026-03-06T23:55:38.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:38 vm08 ceph-mon[56019]: purged_snaps scrub starts
2026-03-06T23:55:38.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:38 vm08 ceph-mon[56019]: purged_snaps scrub ok
2026-03-06T23:55:38.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:38 vm08 ceph-mon[56019]: pgmap v27: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T23:55:38.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:38 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T23:55:38.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:38 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:38.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:38 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:38.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:38 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch
2026-03-06T23:55:38.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:38 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T23:55:38.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:38 vm08 ceph-mon[56019]: Deploying daemon osd.5 on vm03
2026-03-06T23:55:38.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:38 vm08 ceph-mon[56019]: from='osd.2 [v2:192.168.123.108:6808/1410769664,v1:192.168.123.108:6809/1410769664]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch
2026-03-06T23:55:38.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:38 vm08 ceph-mon[56019]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch
2026-03-06T23:55:38.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:38 vm08 ceph-mon[56019]: from='client.?
192.168.123.103:0/2017167930' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T23:55:38.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:38 vm03 ceph-mon[48028]: purged_snaps scrub starts
2026-03-06T23:55:38.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:38 vm03 ceph-mon[48028]: purged_snaps scrub ok
2026-03-06T23:55:38.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:38 vm03 ceph-mon[48028]: pgmap v27: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-03-06T23:55:38.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:38 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T23:55:38.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:38 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:38.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:38 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:38.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:38 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch
2026-03-06T23:55:38.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:38 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T23:55:38.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:38 vm03 ceph-mon[48028]: Deploying daemon osd.5 on vm03
2026-03-06T23:55:38.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:38 vm03 ceph-mon[48028]: from='osd.2 [v2:192.168.123.108:6808/1410769664,v1:192.168.123.108:6809/1410769664]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch
2026-03-06T23:55:38.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:38 vm03 ceph-mon[48028]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch
2026-03-06T23:55:38.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:38 vm03 ceph-mon[48028]: from='client.?
192.168.123.103:0/2017167930' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T23:55:39.068 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd stat -f json
2026-03-06T23:55:39.499 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:39 vm03 ceph-mon[48028]: purged_snaps scrub starts
2026-03-06T23:55:39.499 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:39 vm03 ceph-mon[48028]: purged_snaps scrub ok
2026-03-06T23:55:39.499 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:39 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T23:55:39.499 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:39 vm03 ceph-mon[48028]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished
2026-03-06T23:55:39.499 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:39 vm03 ceph-mon[48028]: osd.1 [v2:192.168.123.103:6802/3895163815,v1:192.168.123.103:6803/3895163815] boot
2026-03-06T23:55:39.499 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:39 vm03 ceph-mon[48028]: osdmap e17: 8 total, 2 up, 8 in
2026-03-06T23:55:39.499 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:39 vm03 ceph-mon[48028]: from='osd.2 [v2:192.168.123.108:6808/1410769664,v1:192.168.123.108:6809/1410769664]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch
2026-03-06T23:55:39.499 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:39 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T23:55:39.499 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:39 vm03 ceph-mon[48028]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch
2026-03-06T23:55:39.499 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:39 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T23:55:39.499 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:39 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T23:55:39.499 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:39 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:39.499 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:39 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:39.499 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:39 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T23:55:39.499 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:39 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T23:55:39.499 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:39 vm03 ceph-mon[48028]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished
2026-03-06T23:55:39.499 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:39 vm03 ceph-mon[48028]: osdmap e18: 8 total, 2 up, 8 in
2026-03-06T23:55:39.527 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config
2026-03-06T23:55:39.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:39 vm08 ceph-mon[56019]: purged_snaps scrub starts
2026-03-06T23:55:39.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:39 vm08 ceph-mon[56019]: purged_snaps scrub ok
2026-03-06T23:55:39.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:39 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T23:55:39.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:39 vm08 ceph-mon[56019]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished
2026-03-06T23:55:39.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:39 vm08 ceph-mon[56019]: osd.1 [v2:192.168.123.103:6802/3895163815,v1:192.168.123.103:6803/3895163815] boot
2026-03-06T23:55:39.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:39 vm08 ceph-mon[56019]: osdmap e17: 8 total, 2 up, 8 in
2026-03-06T23:55:39.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:39 vm08 ceph-mon[56019]: from='osd.2 [v2:192.168.123.108:6808/1410769664,v1:192.168.123.108:6809/1410769664]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch
2026-03-06T23:55:39.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:39 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch
2026-03-06T23:55:39.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:39 vm08 ceph-mon[56019]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch
2026-03-06T23:55:39.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:39 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T23:55:39.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:39 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T23:55:39.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:39 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:39.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:39 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:39.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:39 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd
metadata", "id": 6}]: dispatch 2026-03-06T23:55:39.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:39 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-06T23:55:39.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:39 vm08 ceph-mon[56019]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished 2026-03-06T23:55:39.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:39 vm08 ceph-mon[56019]: osdmap e18: 8 total, 2 up, 8 in 2026-03-06T23:55:39.981 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:55:40.074 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":18,"num_osds":8,"num_up_osds":2,"osd_up_since":1772837738,"num_in_osds":8,"osd_in_since":1772837726,"num_remapped_pgs":0} 2026-03-06T23:55:40.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:40 vm08 ceph-mon[56019]: pgmap v30: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail 2026-03-06T23:55:40.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:40 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-06T23:55:40.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:40 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-06T23:55:40.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:40 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-06T23:55:40.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:40 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-06T23:55:40.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:40 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-06T23:55:40.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:40 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-06T23:55:40.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:40 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-06T23:55:40.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:40 vm08 ceph-mon[56019]: from='osd.3 [v2:192.168.123.103:6810/3949859068,v1:192.168.123.103:6811/3949859068]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-06T23:55:40.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:40 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:40.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:40 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:40.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:40 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 
2026-03-06T23:55:40.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:40 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T23:55:40.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:40 vm08 ceph-mon[56019]: Deploying daemon osd.6 on vm08
2026-03-06T23:55:40.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:40 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/4006746612' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T23:55:40.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:40 vm08 ceph-mon[56019]: from='osd.2 ' entity='osd.2'
2026-03-06T23:55:40.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:40 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T23:55:40.605 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:40 vm03 ceph-mon[48028]: pgmap v30: 0 pgs: ; 0 B data, 26 MiB used, 20 GiB / 20 GiB avail
2026-03-06T23:55:40.605 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:40 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T23:55:40.605 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:40 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T23:55:40.605 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:40 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:40.605 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:40 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:40.605 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:40 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T23:55:40.605 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:40 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T23:55:40.606 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:40 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T23:55:40.606 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:40 vm03 ceph-mon[48028]: from='osd.3 [v2:192.168.123.103:6810/3949859068,v1:192.168.123.103:6811/3949859068]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch
2026-03-06T23:55:40.606 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:40 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:40.606 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:40 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:40.606 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:40 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch
2026-03-06T23:55:40.606 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:40 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T23:55:40.606 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:40 vm03 ceph-mon[48028]: Deploying daemon osd.6 on vm08
2026-03-06T23:55:40.606 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:40 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/4006746612' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T23:55:40.606 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:40 vm03 ceph-mon[48028]: from='osd.2 ' entity='osd.2'
2026-03-06T23:55:40.606 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:40 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T23:55:41.075 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd stat -f json
2026-03-06T23:55:41.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:41 vm08 ceph-mon[56019]: purged_snaps scrub starts
2026-03-06T23:55:41.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:41 vm08 ceph-mon[56019]: purged_snaps scrub ok
2026-03-06T23:55:41.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:41 vm08 ceph-mon[56019]: from='osd.3 [v2:192.168.123.103:6810/3949859068,v1:192.168.123.103:6811/3949859068]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished
2026-03-06T23:55:41.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:41 vm08 ceph-mon[56019]: osd.2 [v2:192.168.123.108:6808/1410769664,v1:192.168.123.108:6809/1410769664] boot
2026-03-06T23:55:41.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:41 vm08 ceph-mon[56019]: osdmap e19: 8 total, 3 up, 8 in
2026-03-06T23:55:41.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:41 vm08 ceph-mon[56019]: from='osd.3 [v2:192.168.123.103:6810/3949859068,v1:192.168.123.103:6811/3949859068]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-06T23:55:41.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:41 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T23:55:41.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:41 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T23:55:41.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:41 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:41.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:41 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:41.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:41 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T23:55:41.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:41 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T23:55:41.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:41 vm08 ceph-mon[56019]: from='osd.4 [v2:192.168.123.108:6816/2504550642,v1:192.168.123.108:6817/2504550642]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch
2026-03-06T23:55:41.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:41 vm08 ceph-mon[56019]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch
2026-03-06T23:55:41.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:41 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]: dispatch
2026-03-06T23:55:41.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:41 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:41.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:41 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:41.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:41 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch
2026-03-06T23:55:41.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:41 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T23:55:41.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:41 vm08 ceph-mon[56019]: from='osd.3 [v2:192.168.123.103:6810/3949859068,v1:192.168.123.103:6811/3949859068]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished
2026-03-06T23:55:41.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:41 vm08 ceph-mon[56019]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished
2026-03-06T23:55:41.495 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config
2026-03-06T23:55:41.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:41 vm03 ceph-mon[48028]: purged_snaps scrub starts
2026-03-06T23:55:41.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:41 vm03 ceph-mon[48028]: purged_snaps scrub ok
2026-03-06T23:55:41.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:41 vm03 ceph-mon[48028]: from='osd.3 [v2:192.168.123.103:6810/3949859068,v1:192.168.123.103:6811/3949859068]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished
2026-03-06T23:55:41.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:41 vm03 ceph-mon[48028]: osd.2 [v2:192.168.123.108:6808/1410769664,v1:192.168.123.108:6809/1410769664] boot
2026-03-06T23:55:41.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:41 vm03 ceph-mon[48028]: osdmap e19: 8 total, 3 up, 8 in
2026-03-06T23:55:41.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:41 vm03 ceph-mon[48028]: from='osd.3 [v2:192.168.123.103:6810/3949859068,v1:192.168.123.103:6811/3949859068]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-06T23:55:41.593 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:41 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch
2026-03-06T23:55:41.594 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:41 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T23:55:41.594 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:41 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:41.594 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:41 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:41.594 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:41 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T23:55:41.594 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:41 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T23:55:41.594 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:41 vm03 ceph-mon[48028]: from='osd.4 [v2:192.168.123.108:6816/2504550642,v1:192.168.123.108:6817/2504550642]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch
2026-03-06T23:55:41.594 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:41 vm03 ceph-mon[48028]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch
2026-03-06T23:55:41.594 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:41 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]: dispatch
2026-03-06T23:55:41.594 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:41 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:41.594 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:41 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:41.594 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:41 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch
2026-03-06T23:55:41.594 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:41 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T23:55:41.594 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:41 vm03 ceph-mon[48028]: from='osd.3 [v2:192.168.123.103:6810/3949859068,v1:192.168.123.103:6811/3949859068]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished
2026-03-06T23:55:41.594 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:41 vm03 ceph-mon[48028]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished
2026-03-06T23:55:41.966 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-06T23:55:42.058 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":20,"num_osds":8,"num_up_osds":3,"osd_up_since":1772837740,"num_in_osds":8,"osd_in_since":1772837726,"num_remapped_pgs":0}
2026-03-06T23:55:42.569 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:42 vm03 ceph-mon[48028]: pgmap v33: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail
2026-03-06T23:55:42.569 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:42 vm03 ceph-mon[48028]: Deploying daemon osd.7 on vm03
2026-03-06T23:55:42.569 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:42 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]': finished
2026-03-06T23:55:42.569 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:42 vm03 ceph-mon[48028]: from='osd.4 [v2:192.168.123.108:6816/2504550642,v1:192.168.123.108:6817/2504550642]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch
2026-03-06T23:55:42.569 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:42 vm03 ceph-mon[48028]: osdmap e20: 8 total, 3 up, 8 in
2026-03-06T23:55:42.569 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:42 vm03 ceph-mon[48028]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch
2026-03-06T23:55:42.569 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:42 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T23:55:42.569 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:42 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:42.569 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:42 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:42.569 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:42 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T23:55:42.569 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:42 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T23:55:42.569 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:42 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch
2026-03-06T23:55:42.569 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:42 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/724877922' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T23:55:42.569 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:42 vm03 ceph-mon[48028]: from='osd.3 [v2:192.168.123.103:6810/3949859068,v1:192.168.123.103:6811/3949859068]' entity='osd.3'
2026-03-06T23:55:42.569 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:42 vm03 ceph-mon[48028]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished
2026-03-06T23:55:42.569 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:42 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished
2026-03-06T23:55:42.569 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:42 vm03 ceph-mon[48028]: osdmap e21: 8 total, 3 up, 8 in
2026-03-06T23:55:42.569 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:42 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T23:55:42.569 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:42 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:42.569 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:42 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:42.569 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:42 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T23:55:42.569 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:42 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T23:55:42.569 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:42 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:42.569 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:42 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T23:55:42.687 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:42 vm08 ceph-mon[56019]: pgmap v33: 0 pgs: ; 0 B data, 53 MiB used, 40 GiB / 40 GiB avail
2026-03-06T23:55:42.687 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:42 vm08 ceph-mon[56019]: Deploying daemon osd.7 on vm03
2026-03-06T23:55:42.687 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:42 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]': finished
2026-03-06T23:55:42.687 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:42 vm08 ceph-mon[56019]: from='osd.4 [v2:192.168.123.108:6816/2504550642,v1:192.168.123.108:6817/2504550642]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch
2026-03-06T23:55:42.687 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:42 vm08 ceph-mon[56019]: osdmap e20: 8 total, 3 up, 8 in
2026-03-06T23:55:42.687 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:42 vm08 ceph-mon[56019]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch
2026-03-06T23:55:42.687 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:42 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T23:55:42.687 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:42 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:42.687 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:42 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:42.687 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:42 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T23:55:42.687 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:42 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T23:55:42.687 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:42 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch
2026-03-06T23:55:42.687 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:42 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/724877922' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T23:55:42.687 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:42 vm08 ceph-mon[56019]: from='osd.3 [v2:192.168.123.103:6810/3949859068,v1:192.168.123.103:6811/3949859068]' entity='osd.3'
2026-03-06T23:55:42.687 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:42 vm08 ceph-mon[56019]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished
2026-03-06T23:55:42.687 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:42 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished
2026-03-06T23:55:42.687 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:42 vm08 ceph-mon[56019]: osdmap e21: 8 total, 3 up, 8 in
2026-03-06T23:55:42.687 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:42 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T23:55:42.687 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:42 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:42.687 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:42 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:42.687 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:42 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T23:55:42.687 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:42 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T23:55:42.687 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:42 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:42.687 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:42 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T23:55:43.059 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd stat -f json
2026-03-06T23:55:43.374 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:43 vm08 sudo[65918]: ceph : PWD=/ ; USER=root ; COMMAND=/usr/sbin/smartctl -x --json=o /dev/vda
2026-03-06T23:55:43.374 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:43 vm08 sudo[65918]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
2026-03-06T23:55:43.374 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:43 vm08 sudo[65918]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=167)
2026-03-06T23:55:43.374 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:43 vm08 sudo[65918]: pam_unix(sudo:session): session closed for user root
2026-03-06T23:55:43.550 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config
2026-03-06T23:55:43.601 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:43 vm03 sudo[71878]: ceph : PWD=/ ; USER=root ; COMMAND=/usr/sbin/smartctl -x --json=o /dev/vda
2026-03-06T23:55:43.601 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:43 vm03 sudo[71878]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
2026-03-06T23:55:43.601 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:43 vm03 sudo[71878]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=167)
2026-03-06T23:55:43.601 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:43 vm03 sudo[71878]: pam_unix(sudo:session): session closed for user root
2026-03-06T23:55:43.601 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:43 vm03 ceph-mon[48028]: purged_snaps scrub starts
2026-03-06T23:55:43.601 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:43 vm03 ceph-mon[48028]: purged_snaps scrub ok
2026-03-06T23:55:43.601 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:43 vm03 ceph-mon[48028]: from='osd.5 [v2:192.168.123.103:6818/1545819671,v1:192.168.123.103:6819/1545819671]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch
2026-03-06T23:55:43.601 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:43 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:43.601 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:43 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:43.601 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:43 vm03 ceph-mon[48028]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch
2026-03-06T23:55:43.601 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:43 vm03 ceph-mon[48028]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished
2026-03-06T23:55:43.601 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:43 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch
2026-03-06T23:55:43.601 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:43 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch
2026-03-06T23:55:43.601 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:43 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:43.601 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:43 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T23:55:43.601 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:43 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch
2026-03-06T23:55:43.601 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:43 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch
2026-03-06T23:55:43.601 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:43 vm03 ceph-mon[48028]: from='osd.5 [v2:192.168.123.103:6818/1545819671,v1:192.168.123.103:6819/1545819671]' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished
2026-03-06T23:55:43.601 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:43 vm03 ceph-mon[48028]: osd.3 [v2:192.168.123.103:6810/3949859068,v1:192.168.123.103:6811/3949859068] boot
2026-03-06T23:55:43.601 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:43 vm03 ceph-mon[48028]: osdmap e22: 8 total, 4 up, 8 in
2026-03-06T23:55:43.601 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:43 vm03 ceph-mon[48028]: from='osd.5 [v2:192.168.123.103:6818/1545819671,v1:192.168.123.103:6819/1545819671]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-06T23:55:43.601 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:43 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T23:55:43.601 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:43 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:43.601 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:43 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:43.601 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:43 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T23:55:43.601 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:43 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T23:55:43.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:43 vm08 ceph-mon[56019]: purged_snaps scrub starts
2026-03-06T23:55:43.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:43 vm08 ceph-mon[56019]: purged_snaps scrub ok
2026-03-06T23:55:43.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:43 vm08 ceph-mon[56019]: from='osd.5 [v2:192.168.123.103:6818/1545819671,v1:192.168.123.103:6819/1545819671]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch
2026-03-06T23:55:43.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:43 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:43.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:43 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:43.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:43 vm08 ceph-mon[56019]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch
2026-03-06T23:55:43.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:43 vm08 ceph-mon[56019]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished
2026-03-06T23:55:43.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:43 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch
2026-03-06T23:55:43.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:43 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch
2026-03-06T23:55:43.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:43 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:43.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:43 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T23:55:43.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:43 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata", "id": "vm03"}]: dispatch
2026-03-06T23:55:43.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:43 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "mon metadata", "id": "vm08"}]: dispatch
2026-03-06T23:55:43.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:43 vm08 ceph-mon[56019]: from='osd.5 [v2:192.168.123.103:6818/1545819671,v1:192.168.123.103:6819/1545819671]' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished
2026-03-06T23:55:43.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:43 vm08 ceph-mon[56019]: osd.3 [v2:192.168.123.103:6810/3949859068,v1:192.168.123.103:6811/3949859068] boot
2026-03-06T23:55:43.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:43 vm08 ceph-mon[56019]: osdmap e22: 8 total, 4 up, 8 in
2026-03-06T23:55:43.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:43 vm08 ceph-mon[56019]: from='osd.5 [v2:192.168.123.103:6818/1545819671,v1:192.168.123.103:6819/1545819671]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch
2026-03-06T23:55:43.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:43 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch
2026-03-06T23:55:43.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:43 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:43.628 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:43 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:43.629 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:43 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T23:55:43.629 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:43 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T23:55:44.049 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-06T23:55:44.137 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":22,"num_osds":8,"num_up_osds":4,"osd_up_since":1772837743,"num_in_osds":8,"osd_in_since":1772837726,"num_remapped_pgs":0}
2026-03-06T23:55:44.630 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:44 vm03 ceph-mon[48028]: purged_snaps scrub starts
2026-03-06T23:55:44.630 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:44 vm03 ceph-mon[48028]: purged_snaps scrub ok
2026-03-06T23:55:44.630 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:44 vm03 ceph-mon[48028]: pgmap v36: 1 pgs: 1 unknown; 0 B data, 479 MiB used, 60 GiB / 60 GiB avail
2026-03-06T23:55:44.630 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:44 vm03 ceph-mon[48028]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch
2026-03-06T23:55:44.630 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:44 vm03 ceph-mon[48028]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished
2026-03-06T23:55:44.630 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:44 vm03 ceph-mon[48028]: from='osd.4 ' entity='osd.4'
2026-03-06T23:55:44.630 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:44 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/2469748891' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T23:55:44.630 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:44 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:44.630 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:44 vm03 ceph-mon[48028]: from='osd.5 [v2:192.168.123.103:6818/1545819671,v1:192.168.123.103:6819/1545819671]' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished
2026-03-06T23:55:44.630 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:44 vm03 ceph-mon[48028]: osd.4 [v2:192.168.123.108:6816/2504550642,v1:192.168.123.108:6817/2504550642] boot
2026-03-06T23:55:44.630 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:44 vm03 ceph-mon[48028]: osdmap e23: 8 total, 5 up, 8 in
2026-03-06T23:55:44.630 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:44 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:44.630 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:44 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:44.630 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:44 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T23:55:44.630 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:44 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T23:55:44.630 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:44 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:44.630 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:44 vm03 ceph-mon[48028]: from='osd.6 [v2:192.168.123.108:6824/1083263007,v1:192.168.123.108:6825/1083263007]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch
2026-03-06T23:55:44.630 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:44 vm03 ceph-mon[48028]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch
2026-03-06T23:55:44.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:44 vm08 ceph-mon[56019]: purged_snaps scrub starts
2026-03-06T23:55:44.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:44 vm08 ceph-mon[56019]: purged_snaps scrub ok
2026-03-06T23:55:44.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:44 vm08 ceph-mon[56019]: pgmap v36: 1 pgs: 1 unknown; 0 B data, 479 MiB used, 60 GiB / 60 GiB avail
2026-03-06T23:55:44.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:44 vm08 ceph-mon[56019]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch
2026-03-06T23:55:44.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:44 vm08 ceph-mon[56019]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished
2026-03-06T23:55:44.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:44 vm08 ceph-mon[56019]: from='osd.4 ' entity='osd.4'
2026-03-06T23:55:44.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:44 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/2469748891' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T23:55:44.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:44 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:44.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:44 vm08 ceph-mon[56019]: from='osd.5 [v2:192.168.123.103:6818/1545819671,v1:192.168.123.103:6819/1545819671]' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished
2026-03-06T23:55:44.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:44 vm08 ceph-mon[56019]: osd.4 [v2:192.168.123.108:6816/2504550642,v1:192.168.123.108:6817/2504550642] boot
2026-03-06T23:55:44.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:44 vm08 ceph-mon[56019]: osdmap e23: 8 total, 5 up, 8 in
2026-03-06T23:55:44.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:44 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch
2026-03-06T23:55:44.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:44 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:44.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:44 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T23:55:44.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:44 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T23:55:44.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:44 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:44.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:44 vm08 ceph-mon[56019]: from='osd.6 [v2:192.168.123.108:6824/1083263007,v1:192.168.123.108:6825/1083263007]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch
2026-03-06T23:55:44.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:44 vm08 ceph-mon[56019]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch
2026-03-06T23:55:45.137 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd stat -f json
2026-03-06T23:55:45.607 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config
2026-03-06T23:55:46.006 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:45 vm03 ceph-mon[48028]: purged_snaps scrub starts
2026-03-06T23:55:46.006 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:45 vm03 ceph-mon[48028]: purged_snaps scrub ok
2026-03-06T23:55:46.006 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:45 vm03 ceph-mon[48028]: pgmap v39: 1 pgs: 1 peering; 449 KiB data, 506 MiB used, 79 GiB / 80 GiB avail
2026-03-06T23:55:46.006 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:45 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:46.006 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:45 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:46.006 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:45 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:46.006 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:45 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:46.006 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:45 vm03 ceph-mon[48028]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished
2026-03-06T23:55:46.006 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:45 vm03 ceph-mon[48028]: from='osd.6 [v2:192.168.123.108:6824/1083263007,v1:192.168.123.108:6825/1083263007]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch
2026-03-06T23:55:46.006 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:45 vm03 ceph-mon[48028]: osdmap e24: 8 total, 5 up, 8 in
2026-03-06T23:55:46.006 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:45 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:46.006 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:45 vm03 ceph-mon[48028]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch
2026-03-06T23:55:46.006 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:45 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T23:55:46.006 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:45 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T23:55:46.006 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:45 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:46.006 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:45 vm03 ceph-mon[48028]: mgrmap e19: vm03.xzkqce(active, since 56s), standbys: vm08.bnopnr
2026-03-06T23:55:46.006 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:45 vm03 ceph-mon[48028]: from='osd.5 [v2:192.168.123.103:6818/1545819671,v1:192.168.123.103:6819/1545819671]' entity='osd.5'
2026-03-06T23:55:46.026 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-06T23:55:46.114 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":24,"num_osds":8,"num_up_osds":5,"osd_up_since":1772837744,"num_in_osds":8,"osd_in_since":1772837726,"num_remapped_pgs":0}
2026-03-06T23:55:46.244 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:45 vm08 ceph-mon[56019]: purged_snaps scrub starts
2026-03-06T23:55:46.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:45 vm08 ceph-mon[56019]: purged_snaps scrub ok
2026-03-06T23:55:46.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:45 vm08 ceph-mon[56019]: pgmap v39: 1 pgs: 1 peering; 449 KiB data, 506 MiB used, 79 GiB / 80 GiB avail
2026-03-06T23:55:46.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:45 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:46.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:45 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:46.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:45 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:46.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:45 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:46.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:45 vm08 ceph-mon[56019]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished
2026-03-06T23:55:46.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:45 vm08 ceph-mon[56019]: from='osd.6 [v2:192.168.123.108:6824/1083263007,v1:192.168.123.108:6825/1083263007]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch
2026-03-06T23:55:46.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:45 vm08 ceph-mon[56019]: osdmap e24: 8 total, 5 up, 8 in
2026-03-06T23:55:46.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:45 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:46.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:45 vm08 ceph-mon[56019]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm08", "root=default"]}]: dispatch
2026-03-06T23:55:46.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:45 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T23:55:46.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:45 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T23:55:46.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:45 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:46.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:45 vm08 ceph-mon[56019]: mgrmap e19: vm03.xzkqce(active, since 56s), standbys: vm08.bnopnr
2026-03-06T23:55:46.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:45 vm08 ceph-mon[56019]: from='osd.5 [v2:192.168.123.103:6818/1545819671,v1:192.168.123.103:6819/1545819671]' entity='osd.5'
2026-03-06T23:55:46.920 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:46 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/2988394048' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T23:55:46.920 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:46 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:46.920 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:46 vm03 ceph-mon[48028]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished
2026-03-06T23:55:46.920 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:46 vm03 ceph-mon[48028]: osd.5 [v2:192.168.123.103:6818/1545819671,v1:192.168.123.103:6819/1545819671] boot
2026-03-06T23:55:46.920 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:46 vm03 ceph-mon[48028]: osdmap e25: 8 total, 6 up, 8 in
2026-03-06T23:55:46.920 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:46 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch
2026-03-06T23:55:46.920 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:46 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T23:55:46.920 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:46 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T23:55:46.920 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:46 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch
2026-03-06T23:55:46.920 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:46 vm03 ceph-mon[48028]: from='osd.7 [v2:192.168.123.103:6826/977971964,v1:192.168.123.103:6827/977971964]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch
2026-03-06T23:55:47.115 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd stat -f json
2026-03-06T23:55:47.244 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:46 vm08 ceph-mon[56019]: from='client.?
192.168.123.103:0/2988394048' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T23:55:47.244 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:46 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-06T23:55:47.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:46 vm08 ceph-mon[56019]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm08", "root=default"]}]': finished 2026-03-06T23:55:47.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:46 vm08 ceph-mon[56019]: osd.5 [v2:192.168.123.103:6818/1545819671,v1:192.168.123.103:6819/1545819671] boot 2026-03-06T23:55:47.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:46 vm08 ceph-mon[56019]: osdmap e25: 8 total, 6 up, 8 in 2026-03-06T23:55:47.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:46 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-06T23:55:47.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:46 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-06T23:55:47.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:46 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-06T23:55:47.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:46 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-06T23:55:47.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:46 vm08 ceph-mon[56019]: from='osd.7 [v2:192.168.123.103:6826/977971964,v1:192.168.123.103:6827/977971964]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-06T23:55:47.532 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:55:47.912 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:55:47.972 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":26,"num_osds":8,"num_up_osds":6,"osd_up_since":1772837746,"num_in_osds":8,"osd_in_since":1772837726,"num_remapped_pgs":0} 2026-03-06T23:55:48.187 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:48 vm08 ceph-mon[56019]: purged_snaps scrub starts 2026-03-06T23:55:48.188 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:48 vm08 ceph-mon[56019]: purged_snaps scrub ok 2026-03-06T23:55:48.188 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:48 vm08 ceph-mon[56019]: pgmap v42: 1 pgs: 1 peering; 449 KiB data, 133 MiB used, 100 GiB / 100 GiB avail 2026-03-06T23:55:48.188 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:48 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:48.388 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:48 vm03 ceph-mon[48028]: purged_snaps scrub starts 2026-03-06T23:55:48.388 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:48 vm03 ceph-mon[48028]: purged_snaps scrub ok 2026-03-06T23:55:48.388 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:48 vm03 ceph-mon[48028]: pgmap v42: 1 pgs: 1 peering; 449 KiB data, 133 MiB used, 
100 GiB / 100 GiB avail 2026-03-06T23:55:48.388 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:48 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:48.388 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:48 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:48.388 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:48 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T23:55:48.388 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:48 vm03 ceph-mon[48028]: from='osd.7 [v2:192.168.123.103:6826/977971964,v1:192.168.123.103:6827/977971964]' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-06T23:55:48.389 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:48 vm03 ceph-mon[48028]: osdmap e26: 8 total, 6 up, 8 in 2026-03-06T23:55:48.389 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:48 vm03 ceph-mon[48028]: from='osd.7 [v2:192.168.123.103:6826/977971964,v1:192.168.123.103:6827/977971964]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-06T23:55:48.389 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:48 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-06T23:55:48.389 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:48 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-06T23:55:48.389 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:48 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-06T23:55:48.389 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:48 vm03 ceph-mon[48028]: from='osd.6 ' entity='osd.6' 2026-03-06T23:55:48.389 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:48 vm03 ceph-mon[48028]: from='client.? 
192.168.123.103:0/1283683804' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T23:55:48.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:48 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:48.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:48 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T23:55:48.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:48 vm08 ceph-mon[56019]: from='osd.7 [v2:192.168.123.103:6826/977971964,v1:192.168.123.103:6827/977971964]' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-06T23:55:48.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:48 vm08 ceph-mon[56019]: osdmap e26: 8 total, 6 up, 8 in 2026-03-06T23:55:48.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:48 vm08 ceph-mon[56019]: from='osd.7 [v2:192.168.123.103:6826/977971964,v1:192.168.123.103:6827/977971964]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm03", "root=default"]}]: dispatch 2026-03-06T23:55:48.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:48 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-06T23:55:48.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:48 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-06T23:55:48.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:48 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-06T23:55:48.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:48 vm08 ceph-mon[56019]: from='osd.6 ' entity='osd.6' 2026-03-06T23:55:48.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:48 vm08 ceph-mon[56019]: from='client.? 
192.168.123.103:0/1283683804' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T23:55:48.973 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd stat -f json 2026-03-06T23:55:49.445 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:55:49.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:49 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:49.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:49 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:49.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:49 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-06T23:55:49.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:49 vm08 ceph-mon[56019]: from='osd.7 [v2:192.168.123.103:6826/977971964,v1:192.168.123.103:6827/977971964]' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished 2026-03-06T23:55:49.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:49 vm08 ceph-mon[56019]: osd.6 [v2:192.168.123.108:6824/1083263007,v1:192.168.123.108:6825/1083263007] boot 2026-03-06T23:55:49.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:49 vm08 ceph-mon[56019]: osdmap e27: 8 total, 7 up, 8 in 2026-03-06T23:55:49.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:49 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-06T23:55:49.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:49 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-06T23:55:49.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:49 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-06T23:55:49.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:49 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:49.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:49 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:49.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:49 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-06T23:55:49.497 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:49 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:49.497 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:49 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:49.497 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:49 vm03 ceph-mon[48028]: from='mgr.14221 
192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-06T23:55:49.497 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:49 vm03 ceph-mon[48028]: from='osd.7 [v2:192.168.123.103:6826/977971964,v1:192.168.123.103:6827/977971964]' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm03", "root=default"]}]': finished 2026-03-06T23:55:49.497 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:49 vm03 ceph-mon[48028]: osd.6 [v2:192.168.123.108:6824/1083263007,v1:192.168.123.108:6825/1083263007] boot 2026-03-06T23:55:49.497 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:49 vm03 ceph-mon[48028]: osdmap e27: 8 total, 7 up, 8 in 2026-03-06T23:55:49.497 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:49 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-06T23:55:49.497 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:49 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-06T23:55:49.497 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:49 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-06T23:55:49.497 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:49 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:49.497 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:49 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:49.497 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:49 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-06T23:55:49.822 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:55:49.870 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":28,"num_osds":8,"num_up_osds":8,"osd_up_since":1772837749,"num_in_osds":8,"osd_in_since":1772837726,"num_remapped_pgs":1} 2026-03-06T23:55:49.870 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd dump --format=json 2026-03-06T23:55:50.215 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:55:50.544 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:50 vm03 ceph-mon[48028]: purged_snaps scrub starts 2026-03-06T23:55:50.544 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:50 vm03 ceph-mon[48028]: purged_snaps scrub ok 2026-03-06T23:55:50.544 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:50 vm03 ceph-mon[48028]: pgmap v45: 1 pgs: 1 peering; 449 KiB data, 559 MiB used, 119 GiB / 120 GiB avail 2026-03-06T23:55:50.544 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:50 vm03 ceph-mon[48028]: osd.7 [v2:192.168.123.103:6826/977971964,v1:192.168.123.103:6827/977971964] boot 2026-03-06T23:55:50.544 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:50 vm03 ceph-mon[48028]: osdmap e28: 8 total, 8 up, 8 in 2026-03-06T23:55:50.544 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:50 vm03 
ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-06T23:55:50.544 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:50 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:50.544 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:50 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:55:50.544 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:50 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch 2026-03-06T23:55:50.544 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:50 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/2732745186' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-06T23:55:50.656 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:55:50.656 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":29,"fsid":"386eb88a-19af-11f1-876d-93c9c802cc09","created":"2026-03-06T22:53:25.536586+0000","modified":"2026-03-06T22:55:50.375501+0000","last_up_change":"2026-03-06T22:55:49.371787+0000","last_in_change":"2026-03-06T22:55:26.529506+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":16,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"squid","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-06T22:55:40.922610+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"22","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair 
distribution","score_acting":7.8899998664855957,"score_stable":7.8899998664855957,"optimal_score":0.37999999523162842,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"701b2fe5-c723-48b2-a1c5-ad56099eb19c","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":28,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6800","nonce":2743874051},{"type":"v1","addr":"192.168.123.108:6801","nonce":2743874051}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6802","nonce":2743874051},{"type":"v1","addr":"192.168.123.108:6803","nonce":2743874051}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6806","nonce":2743874051},{"type":"v1","addr":"192.168.123.108:6807","nonce":2743874051}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6804","nonce":2743874051},{"type":"v1","addr":"192.168.123.108:6805","nonce":2743874051}]},"public_addr":"192.168.123.108:6801/2743874051","cluster_addr":"192.168.123.108:6803/2743874051","heartbeat_back_addr":"192.168.123.108:6807/2743874051","heartbeat_front_addr":"192.168.123.108:6805/2743874051","state":["exists","up"]},{"osd":1,"uuid":"95b3b875-de30-48a2-9299-b98c1021c33f","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":17,"up_thru":22,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6802","nonce":3895163815},{"type":"v1","addr":"192.168.123.103:6803","nonce":3895163815}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6804","nonce":3895163815},{"type":"v1","addr":"192.168.123.103:6805","nonce":3895163815}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6808","nonce":3895163815},{"type":"v1","addr":"192.168.123.103:6809","nonce":3895163815}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6806","nonce":3895163815},{"type":"v1","addr":"192.168.123.103:6807","nonce":3895163815}]},"public_addr":"192.168.123.103:6803/3895163815","cluster_addr":"192.168.123.103:6805/3895163815","heartbeat_back_addr":"192.168.123.103:6809/3895163815","heartbeat_front_addr":"192.168.123.103:6807/3895163815","state":["exists","up"]},{"osd":2,"uuid":"043996d9-da37-4bab-ab77-3dc0b3e20036","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":19,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6808","nonce":1410769664},{"type":"v1","addr":"192.168.123.108:6809","nonce":1410769664}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6810","nonce":1410769664},{"type":"v1","addr":"192.168.123.108:6811","nonce":1410769664}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6814","nonce":1410769664},{"type":"v1","addr":"192.168.123.108:6815","nonce":1410769664}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6812","nonce":1410769664},{"type":"v1","addr":"192.168.123.108:6813","nonce":1410769664}]},"public_addr":"192.168.123.108:6809/1410769664","cluster_addr":"192.168.123.108:6811/1410769664","heartbeat_back_addr":"192.168.123.108:6815/1410769664","heartbeat_front_addr":"192.168.123.108:6813/1410769664","state":["exists","up"]},{"osd":3,"uuid":"b2c4b60c-dcce-4d36-b5bb-ca54018766c4","up":1,"in":1,"weight":1,"primary_affinit
y":1,"last_clean_begin":0,"last_clean_end":0,"up_from":22,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6810","nonce":3949859068},{"type":"v1","addr":"192.168.123.103:6811","nonce":3949859068}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6812","nonce":3949859068},{"type":"v1","addr":"192.168.123.103:6813","nonce":3949859068}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6816","nonce":3949859068},{"type":"v1","addr":"192.168.123.103:6817","nonce":3949859068}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6814","nonce":3949859068},{"type":"v1","addr":"192.168.123.103:6815","nonce":3949859068}]},"public_addr":"192.168.123.103:6811/3949859068","cluster_addr":"192.168.123.103:6813/3949859068","heartbeat_back_addr":"192.168.123.103:6817/3949859068","heartbeat_front_addr":"192.168.123.103:6815/3949859068","state":["exists","up"]},{"osd":4,"uuid":"bd90deb9-db37-45e1-b727-12ab786acc3e","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":23,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6816","nonce":2504550642},{"type":"v1","addr":"192.168.123.108:6817","nonce":2504550642}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6818","nonce":2504550642},{"type":"v1","addr":"192.168.123.108:6819","nonce":2504550642}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6822","nonce":2504550642},{"type":"v1","addr":"192.168.123.108:6823","nonce":2504550642}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6820","nonce":2504550642},{"type":"v1","addr":"192.168.123.108:6821","nonce":2504550642}]},"public_addr":"192.168.123.108:6817/2504550642","cluster_addr":"192.168.123.108:6819/2504550642","heartbeat_back_addr":"192.168.123.108:6823/2504550642","heartbeat_front_addr":"192.168.123.108:6821/2504550642","state":["exists","up"]},{"osd":5,"uuid":"13033dde-57fa-42c6-a95b-9cab813c93a3","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":25,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6818","nonce":1545819671},{"type":"v1","addr":"192.168.123.103:6819","nonce":1545819671}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6820","nonce":1545819671},{"type":"v1","addr":"192.168.123.103:6821","nonce":1545819671}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6824","nonce":1545819671},{"type":"v1","addr":"192.168.123.103:6825","nonce":1545819671}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6822","nonce":1545819671},{"type":"v1","addr":"192.168.123.103:6823","nonce":1545819671}]},"public_addr":"192.168.123.103:6819/1545819671","cluster_addr":"192.168.123.103:6821/1545819671","heartbeat_back_addr":"192.168.123.103:6825/1545819671","heartbeat_front_addr":"192.168.123.103:6823/1545819671","state":["exists","up"]},{"osd":6,"uuid":"bc010e6f-0018-4ea9-a094-62c7f3721283","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":27,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6824","nonce":1083263007},{"type":"v1","addr":"192.168.123.108:6825","nonce":1083263007}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6826","nonce":1083263007},{"type":"v1","addr":"192.1
68.123.108:6827","nonce":1083263007}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6830","nonce":1083263007},{"type":"v1","addr":"192.168.123.108:6831","nonce":1083263007}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6828","nonce":1083263007},{"type":"v1","addr":"192.168.123.108:6829","nonce":1083263007}]},"public_addr":"192.168.123.108:6825/1083263007","cluster_addr":"192.168.123.108:6827/1083263007","heartbeat_back_addr":"192.168.123.108:6831/1083263007","heartbeat_front_addr":"192.168.123.108:6829/1083263007","state":["exists","up"]},{"osd":7,"uuid":"dcbdaaaf-999f-4d53-884e-1b10c94faa7e","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":28,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6826","nonce":977971964},{"type":"v1","addr":"192.168.123.103:6827","nonce":977971964}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6828","nonce":977971964},{"type":"v1","addr":"192.168.123.103:6829","nonce":977971964}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6832","nonce":977971964},{"type":"v1","addr":"192.168.123.103:6833","nonce":977971964}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6830","nonce":977971964},{"type":"v1","addr":"192.168.123.103:6831","nonce":977971964}]},"public_addr":"192.168.123.103:6827/977971964","cluster_addr":"192.168.123.103:6829/977971964","heartbeat_back_addr":"192.168.123.103:6833/977971964","heartbeat_front_addr":"192.168.123.103:6831/977971964","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T22:55:35.873655+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T22:55:36.629275+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T22:55:38.739662+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T22:55:40.457150+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T22:55:41.696092+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T22:55:43.837483+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T22:55:45.322311+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T22:55:47.574564+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.103:0/1702930659":"2026-03-07T22:54:48.882784+0000","192.168.123.103:0/2772543791":"2026-03-07T22:53:52.758173+0000","192.168.123.103:6800/2470745140":"2026-03-07T22:54:12.185040+0000","192.168.123.103:0/1
003274800":"2026-03-07T22:53:52.758173+0000","192.168.123.103:0/2059920708":"2026-03-07T22:53:52.758173+0000","192.168.123.103:6801/2395084648":"2026-03-07T22:54:48.882784+0000","192.168.123.103:0/2088783198":"2026-03-07T22:54:48.882784+0000","192.168.123.103:6800/2538238857":"2026-03-07T22:53:52.758173+0000","192.168.123.103:6801/2538238857":"2026-03-07T22:53:52.758173+0000","192.168.123.103:0/2541355550":"2026-03-07T22:54:48.882784+0000","192.168.123.103:6801/2470745140":"2026-03-07T22:54:12.185040+0000","192.168.123.103:0/245071339":"2026-03-07T22:54:12.185040+0000","192.168.123.103:0/2764704567":"2026-03-07T22:54:12.185040+0000","192.168.123.103:0/2031024266":"2026-03-07T22:54:12.185040+0000","192.168.123.103:6800/2395084648":"2026-03-07T22:54:48.882784+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-06T23:55:50.730 INFO:tasks.cephadm.ceph_manager.ceph:[{'pool': 1, 'pool_name': '.mgr', 'create_time': '2026-03-06T22:55:40.922610+0000', 'flags': 1, 'flags_names': 'hashpspool', 'type': 1, 'size': 3, 'min_size': 2, 'crush_rule': 0, 'peering_crush_bucket_count': 0, 'peering_crush_bucket_target': 0, 'peering_crush_bucket_barrier': 0, 'peering_crush_bucket_mandatory_member': 2147483647, 'is_stretch_pool': False, 'object_hash': 2, 'pg_autoscale_mode': 'off', 'pg_num': 1, 'pg_placement_num': 1, 'pg_placement_num_target': 1, 'pg_num_target': 1, 'pg_num_pending': 1, 'last_pg_merge_meta': {'source_pgid': '0.0', 'ready_epoch': 0, 'last_epoch_started': 0, 'last_epoch_clean': 0, 'source_version': "0'0", 'target_version': "0'0"}, 'last_change': '22', 'last_force_op_resend': '0', 'last_force_op_resend_prenautilus': '0', 'last_force_op_resend_preluminous': '0', 'auid': 0, 'snap_mode': 'selfmanaged', 'snap_seq': 0, 'snap_epoch': 0, 'pool_snaps': [], 'removed_snaps': '[]', 'quota_max_bytes': 0, 'quota_max_objects': 0, 'tiers': [], 'tier_of': -1, 'read_tier': -1, 'write_tier': -1, 'cache_mode': 'none', 'target_max_bytes': 0, 'target_max_objects': 0, 'cache_target_dirty_ratio_micro': 400000, 'cache_target_dirty_high_ratio_micro': 600000, 'cache_target_full_ratio_micro': 800000, 'cache_min_flush_age': 0, 'cache_min_evict_age': 0, 'erasure_code_profile': '', 'hit_set_params': {'type': 'none'}, 'hit_set_period': 0, 'hit_set_count': 0, 'use_gmt_hitset': True, 'min_read_recency_for_promote': 0, 'min_write_recency_for_promote': 0, 'hit_set_grade_decay_rate': 0, 'hit_set_search_last_n': 0, 'grade_table': [], 'stripe_width': 0, 'expected_num_objects': 0, 'fast_read': False, 'options': {'pg_num_max': 32, 'pg_num_min': 1}, 'application_metadata': {'mgr': {}}, 'read_balance': {'score_type': 'Fair distribution', 'score_acting': 7.889999866485596, 'score_stable': 7.889999866485596, 'optimal_score': 0.3799999952316284, 'raw_score_acting': 3, 'raw_score_stable': 3, 'primary_affinity_weighted': 1, 'average_primary_affinity': 1, 'average_primary_affinity_weighted': 1}}] 2026-03-06T23:55:50.730 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd pool get .mgr pg_num 
2026-03-06T23:55:50.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:50 vm08 ceph-mon[56019]: purged_snaps scrub starts
2026-03-06T23:55:50.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:50 vm08 ceph-mon[56019]: purged_snaps scrub ok
2026-03-06T23:55:50.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:50 vm08 ceph-mon[56019]: pgmap v45: 1 pgs: 1 peering; 449 KiB data, 559 MiB used, 119 GiB / 120 GiB avail
2026-03-06T23:55:50.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:50 vm08 ceph-mon[56019]: osd.7 [v2:192.168.123.103:6826/977971964,v1:192.168.123.103:6827/977971964] boot
2026-03-06T23:55:50.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:50 vm08 ceph-mon[56019]: osdmap e28: 8 total, 8 up, 8 in
2026-03-06T23:55:50.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:50 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch
2026-03-06T23:55:50.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:50 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:50.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:50 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:50.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:50 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config rm", "who": "osd/host:vm08", "name": "osd_memory_target"}]: dispatch
2026-03-06T23:55:50.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:50 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/2732745186' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch
2026-03-06T23:55:51.062 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config
2026-03-06T23:55:51.374 INFO:teuthology.orchestra.run.vm03.stdout:pg_num: 1
2026-03-06T23:55:51.451 INFO:tasks.cephadm:Setting up client nodes...
2026-03-06T23:55:51.451 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph auth get-or-create client.0 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *'
2026-03-06T23:55:51.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:51 vm08 ceph-mon[56019]: Detected new or changed devices on vm08
2026-03-06T23:55:51.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:51 vm08 ceph-mon[56019]: osdmap e29: 8 total, 8 up, 8 in
2026-03-06T23:55:51.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:51 vm08 ceph-mon[56019]: Detected new or changed devices on vm03
2026-03-06T23:55:51.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:51 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:51.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:51 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:51.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:51 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-06T23:55:51.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:51 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T23:55:51.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:51 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-06T23:55:51.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:51 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:51.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:51 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-06T23:55:51.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:51 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/4170791288' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch
2026-03-06T23:55:51.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:51 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/1977262617' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch
2026-03-06T23:55:51.778 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config
2026-03-06T23:55:51.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:51 vm03 ceph-mon[48028]: Detected new or changed devices on vm08
2026-03-06T23:55:51.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:51 vm03 ceph-mon[48028]: osdmap e29: 8 total, 8 up, 8 in
2026-03-06T23:55:51.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:51 vm03 ceph-mon[48028]: Detected new or changed devices on vm03
2026-03-06T23:55:51.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:51 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:51.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:51 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:51.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:51 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config rm", "who": "osd/host:vm03", "name": "osd_memory_target"}]: dispatch
2026-03-06T23:55:51.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:51 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T23:55:51.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:51 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-06T23:55:51.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:51 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:55:51.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:51 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-06T23:55:51.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:51 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/4170791288' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch
2026-03-06T23:55:51.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:51 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/1977262617' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch
2026-03-06T23:55:52.138 INFO:teuthology.orchestra.run.vm03.stdout:[client.0]
2026-03-06T23:55:52.138 INFO:teuthology.orchestra.run.vm03.stdout: key = AQB4W6tp+5i7BxAAEe6BndpZr2izJDmZST38yQ==
2026-03-06T23:55:52.191 DEBUG:teuthology.orchestra.run.vm03:> set -ex
2026-03-06T23:55:52.191 DEBUG:teuthology.orchestra.run.vm03:> sudo dd of=/etc/ceph/ceph.client.0.keyring
2026-03-06T23:55:52.191 DEBUG:teuthology.orchestra.run.vm03:> sudo chmod 0644 /etc/ceph/ceph.client.0.keyring
2026-03-06T23:55:52.226 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph auth get-or-create client.1 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *'
2026-03-06T23:55:52.401 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:52 vm03 ceph-mon[48028]: pgmap v48: 1 pgs: 1 peering; 449 KiB data, 1013 MiB used, 159 GiB / 160 GiB avail
2026-03-06T23:55:52.401 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:52 vm03 ceph-mon[48028]: osdmap e30: 8 total, 8 up, 8 in
2026-03-06T23:55:52.401 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:52 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/353666618' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch
2026-03-06T23:55:52.401 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:52 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/353666618' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished
2026-03-06T23:55:52.537 INFO:teuthology.orchestra.run.vm08.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm08/config
2026-03-06T23:55:52.559 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:52 vm08 ceph-mon[56019]: pgmap v48: 1 pgs: 1 peering; 449 KiB data, 1013 MiB used, 159 GiB / 160 GiB avail
2026-03-06T23:55:52.560 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:52 vm08 ceph-mon[56019]: osdmap e30: 8 total, 8 up, 8 in
2026-03-06T23:55:52.560 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:52 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/353666618' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch
2026-03-06T23:55:52.560 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:52 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/353666618' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished
2026-03-06T23:55:52.893 INFO:teuthology.orchestra.run.vm08.stdout:[client.1]
2026-03-06T23:55:52.893 INFO:teuthology.orchestra.run.vm08.stdout: key = AQB4W6tpJJ+uNBAAMhXsWU3b1tExtrOJTBTmMg==
2026-03-06T23:55:53.098 DEBUG:teuthology.orchestra.run.vm08:> set -ex
2026-03-06T23:55:53.098 DEBUG:teuthology.orchestra.run.vm08:> sudo dd of=/etc/ceph/ceph.client.1.keyring
2026-03-06T23:55:53.098 DEBUG:teuthology.orchestra.run.vm08:> sudo chmod 0644 /etc/ceph/ceph.client.1.keyring
2026-03-06T23:55:53.139 INFO:tasks.ceph:Waiting until ceph daemons up and pgs clean...
2026-03-06T23:55:53.139 INFO:tasks.cephadm.ceph_manager.ceph:waiting for mgr available
2026-03-06T23:55:53.139 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph mgr dump --format=json
2026-03-06T23:55:53.496 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config
2026-03-06T23:55:53.521 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:53 vm03 ceph-mon[48028]: from='client.? 192.168.123.108:0/2831991862' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch
2026-03-06T23:55:53.521 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:53 vm03 ceph-mon[48028]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch
2026-03-06T23:55:53.521 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:53 vm03 ceph-mon[48028]: from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished
2026-03-06T23:55:53.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:53 vm08 ceph-mon[56019]: from='client.? 192.168.123.108:0/2831991862' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch
2026-03-06T23:55:53.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:53 vm08 ceph-mon[56019]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch
2026-03-06T23:55:53.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:53 vm08 ceph-mon[56019]: from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished
2026-03-06T23:55:53.860 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-06T23:55:53.935 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":19,"flags":0,"active_gid":14221,"active_name":"vm03.xzkqce","active_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6800","nonce":3374258982},{"type":"v1","addr":"192.168.123.103:6801","nonce":3374258982}]},"active_addr":"192.168.123.103:6801/3374258982","active_change":"2026-03-06T22:54:48.882872+0000","active_mgr_features":4540701547738038271,"available":true,"standbys":[{"gid":14252,"name":"vm08.bnopnr","mgr_features":4540701547738038271,"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2359","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","read","upmap","upmap-read"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt optimization","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_pg_upmap_activity":{"name":"update_pg_upmap_activity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Updates pg_upmap activity stats to be used in `balancer status detail`","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"cephadm_log_destination":{"name":"cephadm_log_destination","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":["file","file,syslog","syslog"],"desc":"Destination for cephadm command's persistent logging","long_desc":"","tags":[],"see_also":[]},"cgroups_split":{"name":"cgroups_split","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Pass --cgroups=split when cephadm creates containers (currently podman only)","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.25.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_elasticsearch":{"name":"container_image_elasticsearch","type":"str","level":"advanced","flags":0,"default_value":"quay.io/omrizeneva/elasticsearch:6.8.23","min":"","max":"","enum_allowed":[],"desc":"elasticsearch container image","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/grafana:10.4.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container
image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_agent":{"name":"container_image_jaeger_agent","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-agent:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger agent container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_collector":{"name":"container_image_jaeger_collector","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-collector:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger collector container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_query":{"name":"container_image_jaeger_query","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-query:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger query container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/keepalived:2.2.4","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_loki":{"name":"container_image_loki","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/loki:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Loki container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.7.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_nvmeof":{"name":"container_image_nvmeof","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nvmeof:1.2.5","min":"","max":"","enum_allowed":[],"desc":"Nvme-of container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.51.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_promtail":{"name":"container_image_promtail","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/promtail:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Promtail container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba":{"name":"container_image_samba","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-server:devbuilds-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba/SMB container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with 
`--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_cephadm_command_timeout":{"name":"default_cephadm_command_timeout","type":"int","level":"advanced","flags":0,"default_value":"900","min":"","max":"","enum_allowed":[],"desc":"Default timeout applied to cephadm commands run directly on the host (in seconds)","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"quay.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"grafana_dashboards_path":{"name":"grafana_dashboards_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/grafana/dashboards/ceph-dashboard/","min":"","max":"","enum_allowed":[],"desc":"location of dashboards to include in grafana deployments","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"hw_monitoring":{"name":"hw_monitoring","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Deploy hw monitoring daemon on every host.","long_desc":"","tags":[],"see_also":[]},"inventory_list_all":{"name":"inventory_list_all","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Whether ceph-volume inventory should report more devices (mostly mappers (LVs / mpaths), partitions...)","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_refresh_metadata":{"name":"log_refresh_metadata","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Log all refresh metadata. Includes daemon, device, and host info collected regularly. 
Only has effect if logging at debug level","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"oob_default_addr":{"name":"oob_default_addr","type":"str","level":"advanced","flags":0,"default_value":"169.254.1.1","min":"","max":"","enum_allowed":[],"desc":"Default address for RedFish API (oob management).","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. 
Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"service_discovery_port":{"name":"service_discovery_port","type":"int","level":"advanced","flags":0,"default_value":"8765","min":"","max":"","enum_allowed":[],"desc":"cephadm service discovery port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_count_max":{"name":"ssh_keepalive_count_max","type":"int","level":"advanced","flags":0,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"How many times ssh connections can fail liveness checks before the host is marked offline","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_interval":{"name":"ssh_keepalive_interval","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"How often ssh connections are checked for liveness","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_DASHBOARD":{"name":"FEATURE_TOGGLE_DASHBOARD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0
,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"def
ault_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_POLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"cross_origin_url":{"name":"cross_origin_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"redirect_resolve_ip_addr":{"name":"redirect_resolve_ip_addr","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this long","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. 
You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local 
pool","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":
"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"fail_fs":{"name":"fail_fs","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Fail filesystem for rapid multi-rank mds upgrade","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[
]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. 
Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to sleep","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"exclude_perf_counters":{"name":"exclude_perf_counters","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Do not include perf-counters in the metrics output","long_desc":"Gathering perf-counters from a single Prometheus exporter can degrade ceph-mgr performance, especially in large clusters. Instead, Ceph-exporter daemons are now used by default for perf-counter gathering. 
This should only be disabled when no ceph-exporters are deployed.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":1,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"st
r","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_requests":{"name":"max_requests","type":"int","level":"advanced","flags":0,"default_value":"500","min":"","max":"","enum_allowed":[],"desc":"Maximum number of requests to keep in memory. When new request comes in, the oldest request will be removed if the number of requests exceeds the max request number. if un-finished request is removed, error message will be logged in the ceph-mgr log.","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rgw","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"secondary_zone_period_retry_limit":{"name":"secondary_zone_period_retry_limit","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"RGW module period update retry limit for secondary 
site","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":""
,"long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_a
llowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard_description":{"name":"leaderboard_description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"adv
anced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"periodic_async_work":{"name":"periodic_async_work","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Periodically check for async work","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_no_wait":{"name":"snapshot_clone_no_wait","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Reject subvolume clone request when cloner threads are 
busy","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}]}],"modules":["cephadm","dashboard","iostat","nfs","prometheus","restful"],"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health 
status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 = Sunday, 1 = 
Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2359","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","read","upmap","upmap-read"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt 
optimization","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_pg_upmap_activity":{"name":"update_pg_upmap_activity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Updates pg_upmap activity stats to be used in `balancer status detail`","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. 
Enabling this option can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"cephadm_log_destination":{"name":"cephadm_log_destination","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":["file","file,syslog","syslog"],"desc":"Destination for cephadm command's persistent logging","long_desc":"","tags":[],"see_also":[]},"cgroups_split":{"name":"cgroups_split","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Pass --cgroups=split when cephadm creates containers (currently podman only)","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.25.0","min":"","max":"","enum_allowed":[],"desc":"Alertmanager container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_elasticsearch":{"name":"container_image_elasticsearch","type":"str","level":"advanced","flags":0,"default_value":"quay.io/omrizeneva/elasticsearch:6.8.23","min":"","max":"","enum_allowed":[],"desc":"elasticsearch container image","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/grafana:10.4.0","min":"","max":"","enum_allowed":[],"desc":"Grafana container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_agent":{"name":"container_image_jaeger_agent","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-agent:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger agent container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_collector":{"name":"container_image_jaeger_collector","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-collector:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger 
collector container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_query":{"name":"container_image_jaeger_query","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-query:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger query container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/keepalived:2.2.4","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_loki":{"name":"container_image_loki","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/loki:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Loki container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.7.0","min":"","max":"","enum_allowed":[],"desc":"Node exporter container image","long_desc":"","tags":[],"see_also":[]},"container_image_nvmeof":{"name":"container_image_nvmeof","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nvmeof:1.2.5","min":"","max":"","enum_allowed":[],"desc":"Nvme-of container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.51.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_promtail":{"name":"container_image_promtail","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/promtail:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Promtail container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba":{"name":"container_image_samba","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-server:devbuilds-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba/SMB container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with `--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_cephadm_command_timeout":{"name":"default_cephadm_command_timeout","type":"int","level":"advanced","flags":0,"default_value":"900","min":"","max":"","enum_allowed":[],"desc":"Default timeout applied to cephadm commands run directly on the host (in seconds)","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"quay.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. 
This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"grafana_dashboards_path":{"name":"grafana_dashboards_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/grafana/dashboards/ceph-dashboard/","min":"","max":"","enum_allowed":[],"desc":"location of dashboards to include in grafana deployments","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"hw_monitoring":{"name":"hw_monitoring","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Deploy hw monitoring daemon on every host.","long_desc":"","tags":[],"see_also":[]},"inventory_list_all":{"name":"inventory_list_all","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Whether ceph-volume inventory should report more devices (mostly mappers (LVs / mpaths), partitions...)","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_refresh_metadata":{"name":"log_refresh_metadata","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Log all refresh metadata. Includes daemon, device, and host info collected regularly. 
Only has effect if logging at debug level","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"oob_default_addr":{"name":"oob_default_addr","type":"str","level":"advanced","flags":0,"default_value":"169.254.1.1","min":"","max":"","enum_allowed":[],"desc":"Default address for RedFish API (oob management).","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. 
Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"service_discovery_port":{"name":"service_discovery_port","type":"int","level":"advanced","flags":0,"default_value":"8765","min":"","max":"","enum_allowed":[],"desc":"cephadm service discovery port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_count_max":{"name":"ssh_keepalive_count_max","type":"int","level":"advanced","flags":0,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"How many times ssh connections can fail liveness checks before the host is marked offline","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_interval":{"name":"ssh_keepalive_interval","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"How often ssh connections are checked for liveness","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_DASHBOARD":{"name":"FEATURE_TOGGLE_DASHBOARD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0
,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"def
ault_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_POLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"cross_origin_url":{"name":"cross_origin_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"redirect_resolve_ip_addr":{"name":"redirect_resolve_ip_addr","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this long","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. 
You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local 
pool","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":
"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"fail_fs":{"name":"fail_fs","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Fail filesystem for rapid multi-rank mds upgrade","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[
]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. 
Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to sleep","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"exclude_perf_counters":{"name":"exclude_perf_counters","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Do not include perf-counters in the metrics output","long_desc":"Gathering perf-counters from a single Prometheus exporter can degrade ceph-mgr performance, especially in large clusters. Instead, Ceph-exporter daemons are now used by default for perf-counter gathering. 
This should only be disabled when no ceph-exporters are deployed.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":1,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"st
r","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_requests":{"name":"max_requests","type":"int","level":"advanced","flags":0,"default_value":"500","min":"","max":"","enum_allowed":[],"desc":"Maximum number of requests to keep in memory. When new request comes in, the oldest request will be removed if the number of requests exceeds the max request number. if un-finished request is removed, error message will be logged in the ceph-mgr log.","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rgw","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"secondary_zone_period_retry_limit":{"name":"secondary_zone_period_retry_limit","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"RGW module period update retry limit for secondary 
site","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":""
,"long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_a
llowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard_description":{"name":"leaderboard_description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"adv
anced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"periodic_async_work":{"name":"periodic_async_work","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Periodically check for async work","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_no_wait":{"name":"snapshot_clone_no_wait","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Reject subvolume clone request when cloner threads are 
busy","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}],"services":{"dashboard":"https://192.168.123.103:8443/","prometheus":"http://192.168.123.103:9283/"},"always_on_modules":{"octopus":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"pacific":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"quincy":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"reef":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"squid":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"]},"force_disabled_modules":{},"last_failure_osd_epoch":5,"active_clients":[{"name":"devicehealth","addrvec":[{"type":"v2","addr":"192.168.123.103:0","nonce":180696263}]},{"name":"libcephsqlite","addrvec":[{"type":"v2","addr":"192.168.123.103:0","nonce":4288693814}]},{"nam
e":"rbd_support","addrvec":[{"type":"v2","addr":"192.168.123.103:0","nonce":3050301826}]},{"name":"volumes","addrvec":[{"type":"v2","addr":"192.168.123.103:0","nonce":1003942540}]}]} 2026-03-06T23:55:53.937 INFO:tasks.cephadm.ceph_manager.ceph:mgr available! 2026-03-06T23:55:53.937 INFO:tasks.cephadm.ceph_manager.ceph:waiting for all up 2026-03-06T23:55:53.937 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd dump --format=json 2026-03-06T23:55:54.266 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:55:54.625 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:54 vm03 ceph-mon[48028]: pgmap v50: 1 pgs: 1 peering; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-06T23:55:54.625 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:54 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/1531215012' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-06T23:55:54.625 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:55:54.625 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":30,"fsid":"386eb88a-19af-11f1-876d-93c9c802cc09","created":"2026-03-06T22:53:25.536586+0000","modified":"2026-03-06T22:55:51.386319+0000","last_up_change":"2026-03-06T22:55:49.371787+0000","last_in_change":"2026-03-06T22:55:26.529506+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":16,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"squid","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-06T22:55:40.922610+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"22","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_
min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair distribution","score_acting":7.8899998664855957,"score_stable":7.8899998664855957,"optimal_score":0.37999999523162842,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"701b2fe5-c723-48b2-a1c5-ad56099eb19c","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":28,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6800","nonce":2743874051},{"type":"v1","addr":"192.168.123.108:6801","nonce":2743874051}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6802","nonce":2743874051},{"type":"v1","addr":"192.168.123.108:6803","nonce":2743874051}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6806","nonce":2743874051},{"type":"v1","addr":"192.168.123.108:6807","nonce":2743874051}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6804","nonce":2743874051},{"type":"v1","addr":"192.168.123.108:6805","nonce":2743874051}]},"public_addr":"192.168.123.108:6801/2743874051","cluster_addr":"192.168.123.108:6803/2743874051","heartbeat_back_addr":"192.168.123.108:6807/2743874051","heartbeat_front_addr":"192.168.123.108:6805/2743874051","state":["exists","up"]},{"osd":1,"uuid":"95b3b875-de30-48a2-9299-b98c1021c33f","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":17,"up_thru":22,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6802","nonce":3895163815},{"type":"v1","addr":"192.168.123.103:6803","nonce":3895163815}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6804","nonce":3895163815},{"type":"v1","addr":"192.168.123.103:6805","nonce":3895163815}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6808","nonce":3895163815},{"type":"v1","addr":"192.168.123.103:6809","nonce":3895163815}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6806","nonce":3895163815},{"type":"v1","addr":"192.168.123.103:6807","nonce":3895163815}]},"public_addr":"192.168.123.103:6803/3895163815","cluster_addr":"192.168.123.103:6805/3895163815","heartbeat_back_addr":"192.168.123.103:6809/3895163815","heartbeat_front_addr":"192.168.123.103:6807/3895163815","state":["exists","up"]},{"osd":2,"uuid":"043996d9-da37-4bab-ab77-3dc0b3e20036","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":19,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6808","nonce":1410769664},{"type":"v1","addr":"192.168.123.108:6809","nonce":1410769664}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6810","nonce":1410769664},{"type":"v1","addr":"192.168.123.108:6811","nonce":1410769664}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6814","nonce":1410769664},{"type":"v1","addr":"192.168.123.108:6815","nonce":1410769664}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6812","nonce":1410769664},{"type":"v1","addr":"192.168.123.108:6813","nonce":1410769664}]},"public_addr":"192.168.123.108:6809/1410769664","cluster_addr":"192.168.123.108:6811/1410769664","heartbeat_back_addr":"192.168.123.108:6815/1410769664","heartbeat_front_addr":"192.168.123.108:6813/1410769664","state":["exists","up"]},{"osd":3,"uuid":"b2
c4b60c-dcce-4d36-b5bb-ca54018766c4","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":22,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6810","nonce":3949859068},{"type":"v1","addr":"192.168.123.103:6811","nonce":3949859068}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6812","nonce":3949859068},{"type":"v1","addr":"192.168.123.103:6813","nonce":3949859068}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6816","nonce":3949859068},{"type":"v1","addr":"192.168.123.103:6817","nonce":3949859068}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6814","nonce":3949859068},{"type":"v1","addr":"192.168.123.103:6815","nonce":3949859068}]},"public_addr":"192.168.123.103:6811/3949859068","cluster_addr":"192.168.123.103:6813/3949859068","heartbeat_back_addr":"192.168.123.103:6817/3949859068","heartbeat_front_addr":"192.168.123.103:6815/3949859068","state":["exists","up"]},{"osd":4,"uuid":"bd90deb9-db37-45e1-b727-12ab786acc3e","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":23,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6816","nonce":2504550642},{"type":"v1","addr":"192.168.123.108:6817","nonce":2504550642}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6818","nonce":2504550642},{"type":"v1","addr":"192.168.123.108:6819","nonce":2504550642}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6822","nonce":2504550642},{"type":"v1","addr":"192.168.123.108:6823","nonce":2504550642}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6820","nonce":2504550642},{"type":"v1","addr":"192.168.123.108:6821","nonce":2504550642}]},"public_addr":"192.168.123.108:6817/2504550642","cluster_addr":"192.168.123.108:6819/2504550642","heartbeat_back_addr":"192.168.123.108:6823/2504550642","heartbeat_front_addr":"192.168.123.108:6821/2504550642","state":["exists","up"]},{"osd":5,"uuid":"13033dde-57fa-42c6-a95b-9cab813c93a3","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":25,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6818","nonce":1545819671},{"type":"v1","addr":"192.168.123.103:6819","nonce":1545819671}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6820","nonce":1545819671},{"type":"v1","addr":"192.168.123.103:6821","nonce":1545819671}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6824","nonce":1545819671},{"type":"v1","addr":"192.168.123.103:6825","nonce":1545819671}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6822","nonce":1545819671},{"type":"v1","addr":"192.168.123.103:6823","nonce":1545819671}]},"public_addr":"192.168.123.103:6819/1545819671","cluster_addr":"192.168.123.103:6821/1545819671","heartbeat_back_addr":"192.168.123.103:6825/1545819671","heartbeat_front_addr":"192.168.123.103:6823/1545819671","state":["exists","up"]},{"osd":6,"uuid":"bc010e6f-0018-4ea9-a094-62c7f3721283","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":27,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6824","nonce":1083263007},{"type":"v1","addr":"192.168.123.108:6825","nonce":1083263007}]},"cluster_addrs":{"addrvec":[{"type":"v2"
,"addr":"192.168.123.108:6826","nonce":1083263007},{"type":"v1","addr":"192.168.123.108:6827","nonce":1083263007}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6830","nonce":1083263007},{"type":"v1","addr":"192.168.123.108:6831","nonce":1083263007}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6828","nonce":1083263007},{"type":"v1","addr":"192.168.123.108:6829","nonce":1083263007}]},"public_addr":"192.168.123.108:6825/1083263007","cluster_addr":"192.168.123.108:6827/1083263007","heartbeat_back_addr":"192.168.123.108:6831/1083263007","heartbeat_front_addr":"192.168.123.108:6829/1083263007","state":["exists","up"]},{"osd":7,"uuid":"dcbdaaaf-999f-4d53-884e-1b10c94faa7e","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":28,"up_thru":29,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6826","nonce":977971964},{"type":"v1","addr":"192.168.123.103:6827","nonce":977971964}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6828","nonce":977971964},{"type":"v1","addr":"192.168.123.103:6829","nonce":977971964}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6832","nonce":977971964},{"type":"v1","addr":"192.168.123.103:6833","nonce":977971964}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6830","nonce":977971964},{"type":"v1","addr":"192.168.123.103:6831","nonce":977971964}]},"public_addr":"192.168.123.103:6827/977971964","cluster_addr":"192.168.123.103:6829/977971964","heartbeat_back_addr":"192.168.123.103:6833/977971964","heartbeat_front_addr":"192.168.123.103:6831/977971964","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T22:55:35.873655+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T22:55:36.629275+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T22:55:38.739662+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T22:55:40.457150+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T22:55:41.696092+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T22:55:43.837483+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T22:55:45.322311+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T22:55:47.574564+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.103:0/1702930659":"2026-03-07T22:54:48.882784+0000","192.168.123.103:0/2772543791":"2026-03-07T22:53:52.758173+0000","192.168.1
23.103:6800/2470745140":"2026-03-07T22:54:12.185040+0000","192.168.123.103:0/1003274800":"2026-03-07T22:53:52.758173+0000","192.168.123.103:0/2059920708":"2026-03-07T22:53:52.758173+0000","192.168.123.103:6801/2395084648":"2026-03-07T22:54:48.882784+0000","192.168.123.103:0/2088783198":"2026-03-07T22:54:48.882784+0000","192.168.123.103:6800/2538238857":"2026-03-07T22:53:52.758173+0000","192.168.123.103:6801/2538238857":"2026-03-07T22:53:52.758173+0000","192.168.123.103:0/2541355550":"2026-03-07T22:54:48.882784+0000","192.168.123.103:6801/2470745140":"2026-03-07T22:54:12.185040+0000","192.168.123.103:0/245071339":"2026-03-07T22:54:12.185040+0000","192.168.123.103:0/2764704567":"2026-03-07T22:54:12.185040+0000","192.168.123.103:0/2031024266":"2026-03-07T22:54:12.185040+0000","192.168.123.103:6800/2395084648":"2026-03-07T22:54:48.882784+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-06T23:55:54.695 INFO:tasks.cephadm.ceph_manager.ceph:all up! 2026-03-06T23:55:54.695 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd dump --format=json 2026-03-06T23:55:54.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:54 vm08 ceph-mon[56019]: pgmap v50: 1 pgs: 1 peering; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-06T23:55:54.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:54 vm08 ceph-mon[56019]: from='client.? 
192.168.123.103:0/1531215012' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-06T23:55:55.028 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:55:55.339 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:55:55.339 INFO:teuthology.orchestra.run.vm03.stdout:{"epoch":30,"fsid":"386eb88a-19af-11f1-876d-93c9c802cc09","created":"2026-03-06T22:53:25.536586+0000","modified":"2026-03-06T22:55:51.386319+0000","last_up_change":"2026-03-06T22:55:49.371787+0000","last_in_change":"2026-03-06T22:55:26.529506+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":16,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"squid","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-06T22:55:40.922610+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"22","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair 
distribution","score_acting":7.8899998664855957,"score_stable":7.8899998664855957,"optimal_score":0.37999999523162842,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"701b2fe5-c723-48b2-a1c5-ad56099eb19c","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":28,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6800","nonce":2743874051},{"type":"v1","addr":"192.168.123.108:6801","nonce":2743874051}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6802","nonce":2743874051},{"type":"v1","addr":"192.168.123.108:6803","nonce":2743874051}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6806","nonce":2743874051},{"type":"v1","addr":"192.168.123.108:6807","nonce":2743874051}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6804","nonce":2743874051},{"type":"v1","addr":"192.168.123.108:6805","nonce":2743874051}]},"public_addr":"192.168.123.108:6801/2743874051","cluster_addr":"192.168.123.108:6803/2743874051","heartbeat_back_addr":"192.168.123.108:6807/2743874051","heartbeat_front_addr":"192.168.123.108:6805/2743874051","state":["exists","up"]},{"osd":1,"uuid":"95b3b875-de30-48a2-9299-b98c1021c33f","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":17,"up_thru":22,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6802","nonce":3895163815},{"type":"v1","addr":"192.168.123.103:6803","nonce":3895163815}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6804","nonce":3895163815},{"type":"v1","addr":"192.168.123.103:6805","nonce":3895163815}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6808","nonce":3895163815},{"type":"v1","addr":"192.168.123.103:6809","nonce":3895163815}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6806","nonce":3895163815},{"type":"v1","addr":"192.168.123.103:6807","nonce":3895163815}]},"public_addr":"192.168.123.103:6803/3895163815","cluster_addr":"192.168.123.103:6805/3895163815","heartbeat_back_addr":"192.168.123.103:6809/3895163815","heartbeat_front_addr":"192.168.123.103:6807/3895163815","state":["exists","up"]},{"osd":2,"uuid":"043996d9-da37-4bab-ab77-3dc0b3e20036","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":19,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6808","nonce":1410769664},{"type":"v1","addr":"192.168.123.108:6809","nonce":1410769664}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6810","nonce":1410769664},{"type":"v1","addr":"192.168.123.108:6811","nonce":1410769664}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6814","nonce":1410769664},{"type":"v1","addr":"192.168.123.108:6815","nonce":1410769664}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6812","nonce":1410769664},{"type":"v1","addr":"192.168.123.108:6813","nonce":1410769664}]},"public_addr":"192.168.123.108:6809/1410769664","cluster_addr":"192.168.123.108:6811/1410769664","heartbeat_back_addr":"192.168.123.108:6815/1410769664","heartbeat_front_addr":"192.168.123.108:6813/1410769664","state":["exists","up"]},{"osd":3,"uuid":"b2c4b60c-dcce-4d36-b5bb-ca54018766c4","up":1,"in":1,"weight":1,"primary_affinit
y":1,"last_clean_begin":0,"last_clean_end":0,"up_from":22,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6810","nonce":3949859068},{"type":"v1","addr":"192.168.123.103:6811","nonce":3949859068}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6812","nonce":3949859068},{"type":"v1","addr":"192.168.123.103:6813","nonce":3949859068}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6816","nonce":3949859068},{"type":"v1","addr":"192.168.123.103:6817","nonce":3949859068}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6814","nonce":3949859068},{"type":"v1","addr":"192.168.123.103:6815","nonce":3949859068}]},"public_addr":"192.168.123.103:6811/3949859068","cluster_addr":"192.168.123.103:6813/3949859068","heartbeat_back_addr":"192.168.123.103:6817/3949859068","heartbeat_front_addr":"192.168.123.103:6815/3949859068","state":["exists","up"]},{"osd":4,"uuid":"bd90deb9-db37-45e1-b727-12ab786acc3e","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":23,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6816","nonce":2504550642},{"type":"v1","addr":"192.168.123.108:6817","nonce":2504550642}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6818","nonce":2504550642},{"type":"v1","addr":"192.168.123.108:6819","nonce":2504550642}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6822","nonce":2504550642},{"type":"v1","addr":"192.168.123.108:6823","nonce":2504550642}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6820","nonce":2504550642},{"type":"v1","addr":"192.168.123.108:6821","nonce":2504550642}]},"public_addr":"192.168.123.108:6817/2504550642","cluster_addr":"192.168.123.108:6819/2504550642","heartbeat_back_addr":"192.168.123.108:6823/2504550642","heartbeat_front_addr":"192.168.123.108:6821/2504550642","state":["exists","up"]},{"osd":5,"uuid":"13033dde-57fa-42c6-a95b-9cab813c93a3","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":25,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6818","nonce":1545819671},{"type":"v1","addr":"192.168.123.103:6819","nonce":1545819671}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6820","nonce":1545819671},{"type":"v1","addr":"192.168.123.103:6821","nonce":1545819671}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6824","nonce":1545819671},{"type":"v1","addr":"192.168.123.103:6825","nonce":1545819671}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6822","nonce":1545819671},{"type":"v1","addr":"192.168.123.103:6823","nonce":1545819671}]},"public_addr":"192.168.123.103:6819/1545819671","cluster_addr":"192.168.123.103:6821/1545819671","heartbeat_back_addr":"192.168.123.103:6825/1545819671","heartbeat_front_addr":"192.168.123.103:6823/1545819671","state":["exists","up"]},{"osd":6,"uuid":"bc010e6f-0018-4ea9-a094-62c7f3721283","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":27,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6824","nonce":1083263007},{"type":"v1","addr":"192.168.123.108:6825","nonce":1083263007}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6826","nonce":1083263007},{"type":"v1","addr":"192.1
68.123.108:6827","nonce":1083263007}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6830","nonce":1083263007},{"type":"v1","addr":"192.168.123.108:6831","nonce":1083263007}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.108:6828","nonce":1083263007},{"type":"v1","addr":"192.168.123.108:6829","nonce":1083263007}]},"public_addr":"192.168.123.108:6825/1083263007","cluster_addr":"192.168.123.108:6827/1083263007","heartbeat_back_addr":"192.168.123.108:6831/1083263007","heartbeat_front_addr":"192.168.123.108:6829/1083263007","state":["exists","up"]},{"osd":7,"uuid":"dcbdaaaf-999f-4d53-884e-1b10c94faa7e","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":28,"up_thru":29,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6826","nonce":977971964},{"type":"v1","addr":"192.168.123.103:6827","nonce":977971964}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6828","nonce":977971964},{"type":"v1","addr":"192.168.123.103:6829","nonce":977971964}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6832","nonce":977971964},{"type":"v1","addr":"192.168.123.103:6833","nonce":977971964}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.103:6830","nonce":977971964},{"type":"v1","addr":"192.168.123.103:6831","nonce":977971964}]},"public_addr":"192.168.123.103:6827/977971964","cluster_addr":"192.168.123.103:6829/977971964","heartbeat_back_addr":"192.168.123.103:6833/977971964","heartbeat_front_addr":"192.168.123.103:6831/977971964","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T22:55:35.873655+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T22:55:36.629275+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T22:55:38.739662+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T22:55:40.457150+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T22:55:41.696092+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T22:55:43.837483+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T22:55:45.322311+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540701547738038271,"old_weight":0,"last_purged_snaps_scrub":"2026-03-06T22:55:47.574564+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.103:0/1702930659":"2026-03-07T22:54:48.882784+0000","192.168.123.103:0/2772543791":"2026-03-07T22:53:52.758173+0000","192.168.123.103:6800/2470745140":"2026-03-07T22:54:12.185040+0000","192.168.123.103:0/
1003274800":"2026-03-07T22:53:52.758173+0000","192.168.123.103:0/2059920708":"2026-03-07T22:53:52.758173+0000","192.168.123.103:6801/2395084648":"2026-03-07T22:54:48.882784+0000","192.168.123.103:0/2088783198":"2026-03-07T22:54:48.882784+0000","192.168.123.103:6800/2538238857":"2026-03-07T22:53:52.758173+0000","192.168.123.103:6801/2538238857":"2026-03-07T22:53:52.758173+0000","192.168.123.103:0/2541355550":"2026-03-07T22:54:48.882784+0000","192.168.123.103:6801/2470745140":"2026-03-07T22:54:12.185040+0000","192.168.123.103:0/245071339":"2026-03-07T22:54:12.185040+0000","192.168.123.103:0/2764704567":"2026-03-07T22:54:12.185040+0000","192.168.123.103:0/2031024266":"2026-03-07T22:54:12.185040+0000","192.168.123.103:6800/2395084648":"2026-03-07T22:54:48.882784+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-06T23:55:55.407 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph tell osd.0 flush_pg_stats 2026-03-06T23:55:55.407 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph tell osd.1 flush_pg_stats 2026-03-06T23:55:55.408 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph tell osd.2 flush_pg_stats 2026-03-06T23:55:55.408 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph tell osd.3 flush_pg_stats 2026-03-06T23:55:55.408 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph tell osd.4 flush_pg_stats 2026-03-06T23:55:55.408 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph tell osd.5 flush_pg_stats 2026-03-06T23:55:55.408 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph tell osd.6 flush_pg_stats 2026-03-06T23:55:55.408 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph tell osd.7 flush_pg_stats 2026-03-06T23:55:55.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:55 vm08 ceph-mon[56019]: from='client.? 
192.168.123.103:0/2328362444' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-06T23:55:55.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:55 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/2563243307' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-06T23:55:55.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:55 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/2328362444' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-06T23:55:55.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:55 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/2563243307' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-06T23:55:56.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:56 vm08 ceph-mon[56019]: pgmap v51: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail; 75 KiB/s, 0 objects/s recovering 2026-03-06T23:55:56.753 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:55:56.787 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:56 vm03 ceph-mon[48028]: pgmap v51: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail; 75 KiB/s, 0 objects/s recovering 2026-03-06T23:55:56.882 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:55:56.882 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:55:56.882 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:55:56.884 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:55:56.886 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:55:56.888 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:55:56.913 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:55:57.560 INFO:teuthology.orchestra.run.vm03.stdout:81604378629 2026-03-06T23:55:57.560 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd last-stat-seq osd.2 2026-03-06T23:55:58.026 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:55:58.066 INFO:teuthology.orchestra.run.vm03.stdout:98784247812 2026-03-06T23:55:58.066 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd last-stat-seq osd.4 2026-03-06T23:55:58.230 INFO:teuthology.orchestra.run.vm03.stdout:115964116996 2026-03-06T23:55:58.230 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 
386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd last-stat-seq osd.6 2026-03-06T23:55:58.410 INFO:teuthology.orchestra.run.vm03.stdout:120259084291 2026-03-06T23:55:58.410 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd last-stat-seq osd.7 2026-03-06T23:55:58.472 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:58 vm03 ceph-mon[48028]: pgmap v52: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail; 60 KiB/s, 0 objects/s recovering 2026-03-06T23:55:58.701 INFO:teuthology.orchestra.run.vm03.stdout:94489280516 2026-03-06T23:55:58.701 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd last-stat-seq osd.3 2026-03-06T23:55:58.708 INFO:teuthology.orchestra.run.vm03.stdout:107374182404 2026-03-06T23:55:58.708 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd last-stat-seq osd.5 2026-03-06T23:55:58.724 INFO:teuthology.orchestra.run.vm03.stdout:68719476742 2026-03-06T23:55:58.724 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd last-stat-seq osd.0 2026-03-06T23:55:58.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:58 vm08 ceph-mon[56019]: pgmap v52: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail; 60 KiB/s, 0 objects/s recovering 2026-03-06T23:55:58.853 INFO:teuthology.orchestra.run.vm03.stdout:81604378628 2026-03-06T23:55:58.966 INFO:tasks.cephadm.ceph_manager.ceph:need seq 81604378629 got 81604378628 for osd.2 2026-03-06T23:55:58.969 INFO:teuthology.orchestra.run.vm03.stdout:73014444037 2026-03-06T23:55:58.970 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd last-stat-seq osd.1 2026-03-06T23:55:59.500 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:55:59.537 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:55:59.679 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:55:59 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/1118283675' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-06T23:55:59.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:55:59 vm08 ceph-mon[56019]: from='client.? 
192.168.123.103:0/1118283675' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-06T23:55:59.924 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:55:59.967 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph osd last-stat-seq osd.2 2026-03-06T23:56:00.202 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:56:00.211 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:56:00.597 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:56:00.667 INFO:teuthology.orchestra.run.vm03.stdout:98784247812 2026-03-06T23:56:00.669 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:00 vm03 ceph-mon[48028]: pgmap v53: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail; 53 KiB/s, 0 objects/s recovering 2026-03-06T23:56:00.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:00 vm08 ceph-mon[56019]: pgmap v53: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail; 53 KiB/s, 0 objects/s recovering 2026-03-06T23:56:00.852 INFO:teuthology.orchestra.run.vm03.stdout:68719476742 2026-03-06T23:56:00.865 INFO:tasks.cephadm.ceph_manager.ceph:need seq 98784247812 got 98784247812 for osd.4 2026-03-06T23:56:00.865 DEBUG:teuthology.parallel:result is None 2026-03-06T23:56:00.940 INFO:tasks.cephadm.ceph_manager.ceph:need seq 68719476742 got 68719476742 for osd.0 2026-03-06T23:56:00.940 DEBUG:teuthology.parallel:result is None 2026-03-06T23:56:00.986 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:56:01.029 INFO:teuthology.orchestra.run.vm03.stdout:120259084291 2026-03-06T23:56:01.328 INFO:teuthology.orchestra.run.vm03.stdout:115964116996 2026-03-06T23:56:01.338 INFO:tasks.cephadm.ceph_manager.ceph:need seq 120259084291 got 120259084291 for osd.7 2026-03-06T23:56:01.338 DEBUG:teuthology.parallel:result is None 2026-03-06T23:56:01.351 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:56:01.427 INFO:tasks.cephadm.ceph_manager.ceph:need seq 115964116996 got 115964116996 for osd.6 2026-03-06T23:56:01.427 DEBUG:teuthology.parallel:result is None 2026-03-06T23:56:01.484 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:01 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/271206695' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-06T23:56:01.484 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:01 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/7340535' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-06T23:56:01.484 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:01 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/870910499' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch 2026-03-06T23:56:01.484 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:01 vm03 ceph-mon[48028]: from='client.? 
192.168.123.103:0/2325097405' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch 2026-03-06T23:56:01.555 INFO:teuthology.orchestra.run.vm03.stdout:107374182404 2026-03-06T23:56:01.624 INFO:teuthology.orchestra.run.vm03.stdout:94489280517 2026-03-06T23:56:01.689 INFO:tasks.cephadm.ceph_manager.ceph:need seq 107374182404 got 107374182404 for osd.5 2026-03-06T23:56:01.689 DEBUG:teuthology.parallel:result is None 2026-03-06T23:56:01.709 INFO:tasks.cephadm.ceph_manager.ceph:need seq 94489280516 got 94489280517 for osd.3 2026-03-06T23:56:01.709 DEBUG:teuthology.parallel:result is None 2026-03-06T23:56:01.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:01 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/271206695' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-06T23:56:01.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:01 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/7340535' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-06T23:56:01.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:01 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/870910499' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch 2026-03-06T23:56:01.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:01 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/2325097405' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch 2026-03-06T23:56:01.753 INFO:teuthology.orchestra.run.vm03.stdout:73014444038 2026-03-06T23:56:01.801 INFO:tasks.cephadm.ceph_manager.ceph:need seq 73014444037 got 73014444038 for osd.1 2026-03-06T23:56:01.801 DEBUG:teuthology.parallel:result is None 2026-03-06T23:56:01.852 INFO:teuthology.orchestra.run.vm03.stdout:81604378629 2026-03-06T23:56:01.903 INFO:tasks.cephadm.ceph_manager.ceph:need seq 81604378629 got 81604378629 for osd.2 2026-03-06T23:56:01.903 DEBUG:teuthology.parallel:result is None 2026-03-06T23:56:01.903 INFO:tasks.cephadm.ceph_manager.ceph:waiting for clean 2026-03-06T23:56:01.903 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph pg dump --format=json 2026-03-06T23:56:02.225 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:56:02.526 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:56:02.526 INFO:teuthology.orchestra.run.vm03.stderr:dumped all 2026-03-06T23:56:02.572 
INFO:teuthology.orchestra.run.vm03.stdout:{"pg_ready":true,"pg_map":{"version":54,"stamp":"2026-03-06T22:56:00.897367+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":459280,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":32,"ondisk_log_size":32,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":3,"num_osds":8,"num_per_pool_osds":8,"num_per_pool_omap_osds":3,"kb":167739392,"kb_used":627960,"kb_used_data":3628,"kb_used_omap":12,"kb_used_meta":214515,"kb_avail":167111432,"statfs":{"total":171765137408,"available":171122106368,"internally_reserved":0,"allocated":3715072,"data_stored":2299144,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":12715,"internal_metadata":219663957},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":459280,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"10.001375"},"pg_stats":[{"pgid":"1.0","version":"21'32","reported_seq":17,"reported_epoch":30,"state":"active+clean","last_fresh":"2026-03-06T22:55:51.709920+0000","last_change":"2026-03-06T22:55:51.709114+0000","last_activ
e":"2026-03-06T22:55:51.709920+0000","last_peered":"2026-03-06T22:55:51.709920+0000","last_clean":"2026-03-06T22:55:51.709920+0000","last_became_active":"2026-03-06T22:55:51.401330+0000","last_became_peered":"2026-03-06T22:55:51.401330+0000","last_unstale":"2026-03-06T22:55:51.709920+0000","last_undegraded":"2026-03-06T22:55:51.709920+0000","last_fullsized":"2026-03-06T22:55:51.709920+0000","mapping_epoch":29,"log_start":"0'0","ondisk_log_start":"0'0","created":20,"last_epoch_clean":30,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-06T22:55:41.303599+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-06T22:55:41.303599+0000","last_clean_scrub_stamp":"2026-03-06T22:55:41.303599+0000","objects_scrubbed":0,"log_size":32,"log_dups_size":0,"ondisk_log_size":32,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 2026-03-08T09:55:55.447710+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":459280,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[7,0,3],"acting":[7,0,3],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":7,"acting_primary":7,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":459280,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1388544,"data_stored":1377840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":32,"ondisk_log_size":32,"up":3,"acting":3,"num_store_stats":4}],"osd_stats":[{"osd":7,"up_from":28,"seq":120259084292,"num_pgs":1,"nu
m_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27576,"kb_used_data":736,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939848,"statfs":{"total":21470642176,"available":21442404352,"internally_reserved":0,"allocated":753664,"data_stored":574443,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1585,"internal_metadata":27457999},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":6,"up_from":27,"seq":115964116997,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":436732,"kb_used_data":284,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20530692,"statfs":{"total":21470642176,"available":21023428608,"internally_reserved":0,"allocated":290816,"data_stored":115163,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1589,"internal_metadata":27457995},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":5,"up_from":25,"seq":107374182404,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27128,"kb_used_data":284,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940296,"statfs":{"total":21470642176,"available":21442863104,"internally_reserved":0,"allocated":290816,"data_stored":115163,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":4,"up_from":23,"seq":98784247813,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27124,"kb_used_data":284,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940300,"statfs":{"total":21470642176,"available":21442867200,"internally_reserved":0,"allocated":290816,"data_stored":115163,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":3,"up_from":22,"seq":94489280517,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27580,"kb_used_data":736,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939844,"statfs":{"total":21470642176,"available":21442400256,"internally_reserved":0,"allocated":753664,"data_stored":574443,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1587,"internal_metadata":27457997},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0
,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":2,"up_from":19,"seq":81604378629,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27124,"kb_used_data":284,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940300,"statfs":{"total":21470642176,"available":21442867200,"internally_reserved":0,"allocated":290816,"data_stored":115163,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":1,"up_from":17,"seq":73014444038,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27120,"kb_used_data":284,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940304,"statfs":{"total":21470642176,"available":21442871296,"internally_reserved":0,"allocated":290816,"data_stored":115163,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":0,"up_from":16,"seq":68719476742,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27576,"kb_used_data":736,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939848,"statfs":{"total":21470642176,"available":21442404352,"internally_reserved":0,"allocated":753664,"data_stored":574443,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1594,"internal_metadata":27457990},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":1,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":3,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":7,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-06T23:56:02.572 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph pg dump --format=json 2026-03-06T23:56:02.594 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:02 vm03 ceph-mon[48028]: pgmap 
v54: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail; 45 KiB/s, 0 objects/s recovering 2026-03-06T23:56:02.594 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:02 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/259728016' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-06T23:56:02.594 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:02 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/1031238017' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-06T23:56:02.594 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:02 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/3696696436' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-06T23:56:02.594 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:02 vm03 ceph-mon[48028]: from='client.? 192.168.123.103:0/2421812114' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-06T23:56:02.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:02 vm08 ceph-mon[56019]: pgmap v54: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail; 45 KiB/s, 0 objects/s recovering 2026-03-06T23:56:02.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:02 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/259728016' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-06T23:56:02.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:02 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/1031238017' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-06T23:56:02.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:02 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/3696696436' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-06T23:56:02.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:02 vm08 ceph-mon[56019]: from='client.? 
192.168.123.103:0/2421812114' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-06T23:56:02.888 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:56:03.204 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:56:03.204 INFO:teuthology.orchestra.run.vm03.stderr:dumped all 2026-03-06T23:56:03.289 INFO:teuthology.orchestra.run.vm03.stdout:{"pg_ready":true,"pg_map":{"version":55,"stamp":"2026-03-06T22:56:02.897645+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":459280,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":32,"ondisk_log_size":32,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":3,"num_osds":8,"num_per_pool_osds":8,"num_per_pool_omap_osds":3,"kb":167739392,"kb_used":627960,"kb_used_data":3628,"kb_used_omap":12,"kb_used_meta":214515,"kb_avail":167111432,"statfs":{"total":171765137408,"available":171122106368,"internally_reserved":0,"allocated":3715072,"data_stored":2299144,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":12715,"internal_metadata":219663957},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":459280,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"da
ta_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"11.499476"},"pg_stats":[{"pgid":"1.0","version":"21'32","reported_seq":17,"reported_epoch":30,"state":"active+clean","last_fresh":"2026-03-06T22:55:51.709920+0000","last_change":"2026-03-06T22:55:51.709114+0000","last_active":"2026-03-06T22:55:51.709920+0000","last_peered":"2026-03-06T22:55:51.709920+0000","last_clean":"2026-03-06T22:55:51.709920+0000","last_became_active":"2026-03-06T22:55:51.401330+0000","last_became_peered":"2026-03-06T22:55:51.401330+0000","last_unstale":"2026-03-06T22:55:51.709920+0000","last_undegraded":"2026-03-06T22:55:51.709920+0000","last_fullsized":"2026-03-06T22:55:51.709920+0000","mapping_epoch":29,"log_start":"0'0","ondisk_log_start":"0'0","created":20,"last_epoch_clean":30,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-06T22:55:41.303599+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-06T22:55:41.303599+0000","last_clean_scrub_stamp":"2026-03-06T22:55:41.303599+0000","objects_scrubbed":0,"log_size":32,"log_dups_size":0,"ondisk_log_size":32,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 2026-03-08T09:55:55.447710+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":459280,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[7,0,3],"acting":[7,0,3],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":7,"acting_primary":7,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":46,"num_read_kb":37,"num_write":57,"num_write_kb":584,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":459280,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_o
map_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1388544,"data_stored":1377840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":32,"ondisk_log_size":32,"up":3,"acting":3,"num_store_stats":4}],"osd_stats":[{"osd":7,"up_from":28,"seq":120259084292,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27576,"kb_used_data":736,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939848,"statfs":{"total":21470642176,"available":21442404352,"internally_reserved":0,"allocated":753664,"data_stored":574443,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1585,"internal_metadata":27457999},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":6,"up_from":27,"seq":115964116997,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":436732,"kb_used_data":284,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20530692,"statfs":{"total":21470642176,"available":21023428608,"internally_reserved":0,"allocated":290816,"data_stored":115163,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1589,"internal_metadata":27457995},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":5,"up_from":25,"seq":107374182405,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27128,"kb_used_data":284,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940296,"statfs":{"total":21470642176,"available":21442863104,"internally_reserved":0,"allocated":290816,"data_stored":115163,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":4,"up_from":23,"seq":98784247813,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27124,"kb_used_data":284,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940300,"statfs":{"total":21470642176,"available":21442867200,"internally_reserved":0,"allocated":290816,"data_stored":115163,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":3,"up_from":22,"seq":94489280517,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27580,"kb_used_data":736,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939844,"statfs":{"total":214706421
76,"available":21442400256,"internally_reserved":0,"allocated":753664,"data_stored":574443,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1587,"internal_metadata":27457997},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":2,"up_from":19,"seq":81604378630,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27124,"kb_used_data":284,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940300,"statfs":{"total":21470642176,"available":21442867200,"internally_reserved":0,"allocated":290816,"data_stored":115163,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":1,"up_from":17,"seq":73014444038,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":27120,"kb_used_data":284,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20940304,"statfs":{"total":21470642176,"available":21442871296,"internally_reserved":0,"allocated":290816,"data_stored":115163,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1590,"internal_metadata":27457994},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":0,"up_from":16,"seq":68719476742,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":27576,"kb_used_data":736,"kb_used_omap":1,"kb_used_meta":26814,"kb_avail":20939848,"statfs":{"total":21470642176,"available":21442404352,"internally_reserved":0,"allocated":753664,"data_stored":574443,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":1594,"internal_metadata":27457990},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":1,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":3,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":7,"total":0,"available":0,"internally_reserved":0,"allocated":462848,"data_stored":459280,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_ori
ginal":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-06T23:56:03.289 INFO:tasks.cephadm.ceph_manager.ceph:clean! 2026-03-06T23:56:03.289 INFO:tasks.ceph:Waiting until ceph cluster ceph is healthy... 2026-03-06T23:56:03.289 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy 2026-03-06T23:56:03.289 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph health --format=json 2026-03-06T23:56:03.489 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:03 vm03 ceph-mon[48028]: from='client.14520 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T23:56:03.641 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:56:03.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:03 vm08 ceph-mon[56019]: from='client.14520 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T23:56:03.980 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:56:03.980 INFO:teuthology.orchestra.run.vm03.stdout:{"status":"HEALTH_OK","checks":{},"mutes":[]} 2026-03-06T23:56:04.034 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy done 2026-03-06T23:56:04.034 INFO:tasks.cephadm:Setup complete, yielding 2026-03-06T23:56:04.034 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-06T23:56:04.036 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm03.local 2026-03-06T23:56:04.036 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- bash -c 'ceph orch status' 2026-03-06T23:56:04.349 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:56:04.628 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:04 vm03 ceph-mon[48028]: pgmap v55: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail; 39 KiB/s, 0 objects/s recovering 2026-03-06T23:56:04.628 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:04 vm03 ceph-mon[48028]: from='client.24319 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T23:56:04.629 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:04 vm03 ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-06T23:56:04.629 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:04 vm03 ceph-mon[48028]: from='client.? 
192.168.123.103:0/3645672392' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-06T23:56:04.674 INFO:teuthology.orchestra.run.vm03.stdout:Backend: cephadm 2026-03-06T23:56:04.674 INFO:teuthology.orchestra.run.vm03.stdout:Available: Yes 2026-03-06T23:56:04.674 INFO:teuthology.orchestra.run.vm03.stdout:Paused: No 2026-03-06T23:56:04.743 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- bash -c 'ceph orch ps' 2026-03-06T23:56:04.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:04 vm08 ceph-mon[56019]: pgmap v55: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail; 39 KiB/s, 0 objects/s recovering 2026-03-06T23:56:04.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:04 vm08 ceph-mon[56019]: from='client.24319 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T23:56:04.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:04 vm08 ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-06T23:56:04.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:04 vm08 ceph-mon[56019]: from='client.? 192.168.123.103:0/3645672392' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-06T23:56:05.056 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:56:05.359 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-06T23:56:05.359 INFO:teuthology.orchestra.run.vm03.stdout:alertmanager.vm03 vm03 *:9093,9094 running (56s) 16s ago 98s 21.3M - 0.25.0 c8568f914cd2 1dbe2f74b08c 2026-03-06T23:56:05.359 INFO:teuthology.orchestra.run.vm03.stdout:ceph-exporter.vm03 vm03 running (106s) 16s ago 106s 8786k - 19.2.3-39-g340d3c24fc6 8bccc98d839a c7d3bc3e325d 2026-03-06T23:56:05.359 INFO:teuthology.orchestra.run.vm03.stdout:ceph-exporter.vm08 vm08 running (71s) 17s ago 71s 6727k - 19.2.3-39-g340d3c24fc6 8bccc98d839a 9e1eaa9dfb3a 2026-03-06T23:56:05.359 INFO:teuthology.orchestra.run.vm03.stdout:crash.vm03 vm03 running (105s) 16s ago 105s 11.1M - 19.2.3-39-g340d3c24fc6 8bccc98d839a 97554af5c861 2026-03-06T23:56:05.359 INFO:teuthology.orchestra.run.vm03.stdout:crash.vm08 vm08 running (70s) 17s ago 70s 11.1M - 19.2.3-39-g340d3c24fc6 8bccc98d839a fd70e00505f9 2026-03-06T23:56:05.359 INFO:teuthology.orchestra.run.vm03.stdout:grafana.vm03 vm03 *:3000 running (55s) 16s ago 92s 66.3M - 10.4.0 c8b91775d855 6ad012d74bd3 2026-03-06T23:56:05.359 INFO:teuthology.orchestra.run.vm03.stdout:mgr.vm03.xzkqce vm03 *:9283,8765,8443 running (2m) 16s ago 2m 543M - 19.2.3-39-g340d3c24fc6 8bccc98d839a 723e49aecc15 2026-03-06T23:56:05.359 INFO:teuthology.orchestra.run.vm03.stdout:mgr.vm08.bnopnr vm08 *:8443,9283,8765 running (66s) 17s ago 66s 479M - 19.2.3-39-g340d3c24fc6 8bccc98d839a 23af230679dd 2026-03-06T23:56:05.359 INFO:teuthology.orchestra.run.vm03.stdout:mon.vm03 vm03 running (2m) 16s ago 2m 49.9M 2048M 19.2.3-39-g340d3c24fc6 8bccc98d839a 49f39418d469 2026-03-06T23:56:05.359 INFO:teuthology.orchestra.run.vm03.stdout:mon.vm08 vm08 running (64s) 17s ago 64s 45.5M 2048M 
19.2.3-39-g340d3c24fc6 8bccc98d839a f4a7f049da3e 2026-03-06T23:56:05.359 INFO:teuthology.orchestra.run.vm03.stdout:node-exporter.vm03 vm03 *:9100 running (102s) 16s ago 102s 9323k - 1.7.0 72c9c2088986 252a934ab624 2026-03-06T23:56:05.359 INFO:teuthology.orchestra.run.vm03.stdout:node-exporter.vm08 vm08 *:9100 running (67s) 17s ago 67s 9202k - 1.7.0 72c9c2088986 08a294c1dfce 2026-03-06T23:56:05.359 INFO:teuthology.orchestra.run.vm03.stdout:osd.0 vm08 running (32s) 17s ago 32s 36.7M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a db428a6afa60 2026-03-06T23:56:05.359 INFO:teuthology.orchestra.run.vm03.stdout:osd.1 vm03 running (31s) 16s ago 31s 39.5M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a d1e121ceddcf 2026-03-06T23:56:05.359 INFO:teuthology.orchestra.run.vm03.stdout:osd.2 vm08 running (29s) 17s ago 29s 53.1M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a 10fe666c3beb 2026-03-06T23:56:05.359 INFO:teuthology.orchestra.run.vm03.stdout:osd.3 vm03 running (27s) 16s ago 27s 56.1M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a c073c0190b0a 2026-03-06T23:56:05.359 INFO:teuthology.orchestra.run.vm03.stdout:osd.4 vm08 running (25s) 17s ago 25s 54.0M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a 11843ffaf4c6 2026-03-06T23:56:05.359 INFO:teuthology.orchestra.run.vm03.stdout:osd.5 vm03 running (24s) 16s ago 24s 55.5M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a 4f43b0eaf42c 2026-03-06T23:56:05.359 INFO:teuthology.orchestra.run.vm03.stdout:osd.6 vm08 running (22s) 17s ago 22s 58.4M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a 833d32daeb35 2026-03-06T23:56:05.359 INFO:teuthology.orchestra.run.vm03.stdout:osd.7 vm03 running (20s) 16s ago 20s 28.0M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a 92a834b69a71 2026-03-06T23:56:05.359 INFO:teuthology.orchestra.run.vm03.stdout:prometheus.vm03 vm03 *:9095 running (54s) 16s ago 86s 36.1M - 2.51.0 1d3b7f56885b 72d1b930feef 2026-03-06T23:56:05.405 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- bash -c 'ceph orch ls' 2026-03-06T23:56:05.712 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:56:06.017 INFO:teuthology.orchestra.run.vm03.stdout:NAME PORTS RUNNING REFRESHED AGE PLACEMENT 2026-03-06T23:56:06.017 INFO:teuthology.orchestra.run.vm03.stdout:alertmanager ?:9093,9094 1/1 17s ago 2m count:1 2026-03-06T23:56:06.017 INFO:teuthology.orchestra.run.vm03.stdout:ceph-exporter 2/2 17s ago 2m * 2026-03-06T23:56:06.017 INFO:teuthology.orchestra.run.vm03.stdout:crash 2/2 17s ago 2m * 2026-03-06T23:56:06.017 INFO:teuthology.orchestra.run.vm03.stdout:grafana ?:3000 1/1 17s ago 2m count:1 2026-03-06T23:56:06.017 INFO:teuthology.orchestra.run.vm03.stdout:mgr 2/2 17s ago 2m count:2 2026-03-06T23:56:06.017 INFO:teuthology.orchestra.run.vm03.stdout:mon 2/2 17s ago 103s vm03:192.168.123.103=vm03;vm08:192.168.123.108=vm08;count:2 2026-03-06T23:56:06.017 INFO:teuthology.orchestra.run.vm03.stdout:node-exporter ?:9100 2/2 17s ago 2m * 2026-03-06T23:56:06.017 INFO:teuthology.orchestra.run.vm03.stdout:osd.all-available-devices 8 17s ago 55s * 2026-03-06T23:56:06.017 INFO:teuthology.orchestra.run.vm03.stdout:prometheus ?:9095 1/1 17s ago 2m count:1 2026-03-06T23:56:06.081 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image 
harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- bash -c 'ceph orch host ls' 2026-03-06T23:56:06.383 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:56:06.710 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:06 vm03 ceph-mon[48028]: from='client.24325 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:56:06.710 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:06 vm03 ceph-mon[48028]: pgmap v56: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail; 37 KiB/s, 0 objects/s recovering 2026-03-06T23:56:06.710 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:06 vm03 ceph-mon[48028]: from='client.14536 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:56:06.710 INFO:teuthology.orchestra.run.vm03.stdout:HOST ADDR LABELS STATUS 2026-03-06T23:56:06.710 INFO:teuthology.orchestra.run.vm03.stdout:vm03 192.168.123.103 2026-03-06T23:56:06.710 INFO:teuthology.orchestra.run.vm03.stdout:vm08 192.168.123.108 2026-03-06T23:56:06.710 INFO:teuthology.orchestra.run.vm03.stdout:2 hosts in cluster 2026-03-06T23:56:06.775 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- bash -c 'ceph orch device ls' 2026-03-06T23:56:06.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:06 vm08 ceph-mon[56019]: from='client.24325 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:56:06.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:06 vm08 ceph-mon[56019]: pgmap v56: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail; 37 KiB/s, 0 objects/s recovering 2026-03-06T23:56:06.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:06 vm08 ceph-mon[56019]: from='client.14536 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:56:07.094 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:56:07.415 INFO:teuthology.orchestra.run.vm03.stdout:HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS 2026-03-06T23:56:07.415 INFO:teuthology.orchestra.run.vm03.stdout:vm03 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 16s ago Has a FileSystem, Insufficient space (<5GB) 2026-03-06T23:56:07.415 INFO:teuthology.orchestra.run.vm03.stdout:vm03 /dev/vdb hdd DWNBRSTVMM03001 20.0G No 16s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T23:56:07.415 INFO:teuthology.orchestra.run.vm03.stdout:vm03 /dev/vdc hdd DWNBRSTVMM03002 20.0G No 16s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T23:56:07.415 INFO:teuthology.orchestra.run.vm03.stdout:vm03 /dev/vdd hdd DWNBRSTVMM03003 20.0G No 16s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T23:56:07.415 INFO:teuthology.orchestra.run.vm03.stdout:vm03 /dev/vde hdd DWNBRSTVMM03004 20.0G No 16s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM 
detected 2026-03-06T23:56:07.415 INFO:teuthology.orchestra.run.vm03.stdout:vm08 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 17s ago Has a FileSystem, Insufficient space (<5GB) 2026-03-06T23:56:07.415 INFO:teuthology.orchestra.run.vm03.stdout:vm08 /dev/vdb hdd DWNBRSTVMM08001 20.0G No 17s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T23:56:07.415 INFO:teuthology.orchestra.run.vm03.stdout:vm08 /dev/vdc hdd DWNBRSTVMM08002 20.0G No 17s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T23:56:07.415 INFO:teuthology.orchestra.run.vm03.stdout:vm08 /dev/vdd hdd DWNBRSTVMM08003 20.0G No 17s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T23:56:07.415 INFO:teuthology.orchestra.run.vm03.stdout:vm08 /dev/vde hdd DWNBRSTVMM08004 20.0G No 17s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T23:56:07.483 INFO:teuthology.run_tasks:Running task vip... 2026-03-06T23:56:07.485 INFO:tasks.vip:Allocating static IPs for each host... 2026-03-06T23:56:07.486 INFO:tasks.vip:peername 192.168.123.103 2026-03-06T23:56:07.486 INFO:tasks.vip:192.168.123.103 in 192.168.123.0/24, pos 102 2026-03-06T23:56:07.486 INFO:tasks.vip:vm03.local static 12.12.0.103, vnet 12.12.0.0/22 2026-03-06T23:56:07.486 INFO:tasks.vip:VIPs are [IPv4Address('12.12.1.103')] 2026-03-06T23:56:07.486 DEBUG:teuthology.orchestra.run.vm03:> sudo ip route ls 2026-03-06T23:56:07.511 INFO:teuthology.orchestra.run.vm03.stdout:default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.103 metric 100 2026-03-06T23:56:07.512 INFO:teuthology.orchestra.run.vm03.stdout:192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.103 metric 100 2026-03-06T23:56:07.513 INFO:tasks.vip:Configuring 12.12.0.103 on vm03.local iface eth0... 2026-03-06T23:56:07.513 DEBUG:teuthology.orchestra.run.vm03:> sudo ip addr add 12.12.0.103/22 dev eth0 2026-03-06T23:56:07.582 INFO:tasks.vip:peername 192.168.123.108 2026-03-06T23:56:07.582 INFO:tasks.vip:192.168.123.108 in 192.168.123.0/24, pos 107 2026-03-06T23:56:07.582 INFO:tasks.vip:vm08.local static 12.12.0.108, vnet 12.12.0.0/22 2026-03-06T23:56:07.582 DEBUG:teuthology.orchestra.run.vm08:> sudo ip route ls 2026-03-06T23:56:07.606 INFO:teuthology.orchestra.run.vm08.stdout:default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.108 metric 100 2026-03-06T23:56:07.606 INFO:teuthology.orchestra.run.vm08.stdout:192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.108 metric 100 2026-03-06T23:56:07.607 INFO:tasks.vip:Configuring 12.12.0.108 on vm08.local iface eth0... 2026-03-06T23:56:07.607 DEBUG:teuthology.orchestra.run.vm08:> sudo ip addr add 12.12.0.108/22 dev eth0 2026-03-06T23:56:07.674 INFO:teuthology.run_tasks:Running task cephadm.shell... 
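The vip task a few entries above derives each static address from the host's position inside its management subnet: 192.168.123.103 sits at pos 102 in 192.168.123.0/24 and becomes 12.12.0.103 in the 12.12.0.0/22 vnet, while the VIP 12.12.1.103 is taken from the next /24 inside that vnet. A minimal sketch of that arithmetic with Python's ipaddress module — an illustration of the logged mapping, not teuthology's actual vip task code:

```python
import ipaddress

def vip_layout(host_ip, host_net, vnet):
    """Reproduce the static/VIP addresses the vip task logged (sketch)."""
    net = ipaddress.ip_network(host_net)
    # 0-based position among host addresses: .103 in a /24 -> pos 102
    pos = list(net.hosts()).index(ipaddress.ip_address(host_ip))
    base = ipaddress.ip_network(vnet).network_address
    static = base + pos + 1      # 12.12.0.0 + 103 -> 12.12.0.103
    vip = base + 256 + pos + 1   # next /24 inside the /22 -> 12.12.1.103
    return static, vip

print(vip_layout("192.168.123.103", "192.168.123.0/24", "12.12.0.0/22"))
# (IPv4Address('12.12.0.103'), IPv4Address('12.12.1.103'))
```

The static address is then attached exactly as logged: `sudo ip addr add 12.12.0.103/22 dev eth0`.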
2026-03-06T23:56:07.676 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm03.local 2026-03-06T23:56:07.676 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- bash -c 'ceph orch device ls --refresh' 2026-03-06T23:56:07.698 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:07 vm03 ceph-mon[48028]: from='client.14540 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:56:07.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:07 vm08 ceph-mon[56019]: from='client.14540 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:56:08.003 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:56:08.317 INFO:teuthology.orchestra.run.vm03.stdout:HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS 2026-03-06T23:56:08.317 INFO:teuthology.orchestra.run.vm03.stdout:vm03 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 17s ago Has a FileSystem, Insufficient space (<5GB) 2026-03-06T23:56:08.317 INFO:teuthology.orchestra.run.vm03.stdout:vm03 /dev/vdb hdd DWNBRSTVMM03001 20.0G No 17s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T23:56:08.318 INFO:teuthology.orchestra.run.vm03.stdout:vm03 /dev/vdc hdd DWNBRSTVMM03002 20.0G No 17s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T23:56:08.318 INFO:teuthology.orchestra.run.vm03.stdout:vm03 /dev/vdd hdd DWNBRSTVMM03003 20.0G No 17s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T23:56:08.318 INFO:teuthology.orchestra.run.vm03.stdout:vm03 /dev/vde hdd DWNBRSTVMM03004 20.0G No 17s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T23:56:08.318 INFO:teuthology.orchestra.run.vm03.stdout:vm08 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 18s ago Has a FileSystem, Insufficient space (<5GB) 2026-03-06T23:56:08.318 INFO:teuthology.orchestra.run.vm03.stdout:vm08 /dev/vdb hdd DWNBRSTVMM08001 20.0G No 18s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T23:56:08.318 INFO:teuthology.orchestra.run.vm03.stdout:vm08 /dev/vdc hdd DWNBRSTVMM08002 20.0G No 18s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T23:56:08.318 INFO:teuthology.orchestra.run.vm03.stdout:vm08 /dev/vdd hdd DWNBRSTVMM08003 20.0G No 18s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T23:56:08.318 INFO:teuthology.orchestra.run.vm03.stdout:vm08 /dev/vde hdd DWNBRSTVMM08004 20.0G No 18s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-03-06T23:56:08.411 INFO:teuthology.run_tasks:Running task vip.exec... 
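Every device in the two listings above is rejected ("Has a FileSystem", "LVM detected", too few free extents) — expected at this point, since the eight OSDs deployed earlier in the run have already claimed the vdb-vde volumes on both hosts. A sketch of checking this programmatically; the JSON field names (addr, devices, available, rejected_reasons) are assumptions based on typical `ceph orch device ls --format json` output, so verify them against your release:

```python
import json
import subprocess

# Sketch: mirror the AVAILABLE / REJECT REASONS columns of `ceph orch device ls`.
out = subprocess.run(
    ["ceph", "orch", "device", "ls", "--format", "json"],
    capture_output=True, text=True, check=True,
).stdout

for host in json.loads(out):
    for dev in host.get("devices", []):
        if dev.get("available"):
            print(host["addr"], dev["path"], "available")
        else:
            reasons = ", ".join(dev.get("rejected_reasons", []))
            print(host["addr"], dev["path"], "rejected:", reasons)
```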
2026-03-06T23:56:08.413 INFO:tasks.vip:Running commands on role host.a host ubuntu@vm03.local 2026-03-06T23:56:08.413 DEBUG:teuthology.orchestra.run.vm03:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'systemctl stop nfs-server' 2026-03-06T23:56:08.476 INFO:teuthology.orchestra.run.vm03.stderr:+ systemctl stop nfs-server 2026-03-06T23:56:08.482 INFO:tasks.vip:Running commands on role host.b host ubuntu@vm08.local 2026-03-06T23:56:08.482 DEBUG:teuthology.orchestra.run.vm08:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'systemctl stop nfs-server' 2026-03-06T23:56:08.509 INFO:teuthology.orchestra.run.vm08.stderr:+ systemctl stop nfs-server 2026-03-06T23:56:08.517 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-06T23:56:08.580 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm03.local 2026-03-06T23:56:08.580 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- bash -c 'ceph orch apply rgw foorgw --port 8800' 2026-03-06T23:56:08.764 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:08 vm08.local ceph-mon[56019]: from='client.14544 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:56:08.764 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:08 vm08.local ceph-mon[56019]: pgmap v57: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-06T23:56:08.764 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:08 vm08.local ceph-mon[56019]: from='client.14548 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:56:08.764 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:08 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T23:56:08.875 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:08 vm03.local ceph-mon[48028]: from='client.14544 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:56:08.876 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:08 vm03.local ceph-mon[48028]: pgmap v57: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-06T23:56:08.876 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:08 vm03.local ceph-mon[48028]: from='client.14548 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:56:08.876 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:08 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T23:56:08.951 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:56:09.345 INFO:teuthology.orchestra.run.vm03.stdout:Scheduled rgw.foorgw update... 
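Note that `ceph orch apply rgw foorgw --port 8800` only schedules the change — the mgr converges asynchronously, which is why the mon log below shows "Saving service rgw.foorgw spec with placement count:2" and the daemon deployments trickling in over the following seconds. A harness that needs the service up before proceeding would poll until all placed daemons run; a sketch, assuming the usual service_name/status.running/status.size fields of `ceph orch ls --format json`:

```python
import json
import subprocess
import time

def wait_for_service(name, timeout=300, interval=5):
    """Poll `ceph orch ls` until every placed daemon of `name` is running (sketch)."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        out = subprocess.run(
            ["ceph", "orch", "ls", "--format", "json"],
            capture_output=True, text=True, check=True,
        ).stdout
        for svc in json.loads(out):
            if svc.get("service_name") == name:
                status = svc.get("status", {})
                if status.get("size", 0) > 0 and status.get("running") == status.get("size"):
                    return
        time.sleep(interval)
    raise TimeoutError(f"service {name!r} did not converge within {timeout}s")

wait_for_service("rgw.foorgw")
```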
2026-03-06T23:56:09.415 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- bash -c 'ceph nfs cluster create foo --ingress --virtual-ip 12.12.1.103/22' 2026-03-06T23:56:09.640 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:09 vm08.local ceph-mon[56019]: from='client.14552 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:56:09.640 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:09 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:09.640 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:09 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:09.640 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:09 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:09.787 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:09 vm03.local ceph-mon[48028]: from='client.14552 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:56:09.787 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:09 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:09.787 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:09 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:09.787 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:09 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:09.836 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:56:10.644 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:10 vm08.local ceph-mon[56019]: pgmap v58: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-06T23:56:10.644 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:10 vm08.local ceph-mon[56019]: from='client.14556 -' entity='client.admin' cmd=[{"prefix": "orch apply rgw", "svc_id": "foorgw", "port": 8800, "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:56:10.644 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:10 vm08.local ceph-mon[56019]: Saving service rgw.foorgw spec with placement count:2 2026-03-06T23:56:10.644 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:10 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:10.644 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:10 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:10.644 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:10 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:10.644 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:10 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:10.644 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:10 vm08.local ceph-mon[56019]: 
from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd pool create", "pool": ".nfs", "yes_i_really_mean_it": true}]: dispatch 2026-03-06T23:56:11.037 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:10 vm03.local ceph-mon[48028]: pgmap v58: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-06T23:56:11.037 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:10 vm03.local ceph-mon[48028]: from='client.14556 -' entity='client.admin' cmd=[{"prefix": "orch apply rgw", "svc_id": "foorgw", "port": 8800, "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:56:11.037 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:10 vm03.local ceph-mon[48028]: Saving service rgw.foorgw spec with placement count:2 2026-03-06T23:56:11.037 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:10 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:11.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:10 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:11.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:10 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:11.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:10 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:11.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:10 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd pool create", "pool": ".nfs", "yes_i_really_mean_it": true}]: dispatch 2026-03-06T23:56:11.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:11 vm08.local ceph-mon[56019]: from='client.14560 -' entity='client.admin' cmd=[{"prefix": "nfs cluster create", "cluster_id": "foo", "ingress": true, "virtual_ip": "12.12.1.103/22", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:56:11.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:11 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:11.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:11 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:11.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:11 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "osd pool create", "pool": ".nfs", "yes_i_really_mean_it": true}]': finished 2026-03-06T23:56:11.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:11 vm08.local ceph-mon[56019]: osdmap e31: 8 total, 8 up, 8 in 2026-03-06T23:56:11.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:11 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd pool application enable", "pool": ".nfs", "app": "nfs"}]: dispatch 2026-03-06T23:56:11.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:11 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:11.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:11 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:12.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 
23:56:11 vm03.local ceph-mon[48028]: from='client.14560 -' entity='client.admin' cmd=[{"prefix": "nfs cluster create", "cluster_id": "foo", "ingress": true, "virtual_ip": "12.12.1.103/22", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:56:12.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:11 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:12.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:11 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:12.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:11 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "osd pool create", "pool": ".nfs", "yes_i_really_mean_it": true}]': finished 2026-03-06T23:56:12.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:11 vm03.local ceph-mon[48028]: osdmap e31: 8 total, 8 up, 8 in 2026-03-06T23:56:12.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:11 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd pool application enable", "pool": ".nfs", "app": "nfs"}]: dispatch 2026-03-06T23:56:12.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:11 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:12.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:11 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:12.189 INFO:teuthology.run_tasks:Running task vip.exec... 2026-03-06T23:56:12.191 INFO:tasks.vip:Running commands on role host.a host ubuntu@vm03.local 2026-03-06T23:56:12.191 DEBUG:teuthology.orchestra.run.vm03:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'dnf install -y python3-boto3 || apt install -y python3-boto3' 2026-03-06T23:56:12.224 INFO:teuthology.orchestra.run.vm03.stderr:+ dnf install -y python3-boto3 2026-03-06T23:56:12.591 INFO:teuthology.orchestra.run.vm03.stdout:Last metadata expiration check: 0:04:20 ago on Fri 06 Mar 2026 11:51:52 PM CET. 2026-03-06T23:56:12.684 INFO:teuthology.orchestra.run.vm03.stdout:Dependencies resolved. 
2026-03-06T23:56:12.685 INFO:teuthology.orchestra.run.vm03.stdout:================================================================================
2026-03-06T23:56:12.685 INFO:teuthology.orchestra.run.vm03.stdout: Package Arch Version Repository Size
2026-03-06T23:56:12.685 INFO:teuthology.orchestra.run.vm03.stdout:================================================================================
2026-03-06T23:56:12.685 INFO:teuthology.orchestra.run.vm03.stdout:Installing:
2026-03-06T23:56:12.685 INFO:teuthology.orchestra.run.vm03.stdout: python3-boto3 noarch 1.28.62-1.el9 epel 164 k
2026-03-06T23:56:12.685 INFO:teuthology.orchestra.run.vm03.stdout:Installing dependencies:
2026-03-06T23:56:12.685 INFO:teuthology.orchestra.run.vm03.stdout: python3-botocore noarch 1.31.62-2.el9 epel 6.1 M
2026-03-06T23:56:12.685 INFO:teuthology.orchestra.run.vm03.stdout: python3-jmespath noarch 1.0.1-1.el9 appstream 48 k
2026-03-06T23:56:12.685 INFO:teuthology.orchestra.run.vm03.stdout: python3-s3transfer noarch 0.7.0-1.el9 epel 113 k
2026-03-06T23:56:12.685 INFO:teuthology.orchestra.run.vm03.stdout: python3-urllib3 noarch 1.26.5-7.el9 baseos 218 k
2026-03-06T23:56:12.685 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-06T23:56:12.685 INFO:teuthology.orchestra.run.vm03.stdout:Transaction Summary
2026-03-06T23:56:12.685 INFO:teuthology.orchestra.run.vm03.stdout:================================================================================
2026-03-06T23:56:12.685 INFO:teuthology.orchestra.run.vm03.stdout:Install 5 Packages
2026-03-06T23:56:12.685 INFO:teuthology.orchestra.run.vm03.stdout:
2026-03-06T23:56:12.685 INFO:teuthology.orchestra.run.vm03.stdout:Total download size: 6.6 M
2026-03-06T23:56:12.685 INFO:teuthology.orchestra.run.vm03.stdout:Installed size: 86 M
2026-03-06T23:56:12.685 INFO:teuthology.orchestra.run.vm03.stdout:Downloading Packages:
2026-03-06T23:56:12.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:12 vm08.local ceph-mon[56019]: pgmap v59: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-06T23:56:12.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:12 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "osd pool application enable", "pool": ".nfs", "app": "nfs"}]': finished 2026-03-06T23:56:12.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:12 vm08.local ceph-mon[56019]: osdmap e32: 8 total, 8 up, 8 in 2026-03-06T23:56:12.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:12 vm08.local ceph-mon[56019]: Saving service nfs.foo spec with placement count:1 2026-03-06T23:56:12.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:12 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:12.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:12 vm08.local ceph-mon[56019]: Saving service ingress.nfs.foo spec with placement count:2 2026-03-06T23:56:12.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:12 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:12.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:12 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:12.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:12 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:12.994 
INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:12 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:56:12.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:12 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T23:56:12.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:12 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:12.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:12 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T23:56:12.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:12 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foorgw.vm08.taucyu", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-06T23:56:12.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:12 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foorgw.vm08.taucyu", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-06T23:56:12.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:12 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:12.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:12 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:56:12.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:12 vm08.local ceph-mon[56019]: Deploying daemon rgw.foorgw.vm08.taucyu on vm08 2026-03-06T23:56:13.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:12 vm03.local ceph-mon[48028]: pgmap v59: 1 pgs: 1 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-06T23:56:13.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:12 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "osd pool application enable", "pool": ".nfs", "app": "nfs"}]': finished 2026-03-06T23:56:13.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:12 vm03.local ceph-mon[48028]: osdmap e32: 8 total, 8 up, 8 in 2026-03-06T23:56:13.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:12 vm03.local ceph-mon[48028]: Saving service nfs.foo spec with placement count:1 2026-03-06T23:56:13.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:12 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:13.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:12 vm03.local ceph-mon[48028]: Saving service ingress.nfs.foo spec with placement count:2 2026-03-06T23:56:13.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:12 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:13.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 
23:56:12 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:13.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:12 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:13.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:12 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:56:13.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:12 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T23:56:13.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:12 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:13.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:12 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T23:56:13.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:12 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foorgw.vm08.taucyu", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-06T23:56:13.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:12 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foorgw.vm08.taucyu", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-06T23:56:13.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:12 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:13.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:12 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:56:13.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:12 vm03.local ceph-mon[48028]: Deploying daemon rgw.foorgw.vm08.taucyu on vm08 2026-03-06T23:56:14.346 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:14 vm08.local ceph-mon[56019]: pgmap v62: 33 pgs: 5 creating+peering, 22 unknown, 6 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-06T23:56:14.346 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:14 vm08.local ceph-mon[56019]: osdmap e33: 8 total, 8 up, 8 in 2026-03-06T23:56:14.346 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:14 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:14.346 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:14 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:14.346 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:14 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:14.346 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:14 vm08.local ceph-mon[56019]: from='mgr.14221 
192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foorgw.vm03.ckntua", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-06T23:56:14.346 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:14 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foorgw.vm03.ckntua", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-06T23:56:14.346 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:14 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:14.346 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:14 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:56:14.346 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:14 vm08.local ceph-mon[56019]: Deploying daemon rgw.foorgw.vm03.ckntua on vm03 2026-03-06T23:56:14.346 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:14 vm08.local ceph-mon[56019]: mgrmap e20: vm03.xzkqce(active, since 84s), standbys: vm08.bnopnr 2026-03-06T23:56:14.360 INFO:teuthology.orchestra.run.vm03.stdout:(1/5): python3-boto3-1.28.62-1.el9.noarch.rpm 16 MB/s | 164 kB 00:00 2026-03-06T23:56:14.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:14 vm03.local ceph-mon[48028]: pgmap v62: 33 pgs: 5 creating+peering, 22 unknown, 6 active+clean; 449 KiB data, 613 MiB used, 159 GiB / 160 GiB avail 2026-03-06T23:56:14.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:14 vm03.local ceph-mon[48028]: osdmap e33: 8 total, 8 up, 8 in 2026-03-06T23:56:14.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:14 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:14.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:14 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:14.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:14 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:14.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:14 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foorgw.vm03.ckntua", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-06T23:56:14.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:14 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foorgw.vm03.ckntua", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-06T23:56:14.361 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:14 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:14.362 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:14 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:56:14.362 
INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:14 vm03.local ceph-mon[48028]: Deploying daemon rgw.foorgw.vm03.ckntua on vm03 2026-03-06T23:56:14.362 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:14 vm03.local ceph-mon[48028]: mgrmap e20: vm03.xzkqce(active, since 84s), standbys: vm08.bnopnr 2026-03-06T23:56:14.444 INFO:teuthology.orchestra.run.vm03.stdout:(2/5): python3-botocore-1.31.62-2.el9.noarch.rp 73 MB/s | 6.1 MB 00:00 2026-03-06T23:56:14.449 INFO:teuthology.orchestra.run.vm03.stdout:(3/5): python3-s3transfer-0.7.0-1.el9.noarch.rp 26 MB/s | 113 kB 00:00 2026-03-06T23:56:14.499 INFO:teuthology.orchestra.run.vm03.stdout:(4/5): python3-jmespath-1.0.1-1.el9.noarch.rpm 321 kB/s | 48 kB 00:00 2026-03-06T23:56:14.545 INFO:teuthology.orchestra.run.vm03.stdout:(5/5): python3-urllib3-1.26.5-7.el9.noarch.rpm 1.1 MB/s | 218 kB 00:00 2026-03-06T23:56:14.545 INFO:teuthology.orchestra.run.vm03.stdout:-------------------------------------------------------------------------------- 2026-03-06T23:56:14.546 INFO:teuthology.orchestra.run.vm03.stdout:Total 3.6 MB/s | 6.6 MB 00:01 2026-03-06T23:56:14.572 INFO:teuthology.orchestra.run.vm03.stdout:Running transaction check 2026-03-06T23:56:14.578 INFO:teuthology.orchestra.run.vm03.stdout:Transaction check succeeded. 2026-03-06T23:56:14.578 INFO:teuthology.orchestra.run.vm03.stdout:Running transaction test 2026-03-06T23:56:14.632 INFO:teuthology.orchestra.run.vm03.stdout:Transaction test succeeded. 2026-03-06T23:56:14.633 INFO:teuthology.orchestra.run.vm03.stdout:Running transaction 2026-03-06T23:56:14.748 INFO:teuthology.orchestra.run.vm03.stdout: Preparing : 1/1 2026-03-06T23:56:14.821 INFO:teuthology.orchestra.run.vm03.stdout: Installing : python3-jmespath-1.0.1-1.el9.noarch 1/5 2026-03-06T23:56:15.414 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:15 vm03.local ceph-mon[48028]: osdmap e34: 8 total, 8 up, 8 in 2026-03-06T23:56:15.414 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:15 vm03.local ceph-mon[48028]: from='client.? 192.168.123.108:0/1884524013' entity='client.rgw.foorgw.vm08.taucyu' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-06T23:56:15.414 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:15 vm03.local ceph-mon[48028]: from='client.? 
' entity='client.rgw.foorgw.vm08.taucyu' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-06T23:56:15.414 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:15 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:15.414 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:15 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:15.414 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:15 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:15.414 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:15 vm03.local ceph-mon[48028]: Saving service rgw.foorgw spec with placement count:2 2026-03-06T23:56:15.414 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:15 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:15.414 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:15 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:15.414 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:15 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:15.415 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:15 vm03.local ceph-mon[48028]: Creating key for client.nfs.foo.0.0.vm03.cvyxvl 2026-03-06T23:56:15.415 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:15 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm03.cvyxvl", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]: dispatch 2026-03-06T23:56:15.415 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:15 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm03.cvyxvl", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]': finished 2026-03-06T23:56:15.415 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:15 vm03.local ceph-mon[48028]: Ensuring nfs.foo.0 is in the ganesha grace table 2026-03-06T23:56:15.415 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:15 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]: dispatch 2026-03-06T23:56:15.415 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:15 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]': finished 2026-03-06T23:56:15.415 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:15 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:56:15.415 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:15 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]: dispatch 
2026-03-06T23:56:15.415 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:15 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]': finished 2026-03-06T23:56:15.415 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:15 vm03.local ceph-mon[48028]: Rados config object exists: conf-nfs.foo 2026-03-06T23:56:15.415 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:15 vm03.local ceph-mon[48028]: Creating key for client.nfs.foo.0.0.vm03.cvyxvl-rgw 2026-03-06T23:56:15.415 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:15 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm03.cvyxvl-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-06T23:56:15.415 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:15 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm03.cvyxvl-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-06T23:56:15.415 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:15 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:56:15.724 INFO:teuthology.orchestra.run.vm03.stdout: Installing : python3-urllib3-1.26.5-7.el9.noarch 2/5 2026-03-06T23:56:15.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:15 vm08.local ceph-mon[56019]: osdmap e34: 8 total, 8 up, 8 in 2026-03-06T23:56:15.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:15 vm08.local ceph-mon[56019]: from='client.? 192.168.123.108:0/1884524013' entity='client.rgw.foorgw.vm08.taucyu' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-06T23:56:15.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:15 vm08.local ceph-mon[56019]: from='client.? 
' entity='client.rgw.foorgw.vm08.taucyu' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-06T23:56:15.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:15 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:15.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:15 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:15.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:15 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:15.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:15 vm08.local ceph-mon[56019]: Saving service rgw.foorgw spec with placement count:2 2026-03-06T23:56:15.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:15 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:15.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:15 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:15.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:15 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:15.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:15 vm08.local ceph-mon[56019]: Creating key for client.nfs.foo.0.0.vm03.cvyxvl 2026-03-06T23:56:15.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:15 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm03.cvyxvl", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]: dispatch 2026-03-06T23:56:15.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:15 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm03.cvyxvl", "caps": ["mon", "allow r", "osd", "allow rw pool=.nfs namespace=foo"]}]': finished 2026-03-06T23:56:15.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:15 vm08.local ceph-mon[56019]: Ensuring nfs.foo.0 is in the ganesha grace table 2026-03-06T23:56:15.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:15 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]: dispatch 2026-03-06T23:56:15.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:15 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "auth get-or-create", "entity": "client.mgr.nfs.grace.nfs.foo", "caps": ["mon", "allow r", "osd", "allow rwx pool .nfs"]}]': finished 2026-03-06T23:56:15.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:15 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:56:15.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:15 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]: dispatch 
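The mon entries above record cephadm's standard key and config handling for a new ganesha daemon: the client.nfs.foo.0.0.vm03.cvyxvl key is scoped to 'allow rw pool=.nfs namespace=foo', a companion -rgw key carries the RGW caps for the export, a short-lived client.mgr.nfs.grace key is created and removed around the ganesha grace-table update, and the daemon's configuration is stored as a RADOS object (conf-nfs.foo) in the .nfs pool rather than as a file on the host. A minimal sketch of inspecting that config object with the python-rados binding, assuming the default conf path and a readable admin keyring as inside `cephadm shell` on these nodes:

    import rados

    # Connect with the cluster's default config; assumes client.admin
    # credentials are available on this host.
    cluster = rados.Rados(conffile='/etc/ceph/ceph.conf')
    cluster.connect()
    try:
        # The ganesha config object lives in the .nfs pool under the
        # cluster_id namespace ('foo' here), matching the daemon key's
        # osd cap 'allow rw pool=.nfs namespace=foo'.
        ioctx = cluster.open_ioctx('.nfs')
        ioctx.set_namespace('foo')
        print(ioctx.read('conf-nfs.foo').decode())
    finally:
        cluster.shutdown()

The namespace is what the per-daemon cap keys on, so each NFS cluster's ganesha daemons can only touch their own export objects.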
2026-03-06T23:56:15.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:15 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "auth rm", "entity": "client.mgr.nfs.grace.nfs.foo"}]': finished 2026-03-06T23:56:15.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:15 vm08.local ceph-mon[56019]: Rados config object exists: conf-nfs.foo 2026-03-06T23:56:15.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:15 vm08.local ceph-mon[56019]: Creating key for client.nfs.foo.0.0.vm03.cvyxvl-rgw 2026-03-06T23:56:15.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:15 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm03.cvyxvl-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-06T23:56:15.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:15 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "auth get-or-create", "entity": "client.nfs.foo.0.0.vm03.cvyxvl-rgw", "caps": ["mon", "allow r", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-06T23:56:15.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:15 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:56:15.751 INFO:teuthology.orchestra.run.vm03.stdout: Installing : python3-botocore-1.31.62-2.el9.noarch 3/5 2026-03-06T23:56:15.784 INFO:teuthology.orchestra.run.vm03.stdout: Installing : python3-s3transfer-0.7.0-1.el9.noarch 4/5 2026-03-06T23:56:15.806 INFO:teuthology.orchestra.run.vm03.stdout: Installing : python3-boto3-1.28.62-1.el9.noarch 5/5 2026-03-06T23:56:16.080 INFO:teuthology.orchestra.run.vm03.stdout: Running scriptlet: python3-boto3-1.28.62-1.el9.noarch 5/5 2026-03-06T23:56:16.080 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : python3-urllib3-1.26.5-7.el9.noarch 1/5 2026-03-06T23:56:16.080 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : python3-jmespath-1.0.1-1.el9.noarch 2/5 2026-03-06T23:56:16.080 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : python3-boto3-1.28.62-1.el9.noarch 3/5 2026-03-06T23:56:16.080 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : python3-botocore-1.31.62-2.el9.noarch 4/5 2026-03-06T23:56:16.156 INFO:teuthology.orchestra.run.vm03.stdout: Verifying : python3-s3transfer-0.7.0-1.el9.noarch 5/5 2026-03-06T23:56:16.156 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:56:16.156 INFO:teuthology.orchestra.run.vm03.stdout:Installed: 2026-03-06T23:56:16.156 INFO:teuthology.orchestra.run.vm03.stdout: python3-boto3-1.28.62-1.el9.noarch python3-botocore-1.31.62-2.el9.noarch 2026-03-06T23:56:16.156 INFO:teuthology.orchestra.run.vm03.stdout: python3-jmespath-1.0.1-1.el9.noarch python3-s3transfer-0.7.0-1.el9.noarch 2026-03-06T23:56:16.156 INFO:teuthology.orchestra.run.vm03.stdout: python3-urllib3-1.26.5-7.el9.noarch 2026-03-06T23:56:16.156 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:56:16.156 INFO:teuthology.orchestra.run.vm03.stdout:Complete! 
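The boto3 install that just completed came from the vip.exec task's 'dnf install -y python3-boto3 || apt install -y python3-boto3' one-liner, which keeps the same test body portable across RPM- and deb-based targets. A rough Python equivalent of that fallback, as a sketch (install_pkg is a hypothetical helper, not part of teuthology):

    import subprocess

    def install_pkg(name):
        # Try the RPM path first, then the deb path, mirroring the
        # `dnf ... || apt ...` shell fallback used by the task above.
        for front in (['sudo', 'dnf', 'install', '-y'],
                      ['sudo', 'apt', 'install', '-y']):
            if subprocess.run(front + [name]).returncode == 0:
                return True
        return False

    install_pkg('python3-boto3')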
2026-03-06T23:56:16.202 DEBUG:teuthology.orchestra.run.vm03:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c '/home/ubuntu/cephtest/cephadm shell radosgw-admin user create --uid foouser --display-name foo > /tmp/user.json' 2026-03-06T23:56:16.276 INFO:teuthology.orchestra.run.vm03.stderr:+ /home/ubuntu/cephtest/cephadm shell radosgw-admin user create --uid foouser --display-name foo 2026-03-06T23:56:16.537 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:16 vm03.local ceph-mon[48028]: Bind address in nfs.foo.0.0.vm03.cvyxvl's ganesha conf is defaulting to empty 2026-03-06T23:56:16.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:16 vm03.local ceph-mon[48028]: Deploying daemon nfs.foo.0.0.vm03.cvyxvl on vm03 2026-03-06T23:56:16.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:16 vm03.local ceph-mon[48028]: pgmap v65: 65 pgs: 9 creating+peering, 30 unknown, 26 active+clean; 449 KiB data, 213 MiB used, 160 GiB / 160 GiB avail; 0 B/s wr, 0 op/s 2026-03-06T23:56:16.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:16 vm03.local ceph-mon[48028]: from='client.? ' entity='client.rgw.foorgw.vm08.taucyu' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished 2026-03-06T23:56:16.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:16 vm03.local ceph-mon[48028]: osdmap e35: 8 total, 8 up, 8 in 2026-03-06T23:56:16.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:16 vm03.local ceph-mon[48028]: Health check failed: 1 pool(s) do not have an application enabled (POOL_APP_NOT_ENABLED) 2026-03-06T23:56:16.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:16 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:16.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:16 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:16.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:16 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:16.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:16 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:16.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:16 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:16.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:16 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:16.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:16 vm03.local ceph-mon[48028]: osdmap e36: 8 total, 8 up, 8 in 2026-03-06T23:56:16.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:16 vm03.local ceph-mon[48028]: from='client.? 192.168.123.108:0/2649005525' entity='client.rgw.foorgw.vm08.taucyu' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]: dispatch 2026-03-06T23:56:16.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:16 vm03.local ceph-mon[48028]: from='client.? 192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]: dispatch 2026-03-06T23:56:16.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:16 vm03.local ceph-mon[48028]: from='client.? 
' entity='client.rgw.foorgw.vm08.taucyu' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]: dispatch 2026-03-06T23:56:16.617 INFO:teuthology.orchestra.run.vm03.stderr:Inferring fsid 386eb88a-19af-11f1-876d-93c9c802cc09 2026-03-06T23:56:16.669 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:56:16.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:16 vm08.local ceph-mon[56019]: Bind address in nfs.foo.0.0.vm03.cvyxvl's ganesha conf is defaulting to empty 2026-03-06T23:56:16.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:16 vm08.local ceph-mon[56019]: Deploying daemon nfs.foo.0.0.vm03.cvyxvl on vm03 2026-03-06T23:56:16.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:16 vm08.local ceph-mon[56019]: pgmap v65: 65 pgs: 9 creating+peering, 30 unknown, 26 active+clean; 449 KiB data, 213 MiB used, 160 GiB / 160 GiB avail; 0 B/s wr, 0 op/s 2026-03-06T23:56:16.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:16 vm08.local ceph-mon[56019]: from='client.? ' entity='client.rgw.foorgw.vm08.taucyu' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished 2026-03-06T23:56:16.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:16 vm08.local ceph-mon[56019]: osdmap e35: 8 total, 8 up, 8 in 2026-03-06T23:56:16.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:16 vm08.local ceph-mon[56019]: Health check failed: 1 pool(s) do not have an application enabled (POOL_APP_NOT_ENABLED) 2026-03-06T23:56:16.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:16 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:16.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:16 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:16.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:16 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:16.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:16 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:16.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:16 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:16.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:16 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:16.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:16 vm08.local ceph-mon[56019]: osdmap e36: 8 total, 8 up, 8 in 2026-03-06T23:56:16.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:16 vm08.local ceph-mon[56019]: from='client.? 192.168.123.108:0/2649005525' entity='client.rgw.foorgw.vm08.taucyu' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]: dispatch 2026-03-06T23:56:16.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:16 vm08.local ceph-mon[56019]: from='client.? 192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]: dispatch 2026-03-06T23:56:16.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:16 vm08.local ceph-mon[56019]: from='client.? 
' entity='client.rgw.foorgw.vm08.taucyu' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]: dispatch 2026-03-06T23:56:16.769 INFO:teuthology.orchestra.run.vm03.stderr:Using ceph image with id '8bccc98d839a' and tag 'cobaltcore-storage-v19.2.3-fasttrack-5' created on 2026-03-06 14:41:18 +0000 UTC 2026-03-06T23:56:16.769 INFO:teuthology.orchestra.run.vm03.stderr:harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0 2026-03-06T23:56:17.743 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:17 vm08.local ceph-mon[56019]: Deploying daemon haproxy.nfs.foo.vm03.pnfmnq on vm03 2026-03-06T23:56:17.743 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:17 vm08.local ceph-mon[56019]: from='client.? 192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished 2026-03-06T23:56:17.743 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:17 vm08.local ceph-mon[56019]: from='client.? ' entity='client.rgw.foorgw.vm08.taucyu' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished 2026-03-06T23:56:17.743 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:17 vm08.local ceph-mon[56019]: osdmap e37: 8 total, 8 up, 8 in 2026-03-06T23:56:17.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:17 vm03.local ceph-mon[48028]: Deploying daemon haproxy.nfs.foo.vm03.pnfmnq on vm03 2026-03-06T23:56:17.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:17 vm03.local ceph-mon[48028]: from='client.? 192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished 2026-03-06T23:56:17.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:17 vm03.local ceph-mon[48028]: from='client.? ' entity='client.rgw.foorgw.vm08.taucyu' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished 2026-03-06T23:56:17.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:17 vm03.local ceph-mon[48028]: osdmap e37: 8 total, 8 up, 8 in 2026-03-06T23:56:18.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:18 vm08.local ceph-mon[56019]: pgmap v68: 97 pgs: 9 creating+peering, 53 unknown, 35 active+clean; 449 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 0 B/s wr, 0 op/s 2026-03-06T23:56:18.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:18 vm08.local ceph-mon[56019]: osdmap e38: 8 total, 8 up, 8 in 2026-03-06T23:56:18.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:18 vm08.local ceph-mon[56019]: from='client.? 192.168.123.108:0/2649005525' entity='client.rgw.foorgw.vm08.taucyu' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]: dispatch 2026-03-06T23:56:18.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:18 vm08.local ceph-mon[56019]: from='client.? 192.168.123.103:0/621094677' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]: dispatch 2026-03-06T23:56:18.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:18 vm08.local ceph-mon[56019]: from='client.? 
192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]: dispatch 2026-03-06T23:56:18.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:18 vm08.local ceph-mon[56019]: from='client.? ' entity='client.rgw.foorgw.vm08.taucyu' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]: dispatch 2026-03-06T23:56:18.787 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:18 vm03.local ceph-mon[48028]: pgmap v68: 97 pgs: 9 creating+peering, 53 unknown, 35 active+clean; 449 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 0 B/s wr, 0 op/s 2026-03-06T23:56:18.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:18 vm03.local ceph-mon[48028]: osdmap e38: 8 total, 8 up, 8 in 2026-03-06T23:56:18.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:18 vm03.local ceph-mon[48028]: from='client.? 192.168.123.108:0/2649005525' entity='client.rgw.foorgw.vm08.taucyu' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]: dispatch 2026-03-06T23:56:18.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:18 vm03.local ceph-mon[48028]: from='client.? 192.168.123.103:0/621094677' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]: dispatch 2026-03-06T23:56:18.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:18 vm03.local ceph-mon[48028]: from='client.? 192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]: dispatch 2026-03-06T23:56:18.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:18 vm03.local ceph-mon[48028]: from='client.? ' entity='client.rgw.foorgw.vm08.taucyu' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]: dispatch 2026-03-06T23:56:20.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:19 vm08.local ceph-mon[56019]: pgmap v71: 129 pgs: 11 creating+peering, 65 unknown, 53 active+clean; 450 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 2.2 KiB/s rd, 1023 B/s wr, 4 op/s 2026-03-06T23:56:20.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:19 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:20.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:19 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-06T23:56:20.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:19 vm08.local ceph-mon[56019]: from='client.? 192.168.123.103:0/621094677' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished 2026-03-06T23:56:20.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:19 vm08.local ceph-mon[56019]: from='client.? 192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished 2026-03-06T23:56:20.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:19 vm08.local ceph-mon[56019]: from='client.? 
' entity='client.rgw.foorgw.vm08.taucyu' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished 2026-03-06T23:56:20.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:19 vm08.local ceph-mon[56019]: osdmap e39: 8 total, 8 up, 8 in 2026-03-06T23:56:20.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:19 vm03.local ceph-mon[48028]: pgmap v71: 129 pgs: 11 creating+peering, 65 unknown, 53 active+clean; 450 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 2.2 KiB/s rd, 1023 B/s wr, 4 op/s 2026-03-06T23:56:20.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:19 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:20.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:19 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-06T23:56:20.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:19 vm03.local ceph-mon[48028]: from='client.? 192.168.123.103:0/621094677' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished 2026-03-06T23:56:20.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:19 vm03.local ceph-mon[48028]: from='client.? 192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished 2026-03-06T23:56:20.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:19 vm03.local ceph-mon[48028]: from='client.? ' entity='client.rgw.foorgw.vm08.taucyu' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished 2026-03-06T23:56:20.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:19 vm03.local ceph-mon[48028]: osdmap e39: 8 total, 8 up, 8 in 2026-03-06T23:56:21.494 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:21 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:21.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:21 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:21.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:21 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:21.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:21 vm08.local ceph-mon[56019]: Deploying daemon haproxy.nfs.foo.vm08.kzoyjo on vm08 2026-03-06T23:56:21.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:21 vm08.local ceph-mon[56019]: osdmap e40: 8 total, 8 up, 8 in 2026-03-06T23:56:21.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:21 vm08.local ceph-mon[56019]: from='client.? 192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]: dispatch 2026-03-06T23:56:21.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:21 vm08.local ceph-mon[56019]: from='client.? 192.168.123.103:0/621094677' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]: dispatch 2026-03-06T23:56:21.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:21 vm08.local ceph-mon[56019]: from='client.? 
192.168.123.108:0/2649005525' entity='client.rgw.foorgw.vm08.taucyu' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]: dispatch 2026-03-06T23:56:21.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:21 vm08.local ceph-mon[56019]: from='client.? ' entity='client.rgw.foorgw.vm08.taucyu' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]: dispatch 2026-03-06T23:56:21.495 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:21 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:21.537 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:21 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:21.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:21 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:21.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:21 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:21.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:21 vm03.local ceph-mon[48028]: Deploying daemon haproxy.nfs.foo.vm08.kzoyjo on vm08 2026-03-06T23:56:21.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:21 vm03.local ceph-mon[48028]: osdmap e40: 8 total, 8 up, 8 in 2026-03-06T23:56:21.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:21 vm03.local ceph-mon[48028]: from='client.? 192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]: dispatch 2026-03-06T23:56:21.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:21 vm03.local ceph-mon[48028]: from='client.? 192.168.123.103:0/621094677' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]: dispatch 2026-03-06T23:56:21.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:21 vm03.local ceph-mon[48028]: from='client.? 192.168.123.108:0/2649005525' entity='client.rgw.foorgw.vm08.taucyu' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]: dispatch 2026-03-06T23:56:21.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:21 vm03.local ceph-mon[48028]: from='client.? ' entity='client.rgw.foorgw.vm08.taucyu' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]: dispatch 2026-03-06T23:56:21.538 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:21 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:22.543 INFO:teuthology.run_tasks:Running task python... 
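The python task announced above consumes the /tmp/user.json that radosgw-admin wrote a few steps earlier; only the first S3 key pair matters to it. A sketch of the two fields it extracts (the full radosgw-admin output carries many more fields, elided here):

    import json

    with open('/tmp/user.json') as f:
        user = json.load(f)

    # radosgw-admin emits a list of S3 key pairs; the test script below
    # uses the first one to authenticate against the rgw endpoint.
    access_key = user['keys'][0]['access_key']
    secret_key = user['keys'][0]['secret_key']

The endpoint the script targets, http://localhost:8800, is the rgw.foorgw service applied earlier with port 8800.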
2026-03-06T23:56:22.546 INFO:tasks.python:Running python on role host.a host ubuntu@vm03.local
2026-03-06T23:56:22.546 INFO:tasks.python:import boto3
import json
with open('/tmp/user.json', 'rt') as f:
    info = json.loads(f.read())
s3 = boto3.resource(
    's3',
    aws_access_key_id=info['keys'][0]['access_key'],
    aws_secret_access_key=info['keys'][0]['secret_key'],
    endpoint_url='http://localhost:8800',
)
bucket = s3.Bucket('foobucket')
bucket.create()
bucket.put_object(Key='myobject', Body='thebody')
2026-03-06T23:56:22.546 DEBUG:teuthology.orchestra.run.vm03:> sudo TESTDIR=/home/ubuntu/cephtest python3 2026-03-06T23:56:22.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:22 vm08.local ceph-mon[56019]: pgmap v74: 161 pgs: 15 creating+peering, 51 unknown, 95 active+clean; 450 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 9.2 KiB/s rd, 3.7 KiB/s wr, 15 op/s 2026-03-06T23:56:22.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:22 vm08.local ceph-mon[56019]: from='client.? 192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished 2026-03-06T23:56:22.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:22 vm08.local ceph-mon[56019]: from='client.? 192.168.123.103:0/621094677' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished 2026-03-06T23:56:22.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:22 vm08.local ceph-mon[56019]: from='client.? ' entity='client.rgw.foorgw.vm08.taucyu' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished 2026-03-06T23:56:22.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:22 vm08.local ceph-mon[56019]: from='client.? 192.168.123.108:0/2649005525' entity='client.rgw.foorgw.vm08.taucyu' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-06T23:56:22.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:22 vm08.local ceph-mon[56019]: osdmap e41: 8 total, 8 up, 8 in 2026-03-06T23:56:22.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:22 vm08.local ceph-mon[56019]: from='client.? ' entity='client.rgw.foorgw.vm08.taucyu' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-06T23:56:22.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:22 vm08.local ceph-mon[56019]: from='client.? 192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-06T23:56:22.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:22 vm08.local ceph-mon[56019]: from='client.? 192.168.123.103:0/621094677' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-06T23:56:22.789 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:22 vm03.local ceph-mon[48028]: pgmap v74: 161 pgs: 15 creating+peering, 51 unknown, 95 active+clean; 450 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 9.2 KiB/s rd, 3.7 KiB/s wr, 15 op/s 2026-03-06T23:56:22.789 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:22 vm03.local ceph-mon[48028]: from='client.? 
192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished 2026-03-06T23:56:22.789 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:22 vm03.local ceph-mon[48028]: from='client.? 192.168.123.103:0/621094677' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished 2026-03-06T23:56:22.790 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:22 vm03.local ceph-mon[48028]: from='client.? ' entity='client.rgw.foorgw.vm08.taucyu' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished 2026-03-06T23:56:22.790 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:22 vm03.local ceph-mon[48028]: from='client.? 192.168.123.108:0/2649005525' entity='client.rgw.foorgw.vm08.taucyu' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-06T23:56:22.790 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:22 vm03.local ceph-mon[48028]: osdmap e41: 8 total, 8 up, 8 in 2026-03-06T23:56:22.790 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:22 vm03.local ceph-mon[48028]: from='client.? ' entity='client.rgw.foorgw.vm08.taucyu' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-06T23:56:22.790 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:22 vm03.local ceph-mon[48028]: from='client.? 192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-06T23:56:22.790 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:22 vm03.local ceph-mon[48028]: from='client.? 192.168.123.103:0/621094677' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-06T23:56:23.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:23 vm08.local ceph-mon[56019]: from='client.? ' entity='client.rgw.foorgw.vm08.taucyu' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-06T23:56:23.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:23 vm08.local ceph-mon[56019]: from='client.? 192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-06T23:56:23.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:23 vm08.local ceph-mon[56019]: from='client.? 192.168.123.103:0/621094677' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-06T23:56:23.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:23 vm08.local ceph-mon[56019]: osdmap e42: 8 total, 8 up, 8 in 2026-03-06T23:56:23.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:23 vm03.local ceph-mon[48028]: from='client.? ' entity='client.rgw.foorgw.vm08.taucyu' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-06T23:56:23.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:23 vm03.local ceph-mon[48028]: from='client.? 
192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-06T23:56:23.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:23 vm03.local ceph-mon[48028]: from='client.? 192.168.123.103:0/621094677' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-06T23:56:23.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:23 vm03.local ceph-mon[48028]: osdmap e42: 8 total, 8 up, 8 in 2026-03-06T23:56:24.482 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:24 vm08.local ceph-mon[56019]: pgmap v77: 161 pgs: 15 creating+peering, 30 unknown, 116 active+clean; 450 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 7.0 KiB/s rd, 2.7 KiB/s wr, 10 op/s 2026-03-06T23:56:24.482 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:24 vm08.local ceph-mon[56019]: Health check cleared: POOL_APP_NOT_ENABLED (was: 1 pool(s) do not have an application enabled) 2026-03-06T23:56:24.482 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:24 vm08.local ceph-mon[56019]: Cluster is now healthy 2026-03-06T23:56:24.482 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:24 vm08.local ceph-mon[56019]: osdmap e43: 8 total, 8 up, 8 in 2026-03-06T23:56:24.482 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:24 vm08.local ceph-mon[56019]: from='client.? 192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.buckets.index","app": "rgw"}]: dispatch 2026-03-06T23:56:24.787 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:24 vm03.local ceph-mon[48028]: pgmap v77: 161 pgs: 15 creating+peering, 30 unknown, 116 active+clean; 450 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 7.0 KiB/s rd, 2.7 KiB/s wr, 10 op/s 2026-03-06T23:56:24.787 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:24 vm03.local ceph-mon[48028]: Health check cleared: POOL_APP_NOT_ENABLED (was: 1 pool(s) do not have an application enabled) 2026-03-06T23:56:24.787 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:24 vm03.local ceph-mon[48028]: Cluster is now healthy 2026-03-06T23:56:24.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:24 vm03.local ceph-mon[48028]: osdmap e43: 8 total, 8 up, 8 in 2026-03-06T23:56:24.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:24 vm03.local ceph-mon[48028]: from='client.? 192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.buckets.index","app": "rgw"}]: dispatch 2026-03-06T23:56:25.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:25 vm08.local ceph-mon[56019]: from='client.? 192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.buckets.index","app": "rgw"}]': finished 2026-03-06T23:56:25.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:25 vm08.local ceph-mon[56019]: osdmap e44: 8 total, 8 up, 8 in 2026-03-06T23:56:25.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:25 vm08.local ceph-mon[56019]: from='client.? 
192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.buckets.index", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-06T23:56:25.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:25 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:25.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:25 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:25.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:25 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:25.745 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:25 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:25.787 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:25 vm03.local ceph-mon[48028]: from='client.? 192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.buckets.index","app": "rgw"}]': finished 2026-03-06T23:56:25.787 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:25 vm03.local ceph-mon[48028]: osdmap e44: 8 total, 8 up, 8 in 2026-03-06T23:56:25.787 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:25 vm03.local ceph-mon[48028]: from='client.? 192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.buckets.index", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-06T23:56:25.787 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:25 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:25.787 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:25 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:25.787 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:25 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:25.787 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:25 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:26.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:26 vm08.local ceph-mon[56019]: 12.12.1.103 is in 12.12.0.0/22 on vm08 interface eth0 2026-03-06T23:56:26.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:26 vm08.local ceph-mon[56019]: 12.12.1.103 is in 12.12.0.0/22 on vm03 interface eth0 2026-03-06T23:56:26.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:26 vm08.local ceph-mon[56019]: Deploying daemon keepalived.nfs.foo.vm08.hxbwag on vm08 2026-03-06T23:56:26.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:26 vm08.local ceph-mon[56019]: pgmap v80: 193 pgs: 18 creating+peering, 21 unknown, 154 active+clean; 453 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 172 KiB/s rd, 6.2 KiB/s wr, 336 op/s 2026-03-06T23:56:26.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:26 vm08.local ceph-mon[56019]: from='client.? 
192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.buckets.index", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-06T23:56:26.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:26 vm08.local ceph-mon[56019]: osdmap e45: 8 total, 8 up, 8 in 2026-03-06T23:56:27.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:26 vm03.local ceph-mon[48028]: 12.12.1.103 is in 12.12.0.0/22 on vm08 interface eth0 2026-03-06T23:56:27.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:26 vm03.local ceph-mon[48028]: 12.12.1.103 is in 12.12.0.0/22 on vm03 interface eth0 2026-03-06T23:56:27.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:26 vm03.local ceph-mon[48028]: Deploying daemon keepalived.nfs.foo.vm08.hxbwag on vm08 2026-03-06T23:56:27.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:26 vm03.local ceph-mon[48028]: pgmap v80: 193 pgs: 18 creating+peering, 21 unknown, 154 active+clean; 453 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 172 KiB/s rd, 6.2 KiB/s wr, 336 op/s 2026-03-06T23:56:27.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:26 vm03.local ceph-mon[48028]: from='client.? 192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.buckets.index", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-06T23:56:27.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:26 vm03.local ceph-mon[48028]: osdmap e45: 8 total, 8 up, 8 in 2026-03-06T23:56:27.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:27 vm08.local ceph-mon[56019]: osdmap e46: 8 total, 8 up, 8 in 2026-03-06T23:56:27.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:27 vm08.local ceph-mon[56019]: from='client.? 192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.buckets.data","app": "rgw"}]: dispatch 2026-03-06T23:56:28.037 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:27 vm03.local ceph-mon[48028]: osdmap e46: 8 total, 8 up, 8 in 2026-03-06T23:56:28.037 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:27 vm03.local ceph-mon[48028]: from='client.? 192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd=[{"prefix": "osd pool application enable","pool": "default.rgw.buckets.data","app": "rgw"}]: dispatch 2026-03-06T23:56:28.736 INFO:teuthology.run_tasks:Running task cephadm.shell... 
2026-03-06T23:56:28.739 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm03.local 2026-03-06T23:56:28.739 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- bash -c 'ceph nfs export create rgw --cluster-id foo --pseudo-path /foouser --user-id foouser' 2026-03-06T23:56:28.951 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:28 vm08.local ceph-mon[56019]: pgmap v83: 225 pgs: 16 creating+peering, 42 unknown, 167 active+clean; 454 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 235 KiB/s rd, 8.7 KiB/s wr, 460 op/s 2026-03-06T23:56:28.951 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:28 vm08.local ceph-mon[56019]: Health check failed: 1 pool(s) do not have an application enabled (POOL_APP_NOT_ENABLED) 2026-03-06T23:56:28.951 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:28 vm08.local ceph-mon[56019]: from='client.? 192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.buckets.data","app": "rgw"}]': finished 2026-03-06T23:56:28.951 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:28 vm08.local ceph-mon[56019]: osdmap e47: 8 total, 8 up, 8 in 2026-03-06T23:56:28.951 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:28 vm08.local ceph-mon[56019]: from='client.? 192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.buckets.data", "var": "bulk", "val": "true"}]: dispatch 2026-03-06T23:56:29.037 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:28 vm03.local ceph-mon[48028]: pgmap v83: 225 pgs: 16 creating+peering, 42 unknown, 167 active+clean; 454 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 235 KiB/s rd, 8.7 KiB/s wr, 460 op/s 2026-03-06T23:56:29.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:28 vm03.local ceph-mon[48028]: Health check failed: 1 pool(s) do not have an application enabled (POOL_APP_NOT_ENABLED) 2026-03-06T23:56:29.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:28 vm03.local ceph-mon[48028]: from='client.? 192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.buckets.data","app": "rgw"}]': finished 2026-03-06T23:56:29.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:28 vm03.local ceph-mon[48028]: osdmap e47: 8 total, 8 up, 8 in 2026-03-06T23:56:29.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:28 vm03.local ceph-mon[48028]: from='client.? 
192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.buckets.data", "var": "bulk", "val": "true"}]: dispatch 2026-03-06T23:56:29.073 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:56:29.603 INFO:teuthology.orchestra.run.vm03.stdout:{ 2026-03-06T23:56:29.603 INFO:teuthology.orchestra.run.vm03.stdout: "bind": "/foouser", 2026-03-06T23:56:29.603 INFO:teuthology.orchestra.run.vm03.stdout: "cluster": "foo", 2026-03-06T23:56:29.603 INFO:teuthology.orchestra.run.vm03.stdout: "mode": "RW", 2026-03-06T23:56:29.603 INFO:teuthology.orchestra.run.vm03.stdout: "path": "/", 2026-03-06T23:56:29.603 INFO:teuthology.orchestra.run.vm03.stdout: "squash": "none" 2026-03-06T23:56:29.603 INFO:teuthology.orchestra.run.vm03.stdout:} 2026-03-06T23:56:29.714 INFO:teuthology.run_tasks:Running task cephadm.wait_for_service... 2026-03-06T23:56:29.717 INFO:tasks.cephadm:Waiting for ceph service nfs.foo to start (timeout 300)... 2026-03-06T23:56:29.717 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph orch ls -f json 2026-03-06T23:56:29.944 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:29 vm03.local ceph-mon[48028]: from='client.? 192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.buckets.data", "var": "bulk", "val": "true"}]': finished 2026-03-06T23:56:29.945 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:29 vm03.local ceph-mon[48028]: osdmap e48: 8 total, 8 up, 8 in 2026-03-06T23:56:29.945 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:29 vm03.local ceph-mon[48028]: pgmap v86: 225 pgs: 21 creating+peering, 20 unknown, 184 active+clean; 454 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 91 KiB/s rd, 2.7 KiB/s wr, 177 op/s 2026-03-06T23:56:29.945 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:29 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:29.945 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:29 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:29.945 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:29 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:29.945 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:29 vm03.local ceph-mon[48028]: 12.12.1.103 is in 12.12.0.0/22 on vm03 interface eth0 2026-03-06T23:56:29.945 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:29 vm03.local ceph-mon[48028]: 12.12.1.103 is in 12.12.0.0/22 on vm08 interface eth0 2026-03-06T23:56:29.945 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:29 vm03.local ceph-mon[48028]: Deploying daemon keepalived.nfs.foo.vm03.pgxdbu on vm03 2026-03-06T23:56:29.945 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:29 vm03.local ceph-mon[48028]: from='client.14614 -' entity='client.admin' cmd=[{"prefix": "nfs export create rgw", "cluster_id": "foo", "pseudo_path": "/foouser", "user_id": "foouser", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:56:29.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:29 vm08.local 
ceph-mon[56019]: from='client.? 192.168.123.103:0/2742430888' entity='client.rgw.foorgw.vm03.ckntua' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.buckets.data", "var": "bulk", "val": "true"}]': finished 2026-03-06T23:56:29.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:29 vm08.local ceph-mon[56019]: osdmap e48: 8 total, 8 up, 8 in 2026-03-06T23:56:29.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:29 vm08.local ceph-mon[56019]: pgmap v86: 225 pgs: 21 creating+peering, 20 unknown, 184 active+clean; 454 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 91 KiB/s rd, 2.7 KiB/s wr, 177 op/s 2026-03-06T23:56:29.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:29 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:29.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:29 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:29.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:29 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:29.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:29 vm08.local ceph-mon[56019]: 12.12.1.103 is in 12.12.0.0/22 on vm03 interface eth0 2026-03-06T23:56:29.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:29 vm08.local ceph-mon[56019]: 12.12.1.103 is in 12.12.0.0/22 on vm08 interface eth0 2026-03-06T23:56:29.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:29 vm08.local ceph-mon[56019]: Deploying daemon keepalived.nfs.foo.vm03.pgxdbu on vm03 2026-03-06T23:56:29.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:29 vm08.local ceph-mon[56019]: from='client.14614 -' entity='client.admin' cmd=[{"prefix": "nfs export create rgw", "cluster_id": "foo", "pseudo_path": "/foouser", "user_id": "foouser", "target": ["mon-mgr", ""]}]: dispatch 2026-03-06T23:56:30.121 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:56:30.451 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:56:30.451 INFO:teuthology.orchestra.run.vm03.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-06T22:54:01.168936Z", "last_refresh": "2026-03-06T22:56:10.048976Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-06T22:54:54.356818Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-06T22:53:59.566189Z", "last_refresh": "2026-03-06T22:56:09.193120Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T22:54:55.332232Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-06T22:53:59.141843Z", "last_refresh": "2026-03-06T22:56:09.193169Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-06T22:54:00.391211Z", "last_refresh": "2026-03-06T22:56:10.049002Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-06T22:56:29.279565Z service:ingress.nfs.foo [INFO] \"service was created\""], 
"placement": {"count": 2}, "service_id": "nfs.foo", "service_name": "ingress.nfs.foo", "service_type": "ingress", "spec": {"backend_service": "nfs.foo", "first_virtual_router_id": 50, "frontend_port": 2049, "monitor_port": 9049, "virtual_ip": "12.12.1.103/22"}, "status": {"created": "2026-03-06T22:56:12.119607Z", "ports": [2049, 9049], "running": 0, "size": 4, "virtual_ip": "12.12.1.103/22"}}, {"events": ["2026-03-06T22:54:59.051172Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-06T22:53:58.796341Z", "last_refresh": "2026-03-06T22:56:09.193238Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T22:55:00.529428Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm03:192.168.123.103=vm03", "vm08:192.168.123.108=vm08"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-06T22:54:22.648138Z", "last_refresh": "2026-03-06T22:56:09.193266Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T22:56:16.158012Z service:nfs.foo [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "foo", "service_name": "nfs.foo", "service_type": "nfs", "spec": {"port": 12049}, "status": {"created": "2026-03-06T22:56:12.112297Z", "ports": [12049], "running": 0, "size": 1}}, {"events": ["2026-03-06T22:54:58.093905Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-06T22:54:00.778915Z", "last_refresh": "2026-03-06T22:56:09.193204Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-06T22:55:10.345071Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-06T22:55:10.331918Z", "last_refresh": "2026-03-06T22:56:09.193293Z", "running": 8, "size": 8}}, {"events": ["2026-03-06T22:55:00.531903Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-06T22:53:59.964507Z", "last_refresh": "2026-03-06T22:56:10.049028Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-03-06T22:56:14.501210Z service:rgw.foorgw [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foorgw", "service_name": "rgw.foorgw", "service_type": "rgw", "spec": {"rgw_frontend_port": 8800}, "status": {"created": "2026-03-06T22:56:14.495594Z", "ports": [8800], "running": 0, "size": 2}}] 2026-03-06T23:56:30.501 INFO:tasks.cephadm:nfs.foo has 0/1 2026-03-06T23:56:30.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:30 vm08.local ceph-mon[56019]: Health check cleared: POOL_APP_NOT_ENABLED (was: 1 pool(s) do not have an application enabled) 2026-03-06T23:56:30.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:30 vm08.local ceph-mon[56019]: Cluster is now healthy 2026-03-06T23:56:30.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:30 vm08.local ceph-mon[56019]: from='client.14642 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T23:56:31.037 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:30 vm03.local 
ceph-mon[48028]: Health check cleared: POOL_APP_NOT_ENABLED (was: 1 pool(s) do not have an application enabled) 2026-03-06T23:56:31.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:30 vm03.local ceph-mon[48028]: Cluster is now healthy 2026-03-06T23:56:31.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:30 vm03.local ceph-mon[48028]: from='client.14642 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T23:56:31.502 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph orch ls -f json 2026-03-06T23:56:31.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:31 vm08.local ceph-mon[56019]: pgmap v87: 225 pgs: 5 creating+peering, 1 unknown, 219 active+clean; 455 KiB data, 219 MiB used, 160 GiB / 160 GiB avail; 52 KiB/s rd, 2.4 KiB/s wr, 104 op/s 2026-03-06T23:56:32.037 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:31 vm03.local ceph-mon[48028]: pgmap v87: 225 pgs: 5 creating+peering, 1 unknown, 219 active+clean; 455 KiB data, 219 MiB used, 160 GiB / 160 GiB avail; 52 KiB/s rd, 2.4 KiB/s wr, 104 op/s 2026-03-06T23:56:32.129 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:56:32.552 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:56:32.552 INFO:teuthology.orchestra.run.vm03.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-06T22:54:01.168936Z", "last_refresh": "2026-03-06T22:56:10.048976Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-06T22:54:54.356818Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-06T22:53:59.566189Z", "last_refresh": "2026-03-06T22:56:09.193120Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T22:54:55.332232Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-06T22:53:59.141843Z", "last_refresh": "2026-03-06T22:56:09.193169Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-06T22:54:00.391211Z", "last_refresh": "2026-03-06T22:56:10.049002Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-06T22:56:29.279565Z service:ingress.nfs.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "nfs.foo", "service_name": "ingress.nfs.foo", "service_type": "ingress", "spec": {"backend_service": "nfs.foo", "first_virtual_router_id": 50, "frontend_port": 2049, "monitor_port": 9049, "virtual_ip": "12.12.1.103/22"}, "status": {"created": "2026-03-06T22:56:12.119607Z", "ports": [2049, 9049], "running": 0, "size": 4, "virtual_ip": "12.12.1.103/22"}}, {"events": ["2026-03-06T22:54:59.051172Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-06T22:53:58.796341Z", "last_refresh": 
"2026-03-06T22:56:09.193238Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T22:55:00.529428Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm03:192.168.123.103=vm03", "vm08:192.168.123.108=vm08"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-06T22:54:22.648138Z", "last_refresh": "2026-03-06T22:56:09.193266Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T22:56:16.158012Z service:nfs.foo [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "foo", "service_name": "nfs.foo", "service_type": "nfs", "spec": {"port": 12049}, "status": {"created": "2026-03-06T22:56:12.112297Z", "ports": [12049], "running": 0, "size": 1}}, {"events": ["2026-03-06T22:54:58.093905Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-06T22:54:00.778915Z", "last_refresh": "2026-03-06T22:56:09.193204Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-06T22:55:10.345071Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-06T22:55:10.331918Z", "last_refresh": "2026-03-06T22:56:09.193293Z", "running": 8, "size": 8}}, {"events": ["2026-03-06T22:55:00.531903Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-06T22:53:59.964507Z", "last_refresh": "2026-03-06T22:56:10.049028Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-03-06T22:56:14.501210Z service:rgw.foorgw [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foorgw", "service_name": "rgw.foorgw", "service_type": "rgw", "spec": {"rgw_frontend_port": 8800}, "status": {"created": "2026-03-06T22:56:14.495594Z", "ports": [8800], "running": 0, "size": 2}}] 2026-03-06T23:56:32.631 INFO:tasks.cephadm:nfs.foo has 0/1 2026-03-06T23:56:32.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:32 vm08.local ceph-mon[56019]: from='client.24405 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T23:56:33.037 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:32 vm03.local ceph-mon[48028]: from='client.24405 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T23:56:33.632 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph orch ls -f json 2026-03-06T23:56:33.925 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:33 vm03.local ceph-mon[48028]: pgmap v88: 225 pgs: 225 active+clean; 455 KiB data, 219 MiB used, 160 GiB / 160 GiB avail; 21 KiB/s rd, 1.1 KiB/s wr, 43 op/s 2026-03-06T23:56:33.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:33 vm08.local ceph-mon[56019]: pgmap v88: 225 pgs: 225 active+clean; 455 KiB data, 219 MiB used, 160 GiB / 160 GiB avail; 21 KiB/s rd, 1.1 KiB/s wr, 43 op/s 2026-03-06T23:56:34.189 
INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:56:34.568 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:56:34.568 INFO:teuthology.orchestra.run.vm03.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-06T22:54:01.168936Z", "last_refresh": "2026-03-06T22:56:10.048976Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-06T22:54:54.356818Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-06T22:53:59.566189Z", "last_refresh": "2026-03-06T22:56:09.193120Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T22:54:55.332232Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-06T22:53:59.141843Z", "last_refresh": "2026-03-06T22:56:09.193169Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-06T22:54:00.391211Z", "last_refresh": "2026-03-06T22:56:10.049002Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-06T22:56:34.019726Z service:ingress.nfs.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "nfs.foo", "service_name": "ingress.nfs.foo", "service_type": "ingress", "spec": {"backend_service": "nfs.foo", "first_virtual_router_id": 50, "frontend_port": 2049, "monitor_port": 9049, "virtual_ip": "12.12.1.103/22"}, "status": {"created": "2026-03-06T22:56:12.119607Z", "ports": [2049, 9049], "running": 0, "size": 4, "virtual_ip": "12.12.1.103/22"}}, {"events": ["2026-03-06T22:54:59.051172Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-06T22:53:58.796341Z", "last_refresh": "2026-03-06T22:56:09.193238Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T22:55:00.529428Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm03:192.168.123.103=vm03", "vm08:192.168.123.108=vm08"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-06T22:54:22.648138Z", "last_refresh": "2026-03-06T22:56:09.193266Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T22:56:16.158012Z service:nfs.foo [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "foo", "service_name": "nfs.foo", "service_type": "nfs", "spec": {"port": 12049}, "status": {"created": "2026-03-06T22:56:12.112297Z", "ports": [12049], "running": 0, "size": 1}}, {"events": ["2026-03-06T22:54:58.093905Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-06T22:54:00.778915Z", "last_refresh": "2026-03-06T22:56:09.193204Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-06T22:55:10.345071Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, 
"filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-06T22:55:10.331918Z", "last_refresh": "2026-03-06T22:56:09.193293Z", "running": 8, "size": 8}}, {"events": ["2026-03-06T22:55:00.531903Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-06T22:53:59.964507Z", "last_refresh": "2026-03-06T22:56:10.049028Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-03-06T22:56:14.501210Z service:rgw.foorgw [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foorgw", "service_name": "rgw.foorgw", "service_type": "rgw", "spec": {"rgw_frontend_port": 8800}, "status": {"created": "2026-03-06T22:56:14.495594Z", "ports": [8800], "running": 0, "size": 2}}] 2026-03-06T23:56:34.643 INFO:tasks.cephadm:nfs.foo has 0/1 2026-03-06T23:56:34.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:34 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:34.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:34 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-06T23:56:34.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:34 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:34.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:34 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:34.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:34 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:34.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:34 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:34.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:34 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T23:56:34.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:34 vm08.local ceph-mon[56019]: from='client.14650 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T23:56:35.235 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:34 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:35.235 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:34 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-06T23:56:35.235 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:34 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:35.235 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:34 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:35.235 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:34 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:35.235 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 
23:56:34 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:35.235 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:34 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T23:56:35.235 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:34 vm03.local ceph-mon[48028]: from='client.14650 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T23:56:35.643 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph orch ls -f json 2026-03-06T23:56:36.048 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:56:36.456 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:56:36.457 INFO:teuthology.orchestra.run.vm03.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-06T22:54:01.168936Z", "last_refresh": "2026-03-06T22:56:36.176901Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-06T22:54:54.356818Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-06T22:53:59.566189Z", "last_refresh": "2026-03-06T22:56:35.302487Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T22:54:55.332232Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-06T22:53:59.141843Z", "last_refresh": "2026-03-06T22:56:35.302538Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-06T22:54:00.391211Z", "last_refresh": "2026-03-06T22:56:36.176940Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-06T22:56:34.019726Z service:ingress.nfs.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "nfs.foo", "service_name": "ingress.nfs.foo", "service_type": "ingress", "spec": {"backend_service": "nfs.foo", "first_virtual_router_id": 50, "frontend_port": 2049, "monitor_port": 9049, "virtual_ip": "12.12.1.103/22"}, "status": {"created": "2026-03-06T22:56:12.119607Z", "last_refresh": "2026-03-06T22:56:35.302795Z", "ports": [2049, 9049], "running": 4, "size": 4, "virtual_ip": "12.12.1.103/22"}}, {"events": ["2026-03-06T22:54:59.051172Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-06T22:53:58.796341Z", "last_refresh": "2026-03-06T22:56:35.302600Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T22:55:00.529428Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm03:192.168.123.103=vm03", "vm08:192.168.123.108=vm08"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-06T22:54:22.648138Z", "last_refresh": "2026-03-06T22:56:35.302629Z", 
"running": 2, "size": 2}}, {"events": ["2026-03-06T22:56:16.158012Z service:nfs.foo [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "foo", "service_name": "nfs.foo", "service_type": "nfs", "spec": {"port": 12049}, "status": {"created": "2026-03-06T22:56:12.112297Z", "last_refresh": "2026-03-06T22:56:36.177126Z", "ports": [12049], "running": 1, "size": 1}}, {"events": ["2026-03-06T22:54:58.093905Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-06T22:54:00.778915Z", "last_refresh": "2026-03-06T22:56:35.302570Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-06T22:55:10.345071Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-06T22:55:10.331918Z", "last_refresh": "2026-03-06T22:56:35.302658Z", "running": 8, "size": 8}}, {"events": ["2026-03-06T22:55:00.531903Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-06T22:53:59.964507Z", "last_refresh": "2026-03-06T22:56:36.176968Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-03-06T22:56:14.501210Z service:rgw.foorgw [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foorgw", "service_name": "rgw.foorgw", "service_type": "rgw", "spec": {"rgw_frontend_port": 8800}, "status": {"created": "2026-03-06T22:56:14.495594Z", "last_refresh": "2026-03-06T22:56:35.302767Z", "ports": [8800], "running": 2, "size": 2}}] 2026-03-06T23:56:36.524 INFO:tasks.cephadm:nfs.foo has 1/1 2026-03-06T23:56:36.524 INFO:teuthology.run_tasks:Running task cephadm.wait_for_service... 2026-03-06T23:56:36.527 INFO:tasks.cephadm:Waiting for ceph service ingress.nfs.foo to start (timeout 300)... 
2026-03-06T23:56:36.527 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- ceph orch ls -f json 2026-03-06T23:56:36.617 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:36 vm03.local ceph-mon[48028]: pgmap v89: 225 pgs: 225 active+clean; 455 KiB data, 219 MiB used, 160 GiB / 160 GiB avail; 25 KiB/s rd, 1.1 KiB/s wr, 46 op/s 2026-03-06T23:56:36.617 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:36 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:36.617 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:36 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:36.617 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:36 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:36.617 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:36 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:36.617 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:36 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:36.617 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:36 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:56:36.617 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:36 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-06T23:56:36.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:36 vm08.local ceph-mon[56019]: pgmap v89: 225 pgs: 225 active+clean; 455 KiB data, 219 MiB used, 160 GiB / 160 GiB avail; 25 KiB/s rd, 1.1 KiB/s wr, 46 op/s 2026-03-06T23:56:36.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:36 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:36.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:36 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:36.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:36 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:36.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:36 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:36.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:36 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:36.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:36 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-06T23:56:36.744 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:36 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: 
dispatch 2026-03-06T23:56:36.906 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config 2026-03-06T23:56:37.289 INFO:teuthology.orchestra.run.vm03.stdout: 2026-03-06T23:56:37.291 INFO:teuthology.orchestra.run.vm03.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-03-06T22:54:01.168936Z", "last_refresh": "2026-03-06T22:56:36.176901Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-03-06T22:54:54.356818Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-03-06T22:53:59.566189Z", "last_refresh": "2026-03-06T22:56:35.302487Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T22:54:55.332232Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-03-06T22:53:59.141843Z", "last_refresh": "2026-03-06T22:56:35.302538Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-03-06T22:54:00.391211Z", "last_refresh": "2026-03-06T22:56:36.176940Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-03-06T22:56:34.019726Z service:ingress.nfs.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "nfs.foo", "service_name": "ingress.nfs.foo", "service_type": "ingress", "spec": {"backend_service": "nfs.foo", "first_virtual_router_id": 50, "frontend_port": 2049, "monitor_port": 9049, "virtual_ip": "12.12.1.103/22"}, "status": {"created": "2026-03-06T22:56:12.119607Z", "last_refresh": "2026-03-06T22:56:35.302795Z", "ports": [2049, 9049], "running": 4, "size": 4, "virtual_ip": "12.12.1.103/22"}}, {"events": ["2026-03-06T22:54:59.051172Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-03-06T22:53:58.796341Z", "last_refresh": "2026-03-06T22:56:35.302600Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T22:55:00.529428Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm03:192.168.123.103=vm03", "vm08:192.168.123.108=vm08"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-03-06T22:54:22.648138Z", "last_refresh": "2026-03-06T22:56:35.302629Z", "running": 2, "size": 2}}, {"events": ["2026-03-06T22:56:36.633864Z service:nfs.foo [INFO] \"service was created\""], "placement": {"count": 1}, "service_id": "foo", "service_name": "nfs.foo", "service_type": "nfs", "spec": {"port": 12049}, "status": {"created": "2026-03-06T22:56:12.112297Z", "last_refresh": "2026-03-06T22:56:36.177126Z", "ports": [12049], "running": 1, "size": 1}}, {"events": ["2026-03-06T22:54:58.093905Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-03-06T22:54:00.778915Z", "last_refresh": "2026-03-06T22:56:35.302570Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-03-06T22:55:10.345071Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": 
"all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-03-06T22:55:10.331918Z", "last_refresh": "2026-03-06T22:56:35.302658Z", "running": 8, "size": 8}}, {"events": ["2026-03-06T22:55:00.531903Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-03-06T22:53:59.964507Z", "last_refresh": "2026-03-06T22:56:36.176968Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-03-06T22:56:14.501210Z service:rgw.foorgw [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "foorgw", "service_name": "rgw.foorgw", "service_type": "rgw", "spec": {"rgw_frontend_port": 8800}, "status": {"created": "2026-03-06T22:56:14.495594Z", "last_refresh": "2026-03-06T22:56:35.302767Z", "ports": [8800], "running": 2, "size": 2}}] 2026-03-06T23:56:37.377 INFO:tasks.cephadm:ingress.nfs.foo has 4/4 2026-03-06T23:56:37.377 INFO:teuthology.run_tasks:Running task vip.exec... 2026-03-06T23:56:37.379 INFO:tasks.vip:Running commands on role host.a host ubuntu@vm03.local 2026-03-06T23:56:37.379 DEBUG:teuthology.orchestra.run.vm03:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'mkdir /mnt/foo' 2026-03-06T23:56:37.421 INFO:teuthology.orchestra.run.vm03.stderr:+ mkdir /mnt/foo 2026-03-06T23:56:37.422 DEBUG:teuthology.orchestra.run.vm03:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'sleep 5' 2026-03-06T23:56:37.493 INFO:teuthology.orchestra.run.vm03.stderr:+ sleep 5 2026-03-06T23:56:37.802 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:37 vm03.local ceph-mon[48028]: Checking dashboard <-> RGW credentials 2026-03-06T23:56:37.802 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:37 vm03.local ceph-mon[48028]: from='client.14666 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T23:56:37.802 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:37 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:37.802 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:37 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:37.802 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:37 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:37.802 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:37 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T23:56:37.802 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:37 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:37.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:37 vm08.local ceph-mon[56019]: Checking dashboard <-> RGW credentials 2026-03-06T23:56:37.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:37 vm08.local ceph-mon[56019]: from='client.14666 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T23:56:37.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:37 vm08.local ceph-mon[56019]: from='mgr.14221 
192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:37.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:37 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:37.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:37 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:37.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:37 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-06T23:56:37.995 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:37 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:38.640 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:38 vm08.local ceph-mon[56019]: Reconfiguring prometheus.vm03 (dependencies changed)... 2026-03-06T23:56:38.641 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:38 vm08.local ceph-mon[56019]: Reconfiguring daemon prometheus.vm03 on vm03 2026-03-06T23:56:38.641 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:38 vm08.local ceph-mon[56019]: pgmap v90: 225 pgs: 225 active+clean; 455 KiB data, 219 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 887 B/s wr, 29 op/s 2026-03-06T23:56:38.641 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:38 vm08.local ceph-mon[56019]: from='client.14692 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-06T23:56:38.641 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:38 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:38.641 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:38 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' 2026-03-06T23:56:38.641 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:38 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-06T23:56:38.641 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:38 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-06T23:56:38.857 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:38 vm03.local ceph-mon[48028]: Reconfiguring prometheus.vm03 (dependencies changed)... 
2026-03-06T23:56:38.857 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:38 vm03.local ceph-mon[48028]: Reconfiguring daemon prometheus.vm03 on vm03
2026-03-06T23:56:38.857 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:38 vm03.local ceph-mon[48028]: pgmap v90: 225 pgs: 225 active+clean; 455 KiB data, 219 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 887 B/s wr, 29 op/s
2026-03-06T23:56:38.857 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:38 vm03.local ceph-mon[48028]: from='client.14692 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-03-06T23:56:38.857 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:38 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:56:38.857 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:38 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:56:38.857 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:38 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-06T23:56:38.857 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:38 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-06T23:56:39.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:39 vm08.local ceph-mon[56019]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-06T23:56:39.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:39 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:56:39.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:39 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:56:40.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:39 vm03.local ceph-mon[48028]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-03-06T23:56:40.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:39 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:56:40.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:39 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:56:40.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:40 vm08.local ceph-mon[56019]: pgmap v91: 225 pgs: 225 active+clean; 455 KiB data, 219 MiB used, 160 GiB / 160 GiB avail; 9.6 KiB/s rd, 800 B/s wr, 16 op/s
2026-03-06T23:56:40.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:40 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:56:40.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:40 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:56:40.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:40 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T23:56:40.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:40 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-06T23:56:40.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:40 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:56:40.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:40 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-06T23:56:40.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:40 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:56:41.037 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:40 vm03.local ceph-mon[48028]: pgmap v91: 225 pgs: 225 active+clean; 455 KiB data, 219 MiB used, 160 GiB / 160 GiB avail; 9.6 KiB/s rd, 800 B/s wr, 16 op/s
2026-03-06T23:56:41.037 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:40 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:56:41.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:40 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:56:41.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:40 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T23:56:41.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:40 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-06T23:56:41.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:40 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:56:41.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:40 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-06T23:56:41.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:40 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:56:41.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:41 vm08.local ceph-mon[56019]: Checking dashboard <-> RGW credentials
2026-03-06T23:56:41.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:41 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:56:42.037 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:41 vm03.local ceph-mon[48028]: Checking dashboard <-> RGW credentials
2026-03-06T23:56:42.037 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:41 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:56:42.496 DEBUG:teuthology.orchestra.run.vm03:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'mount -t nfs 12.12.1.103:/foouser /mnt/foo'
2026-03-06T23:56:42.562 INFO:teuthology.orchestra.run.vm03.stderr:+ mount -t nfs 12.12.1.103:/foouser /mnt/foo
2026-03-06T23:56:42.771 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:42 vm03.local ceph-mon[48028]: pgmap v92: 225 pgs: 225 active+clean; 456 KiB data, 224 MiB used, 160 GiB / 160 GiB avail; 10 KiB/s rd, 853 B/s wr, 15 op/s
2026-03-06T23:56:42.772 DEBUG:teuthology.orchestra.run.vm03:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'test -d /mnt/foo/foobucket'
2026-03-06T23:56:42.840 INFO:teuthology.orchestra.run.vm03.stderr:+ test -d /mnt/foo/foobucket
2026-03-06T23:56:42.844 DEBUG:teuthology.orchestra.run.vm03:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'find /mnt/foo -ls'
2026-03-06T23:56:42.910 INFO:teuthology.orchestra.run.vm03.stderr:+ find /mnt/foo -ls
2026-03-06T23:56:42.916 INFO:teuthology.orchestra.run.vm03.stdout:9160472602707183340 0 drwxrwxrwx 1 root root 0 Mar 6 23:56 /mnt/foo
2026-03-06T23:56:42.916 INFO:teuthology.orchestra.run.vm03.stdout:6353740190238711428 0 drwxrwxrwx 1 root root 0 Mar 6 23:56 /mnt/foo/foobucket
2026-03-06T23:56:42.916 INFO:teuthology.orchestra.run.vm03.stdout:10810806159419214356 0 -rw-rw-rw- 1 root root 7 Mar 6 23:56 /mnt/foo/foobucket/myobject
2026-03-06T23:56:42.918 DEBUG:teuthology.orchestra.run.vm03:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'grep thebody /mnt/foo/foobucket/myobject'
2026-03-06T23:56:42.988 INFO:teuthology.orchestra.run.vm03.stderr:+ grep thebody /mnt/foo/foobucket/myobject
2026-03-06T23:56:42.991 INFO:teuthology.orchestra.run.vm03.stdout:thebody
2026-03-06T23:56:42.993 DEBUG:teuthology.orchestra.run.vm03:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'echo test > /mnt/foo/foobucket/newobject'
2026-03-06T23:56:42.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:42 vm08.local ceph-mon[56019]: pgmap v92: 225 pgs: 225 active+clean; 456 KiB data, 224 MiB used, 160 GiB / 160 GiB avail; 10 KiB/s rd, 853 B/s wr, 15 op/s
2026-03-06T23:56:43.062 INFO:teuthology.orchestra.run.vm03.stderr:+ echo test
2026-03-06T23:56:43.078 DEBUG:teuthology.orchestra.run.vm03:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c sync
2026-03-06T23:56:43.146 INFO:teuthology.orchestra.run.vm03.stderr:+ sync
2026-03-06T23:56:43.564 INFO:teuthology.run_tasks:Running task python...
2026-03-06T23:56:43.566 INFO:tasks.python:Running python on role host.a host ubuntu@vm03.local
2026-03-06T23:56:43.566 INFO:tasks.python:import boto3
import json
from io import BytesIO
with open('/tmp/user.json', 'rt') as f:
    info = json.loads(f.read())
s3 = boto3.resource(
    's3',
    aws_access_key_id=info['keys'][0]['access_key'],
    aws_secret_access_key=info['keys'][0]['secret_key'],
    endpoint_url='http://localhost:8800',
)
bucket = s3.Bucket('foobucket')
data = BytesIO()
bucket.download_fileobj(Fileobj=data, Key='newobject')
print(data.getvalue())
assert data.getvalue().decode() == 'test\n'
2026-03-06T23:56:43.566 DEBUG:teuthology.orchestra.run.vm03:> sudo TESTDIR=/home/ubuntu/cephtest python3
2026-03-06T23:56:43.829 INFO:teuthology.orchestra.run.vm03.stdout:b'test\n'
2026-03-06T23:56:43.869 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:43 vm03.local ceph-mon[48028]: pgmap v93: 225 pgs: 225 active+clean; 456 KiB data, 228 MiB used, 160 GiB / 160 GiB avail; 8.2 KiB/s rd, 341 B/s wr, 11 op/s
2026-03-06T23:56:43.870 INFO:teuthology.run_tasks:Running task vip.exec...
2026-03-06T23:56:43.872 INFO:tasks.vip:Running commands on role host.a host ubuntu@vm03.local
2026-03-06T23:56:43.872 DEBUG:teuthology.orchestra.run.vm03:> sudo TESTDIR=/home/ubuntu/cephtest bash -ex -c 'umount /mnt/foo'
2026-03-06T23:56:43.896 INFO:teuthology.orchestra.run.vm03.stderr:+ umount /mnt/foo
2026-03-06T23:56:43.914 INFO:teuthology.run_tasks:Running task cephadm.shell...
2026-03-06T23:56:43.917 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm03.local
2026-03-06T23:56:43.917 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- bash -c 'ceph nfs export rm foo /foouser'
2026-03-06T23:56:43.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:43 vm08.local ceph-mon[56019]: pgmap v93: 225 pgs: 225 active+clean; 456 KiB data, 228 MiB used, 160 GiB / 160 GiB avail; 8.2 KiB/s rd, 341 B/s wr, 11 op/s
2026-03-06T23:56:44.311 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config
2026-03-06T23:56:44.725 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- bash -c 'ceph nfs cluster rm foo'
2026-03-06T23:56:44.994 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:44 vm08.local ceph-mon[56019]: from='client.14718 -' entity='client.admin' cmd=[{"prefix": "nfs export rm", "cluster_id": "foo", "pseudo_path": "/foouser", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T23:56:45.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:44 vm03.local ceph-mon[48028]: from='client.14718 -' entity='client.admin' cmd=[{"prefix": "nfs export rm", "cluster_id": "foo", "pseudo_path": "/foouser", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T23:56:45.074 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config
2026-03-06T23:56:45.556 INFO:teuthology.run_tasks:Running task cephadm.shell...
2026-03-06T23:56:45.559 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm03.local
2026-03-06T23:56:45.559 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- bash -c 'stat -c '"'"'%u %g'"'"' /var/log/ceph | grep '"'"'167 167'"'"''
2026-03-06T23:56:45.951 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config
2026-03-06T23:56:46.050 INFO:teuthology.orchestra.run.vm03.stdout:167 167
2026-03-06T23:56:46.100 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- bash -c 'ceph orch status'
2026-03-06T23:56:46.505 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config
2026-03-06T23:56:46.728 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:46 vm08.local ceph-mon[56019]: pgmap v94: 225 pgs: 225 active+clean; 456 KiB data, 228 MiB used, 160 GiB / 160 GiB avail; 12 KiB/s rd, 426 B/s wr, 14 op/s
2026-03-06T23:56:46.728 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:46 vm08.local ceph-mon[56019]: from='client.14722 -' entity='client.admin' cmd=[{"prefix": "nfs cluster rm", "cluster_id": "foo", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T23:56:46.728 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:46 vm08.local ceph-mon[56019]: Remove service ingress.nfs.foo
2026-03-06T23:56:46.728 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:46 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:56:46.728 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:46 vm08.local ceph-mon[56019]: Remove service nfs.foo
2026-03-06T23:56:46.728 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:46 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-06T23:56:46.729 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:46 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:56:46.787 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:46 vm03.local ceph-mon[48028]: pgmap v94: 225 pgs: 225 active+clean; 456 KiB data, 228 MiB used, 160 GiB / 160 GiB avail; 12 KiB/s rd, 426 B/s wr, 14 op/s
2026-03-06T23:56:46.787 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:46 vm03.local ceph-mon[48028]: from='client.14722 -' entity='client.admin' cmd=[{"prefix": "nfs cluster rm", "cluster_id": "foo", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T23:56:46.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:46 vm03.local ceph-mon[48028]: Remove service ingress.nfs.foo
2026-03-06T23:56:46.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:46 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:56:46.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:46 vm03.local ceph-mon[48028]: Remove service nfs.foo
2026-03-06T23:56:46.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:46 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-06T23:56:46.788 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:46 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:56:46.940 INFO:teuthology.orchestra.run.vm03.stdout:Backend: cephadm
2026-03-06T23:56:46.940 INFO:teuthology.orchestra.run.vm03.stdout:Available: Yes
2026-03-06T23:56:46.940 INFO:teuthology.orchestra.run.vm03.stdout:Paused: No
2026-03-06T23:56:47.016 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- bash -c 'ceph orch ps'
2026-03-06T23:56:47.441 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config
2026-03-06T23:56:47.771 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:47 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:56:47.771 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:47 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:56:47.771 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:47 vm03.local ceph-mon[48028]: pgmap v95: 225 pgs: 225 active+clean; 456 KiB data, 228 MiB used, 160 GiB / 160 GiB avail; 13 KiB/s rd, 597 B/s wr, 14 op/s
2026-03-06T23:56:47.771 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:47 vm03.local ceph-mon[48028]: from='client.14726 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T23:56:47.879 INFO:teuthology.orchestra.run.vm03.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-06T23:56:47.879 INFO:teuthology.orchestra.run.vm03.stdout:alertmanager.vm03 vm03 *:9093,9094 running (98s) 8s ago 2m 21.4M - 0.25.0 c8568f914cd2 1dbe2f74b08c
2026-03-06T23:56:47.879 INFO:teuthology.orchestra.run.vm03.stdout:ceph-exporter.vm03 vm03 running (2m) 8s ago 2m 9202k - 19.2.3-39-g340d3c24fc6 8bccc98d839a c7d3bc3e325d
2026-03-06T23:56:47.879 INFO:teuthology.orchestra.run.vm03.stdout:ceph-exporter.vm08 vm08 running (113s) 1s ago 113s 6727k - 19.2.3-39-g340d3c24fc6 8bccc98d839a 9e1eaa9dfb3a
2026-03-06T23:56:47.879 INFO:teuthology.orchestra.run.vm03.stdout:crash.vm03 vm03 running (2m) 8s ago 2m 11.1M - 19.2.3-39-g340d3c24fc6 8bccc98d839a 97554af5c861
2026-03-06T23:56:47.880 INFO:teuthology.orchestra.run.vm03.stdout:crash.vm08 vm08 running (112s) 1s ago 112s 11.1M - 19.2.3-39-g340d3c24fc6 8bccc98d839a fd70e00505f9
2026-03-06T23:56:47.880 INFO:teuthology.orchestra.run.vm03.stdout:grafana.vm03 vm03 *:3000 running (97s) 8s ago 2m 75.3M - 10.4.0 c8b91775d855 6ad012d74bd3
2026-03-06T23:56:47.880 INFO:teuthology.orchestra.run.vm03.stdout:haproxy.nfs.foo.vm03.pnfmnq vm03 *:2049,9049 running (27s) 8s ago 27s 3707k - 2.3.17-d1c9119 e85424b0d443 b97f56f3b165
2026-03-06T23:56:47.880 INFO:teuthology.orchestra.run.vm03.stdout:haproxy.nfs.foo.vm08.kzoyjo vm08 *:2049,9049 running (23s) 1s ago 23s 3728k - 2.3.17-d1c9119 e85424b0d443 250ae95eddea
2026-03-06T23:56:47.880 INFO:teuthology.orchestra.run.vm03.stdout:keepalived.nfs.foo.vm03.pgxdbu vm03 running (13s) 8s ago 13s 2382k - 2.2.4 4a3a1ff181d9 145a7c200e44
2026-03-06T23:56:47.880 INFO:teuthology.orchestra.run.vm03.stdout:keepalived.nfs.foo.vm08.hxbwag vm08 running (18s) 1s ago 18s 2373k - 2.2.4 4a3a1ff181d9 9abc2742ac62
2026-03-06T23:56:47.880 INFO:teuthology.orchestra.run.vm03.stdout:mgr.vm03.xzkqce vm03 *:9283,8765,8443 running (3m) 8s ago 3m 561M - 19.2.3-39-g340d3c24fc6 8bccc98d839a 723e49aecc15
2026-03-06T23:56:47.880 INFO:teuthology.orchestra.run.vm03.stdout:mgr.vm08.bnopnr vm08 *:8443,9283,8765 running (108s) 1s ago 108s 480M - 19.2.3-39-g340d3c24fc6 8bccc98d839a 23af230679dd
2026-03-06T23:56:47.880 INFO:teuthology.orchestra.run.vm03.stdout:mon.vm03 vm03 running (3m) 8s ago 3m 56.9M 2048M 19.2.3-39-g340d3c24fc6 8bccc98d839a 49f39418d469
2026-03-06T23:56:47.880 INFO:teuthology.orchestra.run.vm03.stdout:mon.vm08 vm08 running (107s) 1s ago 107s 51.0M 2048M 19.2.3-39-g340d3c24fc6 8bccc98d839a f4a7f049da3e
2026-03-06T23:56:47.880 INFO:teuthology.orchestra.run.vm03.stdout:nfs.foo.0.0.vm03.cvyxvl vm03 *:12049 running (32s) 8s ago 31s 116M - 5.9 8bccc98d839a 50760982f14c
2026-03-06T23:56:47.880 INFO:teuthology.orchestra.run.vm03.stdout:node-exporter.vm03 vm03 *:9100 running (2m) 8s ago 2m 9479k - 1.7.0 72c9c2088986 252a934ab624
2026-03-06T23:56:47.880 INFO:teuthology.orchestra.run.vm03.stdout:node-exporter.vm08 vm08 *:9100 running (109s) 1s ago 109s 9424k - 1.7.0 72c9c2088986 08a294c1dfce
2026-03-06T23:56:47.880 INFO:teuthology.orchestra.run.vm03.stdout:osd.0 vm08 running (74s) 1s ago 74s 50.8M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a db428a6afa60
2026-03-06T23:56:47.880 INFO:teuthology.orchestra.run.vm03.stdout:osd.1 vm03 running (73s) 8s ago 73s 48.7M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a d1e121ceddcf
2026-03-06T23:56:47.880 INFO:teuthology.orchestra.run.vm03.stdout:osd.2 vm08 running (71s) 1s ago 71s 68.4M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a 10fe666c3beb
2026-03-06T23:56:47.880 INFO:teuthology.orchestra.run.vm03.stdout:osd.3 vm03 running (70s) 8s ago 70s 71.8M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a c073c0190b0a
2026-03-06T23:56:47.880 INFO:teuthology.orchestra.run.vm03.stdout:osd.4 vm08 running (68s) 1s ago 68s 70.1M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a 11843ffaf4c6
2026-03-06T23:56:47.880 INFO:teuthology.orchestra.run.vm03.stdout:osd.5 vm03 running (66s) 8s ago 66s 69.5M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a 4f43b0eaf42c
2026-03-06T23:56:47.880 INFO:teuthology.orchestra.run.vm03.stdout:osd.6 vm08 running (65s) 1s ago 64s 67.5M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a 833d32daeb35
2026-03-06T23:56:47.880 INFO:teuthology.orchestra.run.vm03.stdout:osd.7 vm03 running (62s) 8s ago 62s 50.6M 4096M 19.2.3-39-g340d3c24fc6 8bccc98d839a 92a834b69a71
2026-03-06T23:56:47.880 INFO:teuthology.orchestra.run.vm03.stdout:prometheus.vm03 vm03 *:9095 running (10s) 8s ago 2m 35.0M - 2.51.0 1d3b7f56885b 07ec7425ce5c
2026-03-06T23:56:47.880 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foorgw.vm03.ckntua vm03 *:8800 running (33s) 8s ago 33s 101M - 19.2.3-39-g340d3c24fc6 8bccc98d839a ed5cff732c98
2026-03-06T23:56:47.880 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foorgw.vm08.taucyu vm08 *:8800 running (34s) 1s ago 34s 97.0M - 19.2.3-39-g340d3c24fc6 8bccc98d839a 32b4737f6831
2026-03-06T23:56:47.953 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- bash -c 'ceph orch ls'
2026-03-06T23:56:48.244 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:47 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:56:48.244 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:47 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:56:48.244 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:47 vm08.local ceph-mon[56019]: pgmap v95: 225 pgs: 225 active+clean; 456 KiB data, 228 MiB used, 160 GiB / 160 GiB avail; 13 KiB/s rd, 597 B/s wr, 14 op/s
2026-03-06T23:56:48.244 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:47 vm08.local ceph-mon[56019]: from='client.14726 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T23:56:48.358 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config
2026-03-06T23:56:48.745 INFO:teuthology.orchestra.run.vm03.stdout:NAME PORTS RUNNING REFRESHED AGE PLACEMENT
2026-03-06T23:56:48.745 INFO:teuthology.orchestra.run.vm03.stdout:alertmanager ?:9093,9094 1/1 0s ago 2m count:1
2026-03-06T23:56:48.745 INFO:teuthology.orchestra.run.vm03.stdout:ceph-exporter 2/2 1s ago 2m *
2026-03-06T23:56:48.745 INFO:teuthology.orchestra.run.vm03.stdout:crash 2/2 1s ago 2m *
2026-03-06T23:56:48.745 INFO:teuthology.orchestra.run.vm03.stdout:grafana ?:3000 1/1 0s ago 2m count:1
2026-03-06T23:56:48.745 INFO:teuthology.orchestra.run.vm03.stdout:ingress.nfs.foo 12.12.1.103:2049,9049 4/4 36s count:2
2026-03-06T23:56:48.745 INFO:teuthology.orchestra.run.vm03.stdout:mgr 2/2 1s ago 2m count:2
2026-03-06T23:56:48.745 INFO:teuthology.orchestra.run.vm03.stdout:mon 2/2 1s ago 2m vm03:192.168.123.103=vm03;vm08:192.168.123.108=vm08;count:2
2026-03-06T23:56:48.745 INFO:teuthology.orchestra.run.vm03.stdout:nfs.foo ?:12049 1/1 36s count:1
2026-03-06T23:56:48.745 INFO:teuthology.orchestra.run.vm03.stdout:node-exporter ?:9100 2/2 1s ago 2m *
2026-03-06T23:56:48.745 INFO:teuthology.orchestra.run.vm03.stdout:osd.all-available-devices 8 1s ago 98s *
2026-03-06T23:56:48.745 INFO:teuthology.orchestra.run.vm03.stdout:prometheus ?:9095 1/1 0s ago 2m count:1
2026-03-06T23:56:48.745 INFO:teuthology.orchestra.run.vm03.stdout:rgw.foorgw ?:8800 2/2 1s ago 34s count:2
2026-03-06T23:56:48.824 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- bash -c 'ceph orch host ls'
2026-03-06T23:56:49.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:48 vm03.local ceph-mon[48028]: from='client.14730 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T23:56:49.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:48 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:56:49.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:48 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:56:49.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:48 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T23:56:49.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:48 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-06T23:56:49.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:48 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:56:49.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:48 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-06T23:56:49.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:48 vm03.local ceph-mon[48028]: Removing orphan daemon nfs.foo.0.0.vm03.cvyxvl...
2026-03-06T23:56:49.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:48 vm03.local ceph-mon[48028]: Removing daemon nfs.foo.0.0.vm03.cvyxvl from vm03 -- ports [12049]
2026-03-06T23:56:49.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:48 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "5.15", "id": [4, 6]}]: dispatch
2026-03-06T23:56:49.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:48 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "8.1c", "id": [5, 3]}]: dispatch
2026-03-06T23:56:49.038 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:48 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-06T23:56:49.186 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config
2026-03-06T23:56:49.244 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:48 vm08.local ceph-mon[56019]: from='client.14730 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T23:56:49.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:48 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:56:49.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:48 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:56:49.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:48 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-06T23:56:49.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:48 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-06T23:56:49.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:48 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce'
2026-03-06T23:56:49.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:48 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch
2026-03-06T23:56:49.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:48 vm08.local ceph-mon[56019]: Removing orphan daemon nfs.foo.0.0.vm03.cvyxvl...
2026-03-06T23:56:49.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:48 vm08.local ceph-mon[56019]: Removing daemon nfs.foo.0.0.vm03.cvyxvl from vm03 -- ports [12049]
2026-03-06T23:56:49.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:48 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "5.15", "id": [4, 6]}]: dispatch
2026-03-06T23:56:49.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:48 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "8.1c", "id": [5, 3]}]: dispatch
2026-03-06T23:56:49.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:48 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-06T23:56:49.533 INFO:teuthology.orchestra.run.vm03.stdout:HOST ADDR LABELS STATUS
2026-03-06T23:56:49.533 INFO:teuthology.orchestra.run.vm03.stdout:vm03 192.168.123.103
2026-03-06T23:56:49.533 INFO:teuthology.orchestra.run.vm03.stdout:vm08 192.168.123.108
2026-03-06T23:56:49.533 INFO:teuthology.orchestra.run.vm03.stdout:2 hosts in cluster
2026-03-06T23:56:49.584 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- bash -c 'ceph orch device ls'
2026-03-06T23:56:49.931 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config
2026-03-06T23:56:50.214 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:49 vm03.local ceph-mon[48028]: from='client.14734 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T23:56:50.214 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:49 vm03.local ceph-mon[48028]: pgmap v96: 225 pgs: 225 active+clean; 456 KiB data, 228 MiB used, 160 GiB / 160 GiB avail; 13 KiB/s rd, 682 B/s wr, 16 op/s
2026-03-06T23:56:50.214 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:49 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "5.15", "id": [4, 6]}]': finished
2026-03-06T23:56:50.214 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:49 vm03.local ceph-mon[48028]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "8.1c", "id": [5, 3]}]': finished
2026-03-06T23:56:50.214 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:49 vm03.local ceph-mon[48028]: osdmap e49: 8 total, 8 up, 8 in
2026-03-06T23:56:50.214 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:49 vm03.local ceph-mon[48028]: from='client.14738 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T23:56:50.244 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:49 vm08.local ceph-mon[56019]: from='client.14734 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T23:56:50.244 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:49 vm08.local ceph-mon[56019]: pgmap v96: 225 pgs: 225 active+clean; 456 KiB data, 228 MiB used, 160 GiB / 160 GiB avail; 13 KiB/s rd, 682 B/s wr, 16 op/s
2026-03-06T23:56:50.244 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:49 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "5.15", "id": [4, 6]}]': finished
2026-03-06T23:56:50.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:49 vm08.local ceph-mon[56019]: from='mgr.14221 192.168.123.103:0/822821252' entity='mgr.vm03.xzkqce' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "8.1c", "id": [5, 3]}]': finished
2026-03-06T23:56:50.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:49 vm08.local ceph-mon[56019]: osdmap e49: 8 total, 8 up, 8 in
2026-03-06T23:56:50.245 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:49 vm08.local ceph-mon[56019]: from='client.14738 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T23:56:50.290 INFO:teuthology.orchestra.run.vm03.stdout:HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS
2026-03-06T23:56:50.290 INFO:teuthology.orchestra.run.vm03.stdout:vm03 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 37s ago Has a FileSystem, Insufficient space (<5GB)
2026-03-06T23:56:50.290 INFO:teuthology.orchestra.run.vm03.stdout:vm03 /dev/vdb hdd DWNBRSTVMM03001 20.0G No 37s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-03-06T23:56:50.290 INFO:teuthology.orchestra.run.vm03.stdout:vm03 /dev/vdc hdd DWNBRSTVMM03002 20.0G No 37s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-03-06T23:56:50.290 INFO:teuthology.orchestra.run.vm03.stdout:vm03 /dev/vdd hdd DWNBRSTVMM03003 20.0G No 37s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-03-06T23:56:50.290 INFO:teuthology.orchestra.run.vm03.stdout:vm03 /dev/vde hdd DWNBRSTVMM03004 20.0G No 37s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-03-06T23:56:50.290 INFO:teuthology.orchestra.run.vm03.stdout:vm08 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 38s ago Has a FileSystem, Insufficient space (<5GB)
2026-03-06T23:56:50.290 INFO:teuthology.orchestra.run.vm03.stdout:vm08 /dev/vdb hdd DWNBRSTVMM08001 20.0G No 38s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-03-06T23:56:50.290 INFO:teuthology.orchestra.run.vm03.stdout:vm08 /dev/vdc hdd DWNBRSTVMM08002 20.0G No 38s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-03-06T23:56:50.290 INFO:teuthology.orchestra.run.vm03.stdout:vm08 /dev/vdd hdd DWNBRSTVMM08003 20.0G No 38s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-03-06T23:56:50.290 INFO:teuthology.orchestra.run.vm03.stdout:vm08 /dev/vde hdd DWNBRSTVMM08004 20.0G No 38s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-03-06T23:56:50.343 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:cobaltcore-storage-v19.2.3-fasttrack-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 -- bash -c 'ceph orch ls | grep '"'"'^osd.all-available-devices '"'"''
2026-03-06T23:56:50.699 INFO:teuthology.orchestra.run.vm03.stderr:Inferring config /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/mon.vm03/config
2026-03-06T23:56:51.050 INFO:teuthology.orchestra.run.vm03.stdout:osd.all-available-devices 8 4s ago 100s *
2026-03-06T23:56:51.102 DEBUG:teuthology.run_tasks:Unwinding manager vip
2026-03-06T23:56:51.105 INFO:tasks.vip:Removing 12.12.0.103 (and any VIPs) on vm03.local iface eth0...
2026-03-06T23:56:51.105 DEBUG:teuthology.orchestra.run.vm03:> sudo ip addr del 12.12.0.103/22 dev eth0
2026-03-06T23:56:51.135 DEBUG:teuthology.orchestra.run.vm03:> sudo ip addr del 12.12.1.103/22 dev eth0
2026-03-06T23:56:51.205 INFO:tasks.vip:Removing 12.12.0.108 (and any VIPs) on vm08.local iface eth0...
2026-03-06T23:56:51.205 DEBUG:teuthology.orchestra.run.vm08:> sudo ip addr del 12.12.0.108/22 dev eth0
2026-03-06T23:56:51.233 DEBUG:teuthology.orchestra.run.vm08:> sudo ip addr del 12.12.1.103/22 dev eth0
2026-03-06T23:56:51.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:50 vm03.local ceph-mon[48028]: osdmap e50: 8 total, 8 up, 8 in
2026-03-06T23:56:51.288 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:50 vm03.local ceph-mon[48028]: from='client.14742 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T23:56:51.294 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:50 vm08.local ceph-mon[56019]: osdmap e50: 8 total, 8 up, 8 in
2026-03-06T23:56:51.295 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:50 vm08.local ceph-mon[56019]: from='client.14742 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch
2026-03-06T23:56:51.297 INFO:teuthology.orchestra.run.vm08.stderr:Error: ipv4: Address not found.
2026-03-06T23:56:51.298 DEBUG:teuthology.orchestra.run:got remote process result: 2
2026-03-06T23:56:51.298 DEBUG:teuthology.run_tasks:Unwinding manager cephadm
2026-03-06T23:56:51.301 INFO:tasks.cephadm:Teardown begin
2026-03-06T23:56:51.301 DEBUG:teuthology.orchestra.run.vm03:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-03-06T23:56:51.328 DEBUG:teuthology.orchestra.run.vm08:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-03-06T23:56:51.363 INFO:tasks.cephadm:Cleaning up testdir ceph.* files...
2026-03-06T23:56:51.363 DEBUG:teuthology.orchestra.run.vm03:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub
2026-03-06T23:56:51.385 DEBUG:teuthology.orchestra.run.vm08:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub
2026-03-06T23:56:51.420 INFO:tasks.cephadm:Stopping all daemons...
2026-03-06T23:56:51.420 INFO:tasks.cephadm.mon.vm03:Stopping mon.vm03...
2026-03-06T23:56:51.420 DEBUG:teuthology.orchestra.run.vm03:> sudo systemctl stop ceph-386eb88a-19af-11f1-876d-93c9c802cc09@mon.vm03
2026-03-06T23:56:51.636 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:51 vm03.local systemd[1]: Stopping Ceph mon.vm03 for 386eb88a-19af-11f1-876d-93c9c802cc09...
2026-03-06T23:56:51.636 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:51 vm03.local ceph-386eb88a-19af-11f1-876d-93c9c802cc09-mon-vm03[48024]: 2026-03-06T22:56:51.532+0000 7fbca9064640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.vm03 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0
2026-03-06T23:56:51.636 INFO:journalctl@ceph.mon.vm03.vm03.stdout:Mar 06 23:56:51 vm03.local ceph-386eb88a-19af-11f1-876d-93c9c802cc09-mon-vm03[48024]: 2026-03-06T22:56:51.532+0000 7fbca9064640 -1 mon.vm03@0(leader) e2 *** Got Signal Terminated ***
2026-03-06T23:56:51.833 DEBUG:teuthology.orchestra.run.vm03:> sudo pkill -f 'journalctl -f -n 0 -u ceph-386eb88a-19af-11f1-876d-93c9c802cc09@mon.vm03.service'
2026-03-06T23:56:51.873 DEBUG:teuthology.orchestra.run:got remote process result: None
2026-03-06T23:56:51.873 INFO:tasks.cephadm.mon.vm03:Stopped mon.vm03
2026-03-06T23:56:51.873 INFO:tasks.cephadm.mon.vm08:Stopping mon.vm08...
2026-03-06T23:56:51.873 DEBUG:teuthology.orchestra.run.vm08:> sudo systemctl stop ceph-386eb88a-19af-11f1-876d-93c9c802cc09@mon.vm08
2026-03-06T23:56:52.114 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:51 vm08.local systemd[1]: Stopping Ceph mon.vm08 for 386eb88a-19af-11f1-876d-93c9c802cc09...
2026-03-06T23:56:52.114 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:51 vm08.local ceph-386eb88a-19af-11f1-876d-93c9c802cc09-mon-vm08[56012]: 2026-03-06T22:56:51.977+0000 7f467cd92640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.vm08 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0
2026-03-06T23:56:52.114 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:51 vm08.local ceph-386eb88a-19af-11f1-876d-93c9c802cc09-mon-vm08[56012]: 2026-03-06T22:56:51.977+0000 7f467cd92640 -1 mon.vm08@1(peon) e2 *** Got Signal Terminated ***
2026-03-06T23:56:52.114 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:52 vm08.local podman[77555]: 2026-03-06 23:56:52.031328556 +0100 CET m=+0.071411448 container died f4a7f049da3e84db210984b273391def91d1eec5d56d7e3d34e21c7c08f32a91 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-386eb88a-19af-11f1-876d-93c9c802cc09-mon-vm08, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8)
2026-03-06T23:56:52.114 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:52 vm08.local podman[77555]: 2026-03-06 23:56:52.051531913 +0100 CET m=+0.091614805 container remove f4a7f049da3e84db210984b273391def91d1eec5d56d7e3d34e21c7c08f32a91 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:ffa52c72fad7bdd2657408de9cf8d87fc2c72f716d1a00277ba13f7c12b404e0, name=ceph-386eb88a-19af-11f1-876d-93c9c802cc09-mon-vm08, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/irq0/ceph.git, CEPH_REF=19.2.3-39-g340d3c24fc6, CEPH_SHA1=340d3c24fc6ae7529322dc7ccee6c6cb2589da0a, FROM_IMAGE=rockylinux:9)
2026-03-06T23:56:52.114 INFO:journalctl@ceph.mon.vm08.vm08.stdout:Mar 06 23:56:52 vm08.local bash[77555]: ceph-386eb88a-19af-11f1-876d-93c9c802cc09-mon-vm08
2026-03-06T23:56:52.123 DEBUG:teuthology.orchestra.run.vm08:> sudo pkill -f 'journalctl -f -n 0 -u ceph-386eb88a-19af-11f1-876d-93c9c802cc09@mon.vm08.service'
2026-03-06T23:56:52.161 DEBUG:teuthology.orchestra.run:got remote process result: None
2026-03-06T23:56:52.161 INFO:tasks.cephadm.mon.vm08:Stopped mon.vm08
2026-03-06T23:56:52.161 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 --force --keep-logs
2026-03-06T23:56:58.831 INFO:teuthology.orchestra.run.vm03.stdout:Deleting cluster with fsid: 386eb88a-19af-11f1-876d-93c9c802cc09
2026-03-06T23:57:38.136 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 --force --keep-logs
2026-03-06T23:57:38.428 INFO:teuthology.orchestra.run.vm08.stdout:Deleting cluster with fsid: 386eb88a-19af-11f1-876d-93c9c802cc09
2026-03-06T23:58:16.390 DEBUG:teuthology.orchestra.run.vm03:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-03-06T23:58:16.418 DEBUG:teuthology.orchestra.run.vm08:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-03-06T23:58:16.447 INFO:tasks.cephadm:Archiving crash dumps...
2026-03-06T23:58:16.447 DEBUG:teuthology.misc:Transferring archived files from vm03:/var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/crash to /archive/irq0-2026-03-06_20:21:59-orch:cephadm:smoke-roleless-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/416/remote/vm03/crash
2026-03-06T23:58:16.447 DEBUG:teuthology.orchestra.run.vm03:> sudo tar c -f - -C /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/crash -- .
2026-03-06T23:58:16.486 INFO:teuthology.orchestra.run.vm03.stderr:tar: /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/crash: Cannot open: No such file or directory
2026-03-06T23:58:16.486 INFO:teuthology.orchestra.run.vm03.stderr:tar: Error is not recoverable: exiting now
2026-03-06T23:58:16.487 DEBUG:teuthology.misc:Transferring archived files from vm08:/var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/crash to /archive/irq0-2026-03-06_20:21:59-orch:cephadm:smoke-roleless-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/416/remote/vm08/crash
2026-03-06T23:58:16.487 DEBUG:teuthology.orchestra.run.vm08:> sudo tar c -f - -C /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/crash -- .
2026-03-06T23:58:16.512 INFO:teuthology.orchestra.run.vm08.stderr:tar: /var/lib/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/crash: Cannot open: No such file or directory
2026-03-06T23:58:16.512 INFO:teuthology.orchestra.run.vm08.stderr:tar: Error is not recoverable: exiting now
2026-03-06T23:58:16.513 INFO:tasks.cephadm:Checking cluster log for badness...
2026-03-06T23:58:16.513 DEBUG:teuthology.orchestra.run.vm03:> sudo egrep '\[ERR\]|\[WRN\]|\[SEC\]' /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph.log | egrep CEPHADM_ | egrep -v '\(MDS_ALL_DOWN\)' | egrep -v '\(MDS_UP_LESS_THAN_MAX\)' | egrep -v CEPHADM_DAEMON_PLACE_FAIL | egrep -v CEPHADM_FAILED_DAEMON | head -n 1
2026-03-06T23:58:16.557 INFO:tasks.cephadm:Compressing logs...
2026-03-06T23:58:16.557 DEBUG:teuthology.orchestra.run.vm03:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose --
2026-03-06T23:58:16.600 DEBUG:teuthology.orchestra.run.vm08:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose --
2026-03-06T23:58:16.624 INFO:teuthology.orchestra.run.vm03.stderr:find: ‘/var/log/rbd-target-api’: No such file or directory
2026-03-06T23:58:16.625 INFO:teuthology.orchestra.run.vm08.stderr:find: ‘/var/log/rbd-target-api’: No such file or directory
2026-03-06T23:58:16.626 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/cephadm.log
2026-03-06T23:58:16.626 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-mon.vm03.log
2026-03-06T23:58:16.626 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /var/log/ceph/cephadm.log
2026-03-06T23:58:16.626 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-volume.log
2026-03-06T23:58:16.626 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/cephadm.log: gzip -5 --verbose -- /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph.log
2026-03-06T23:58:16.627 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/cephadm.log: 91.8% -- replaced with /var/log/ceph/cephadm.log.gz
2026-03-06T23:58:16.628 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-client.ceph-exporter.vm08.log
2026-03-06T23:58:16.628 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-volume.log: gzip -5 --verbose -- /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-mgr.vm08.bnopnr.log
2026-03-06T23:58:16.629 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-client.ceph-exporter.vm08.log: 30.8% -- replaced with /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-client.ceph-exporter.vm08.log.gz
2026-03-06T23:58:16.630 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-mon.vm08.log
2026-03-06T23:58:16.633 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-mon.vm03.log: gzip -5 --verbose -- /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-mgr.vm03.xzkqce.log
2026-03-06T23:58:16.633 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-mgr.vm08.bnopnr.log: 91.2% -- replaced with /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-mgr.vm08.bnopnr.log.gz
2026-03-06T23:58:16.634 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph.log: 83.7% -- replaced with /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph.log.gz
2026-03-06T23:58:16.635 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph.audit.log
2026-03-06T23:58:16.638 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph.audit.log
2026-03-06T23:58:16.640 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-mon.vm08.log: gzip -5 --verbose -- /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph.log
2026-03-06T23:58:16.643 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph.audit.log: 90.9% -- replaced with /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph.audit.log.gz
2026-03-06T23:58:16.643 INFO:teuthology.orchestra.run.vm08.stderr: 93.2% -- replaced with /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-volume.log.gz
2026-03-06T23:58:16.643 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph.cephadm.log
2026-03-06T23:58:16.644 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph.log: 83.0% -- replaced with /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph.log.gz
2026-03-06T23:58:16.644 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-osd.0.log
2026-03-06T23:58:16.644 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph.cephadm.log: 81.8% -- replaced with /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph.cephadm.log.gz
2026-03-06T23:58:16.645 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-osd.2.log
2026-03-06T23:58:16.646 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-mgr.vm03.xzkqce.log: 91.6% -- replaced with /var/log/ceph/cephadm.log.gz
2026-03-06T23:58:16.646 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph.cephadm.log
2026-03-06T23:58:16.650 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-osd.0.log: gzip -5 --verbose -- /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-osd.4.log
2026-03-06T23:58:16.650 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph.audit.log: 90.7% -- replaced with /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph.audit.log.gz
2026-03-06T23:58:16.653 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-volume.log
2026-03-06T23:58:16.653 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph.cephadm.log: 83.0% -- replaced with /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph.cephadm.log.gz
2026-03-06T23:58:16.658 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-client.ceph-exporter.vm03.log
2026-03-06T23:58:16.662 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-osd.2.log: gzip -5 --verbose -- /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-osd.6.log
2026-03-06T23:58:16.667 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-volume.log: gzip -5 --verbose -- /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-osd.1.log
2026-03-06T23:58:16.669 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-osd.4.log: gzip -5 --verbose -- /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-client.rgw.foorgw.vm08.taucyu.log
2026-03-06T23:58:16.671 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-client.ceph-exporter.vm03.log: 92.4% -- replaced with /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-client.ceph-exporter.vm03.log.gz
2026-03-06T23:58:16.675 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-osd.3.log
2026-03-06T23:58:16.680 INFO:teuthology.orchestra.run.vm08.stderr:/var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-osd.6.log: /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-client.rgw.foorgw.vm08.taucyu.log: 59.0% -- replaced with /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-client.rgw.foorgw.vm08.taucyu.log.gz
2026-03-06T23:58:16.682 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-osd.1.log: gzip -5 --verbose -- /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-osd.5.log
2026-03-06T23:58:16.687 INFO:teuthology.orchestra.run.vm08.stderr: 92.1% -- replaced with /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-mon.vm08.log.gz
2026-03-06T23:58:16.692 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-osd.3.log: gzip -5 --verbose -- /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-osd.7.log
2026-03-06T23:58:16.698 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-osd.5.log: gzip -5 --verbose -- /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-client.rgw.foorgw.vm03.ckntua.log
2026-03-06T23:58:16.698 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-osd.7.log: 93.2% -- replaced with /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-volume.log.gz
2026-03-06T23:58:16.710 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ops-log-ceph-client.rgw.foorgw.vm03.ckntua.log
2026-03-06T23:58:16.710 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-client.rgw.foorgw.vm03.ckntua.log: 68.5% -- replaced with /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-client.rgw.foorgw.vm03.ckntua.log.gz
2026-03-06T23:58:16.721 INFO:teuthology.orchestra.run.vm03.stderr:/var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ops-log-ceph-client.rgw.foorgw.vm03.ckntua.log: 78.0% -- replaced with /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ops-log-ceph-client.rgw.foorgw.vm03.ckntua.log.gz
2026-03-06T23:58:16.812 INFO:teuthology.orchestra.run.vm03.stderr: 89.4% -- replaced with /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-mgr.vm03.xzkqce.log.gz
2026-03-06T23:58:16.887 INFO:teuthology.orchestra.run.vm08.stderr: 93.8% -- replaced with /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-osd.6.log.gz
2026-03-06T23:58:16.908 INFO:teuthology.orchestra.run.vm03.stderr: 91.6% -- replaced with /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-mon.vm03.log.gz
2026-03-06T23:58:16.933 INFO:teuthology.orchestra.run.vm08.stderr: 94.0% -- replaced with /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-osd.4.log.gz
2026-03-06T23:58:16.951 INFO:teuthology.orchestra.run.vm08.stderr: 93.9% -- replaced with /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-osd.2.log.gz
2026-03-06T23:58:16.969 INFO:teuthology.orchestra.run.vm03.stderr: 94.0% -- replaced with /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-osd.7.log.gz
2026-03-06T23:58:16.975 INFO:teuthology.orchestra.run.vm08.stderr: 93.9% -- replaced with /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-osd.0.log.gz
2026-03-06T23:58:16.976 INFO:teuthology.orchestra.run.vm03.stderr: 94.0% -- replaced with /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-osd.1.log.gz
2026-03-06T23:58:16.977 INFO:teuthology.orchestra.run.vm08.stderr:
2026-03-06T23:58:16.977 INFO:teuthology.orchestra.run.vm08.stderr:real 0m0.362s
2026-03-06T23:58:16.977 INFO:teuthology.orchestra.run.vm08.stderr:user 0m0.643s
2026-03-06T23:58:16.977 INFO:teuthology.orchestra.run.vm08.stderr:sys 0m0.042s
2026-03-06T23:58:16.978 INFO:teuthology.orchestra.run.vm03.stderr: 94.0% -- replaced with /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-osd.5.log.gz
2026-03-06T23:58:17.024 INFO:teuthology.orchestra.run.vm03.stderr: 94.0% -- replaced with /var/log/ceph/386eb88a-19af-11f1-876d-93c9c802cc09/ceph-osd.3.log.gz
2026-03-06T23:58:17.026 INFO:teuthology.orchestra.run.vm03.stderr:
2026-03-06T23:58:17.026 INFO:teuthology.orchestra.run.vm03.stderr:real 0m0.412s
2026-03-06T23:58:17.026 INFO:teuthology.orchestra.run.vm03.stderr:user 0m0.709s
2026-03-06T23:58:17.027 INFO:teuthology.orchestra.run.vm03.stderr:sys 0m0.056s
2026-03-06T23:58:17.027 INFO:tasks.cephadm:Archiving logs...
2026-03-06T23:58:17.027 DEBUG:teuthology.misc:Transferring archived files from vm03:/var/log/ceph to /archive/irq0-2026-03-06_20:21:59-orch:cephadm:smoke-roleless-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/416/remote/vm03/log
2026-03-06T23:58:17.027 DEBUG:teuthology.orchestra.run.vm03:> sudo tar c -f - -C /var/log/ceph -- .
2026-03-06T23:58:17.134 DEBUG:teuthology.misc:Transferring archived files from vm08:/var/log/ceph to /archive/irq0-2026-03-06_20:21:59-orch:cephadm:smoke-roleless-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/416/remote/vm08/log
2026-03-06T23:58:17.135 DEBUG:teuthology.orchestra.run.vm08:> sudo tar c -f - -C /var/log/ceph -- .
2026-03-06T23:58:17.198 INFO:tasks.cephadm:Removing cluster...
2026-03-06T23:58:17.198 DEBUG:teuthology.orchestra.run.vm03:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 --force
2026-03-06T23:58:17.487 INFO:teuthology.orchestra.run.vm03.stdout:Deleting cluster with fsid: 386eb88a-19af-11f1-876d-93c9c802cc09
2026-03-06T23:58:17.582 DEBUG:teuthology.orchestra.run.vm08:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 386eb88a-19af-11f1-876d-93c9c802cc09 --force
2026-03-06T23:58:17.876 INFO:teuthology.orchestra.run.vm08.stdout:Deleting cluster with fsid: 386eb88a-19af-11f1-876d-93c9c802cc09
2026-03-06T23:58:17.984 INFO:tasks.cephadm:Removing cephadm ...
2026-03-06T23:58:17.984 DEBUG:teuthology.orchestra.run.vm03:> rm -rf /home/ubuntu/cephtest/cephadm
2026-03-06T23:58:17.999 DEBUG:teuthology.orchestra.run.vm08:> rm -rf /home/ubuntu/cephtest/cephadm
2026-03-06T23:58:18.016 INFO:tasks.cephadm:Teardown complete
2026-03-06T23:58:18.016 DEBUG:teuthology.run_tasks:Unwinding manager clock
2026-03-06T23:58:18.018 INFO:teuthology.task.clock:Checking final clock skew...
2026-03-06T23:58:18.018 DEBUG:teuthology.orchestra.run.vm03:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-06T23:58:18.042 DEBUG:teuthology.orchestra.run.vm08:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-06T23:58:18.055 INFO:teuthology.orchestra.run.vm03.stderr:bash: line 1: ntpq: command not found
2026-03-06T23:58:18.059 INFO:teuthology.orchestra.run.vm03.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-06T23:58:18.059 INFO:teuthology.orchestra.run.vm03.stdout:===============================================================================
2026-03-06T23:58:18.059 INFO:teuthology.orchestra.run.vm03.stdout:^+ ns1.blazing.de 3 6 377 56 -1304us[-1304us] +/- 17ms
2026-03-06T23:58:18.059 INFO:teuthology.orchestra.run.vm03.stdout:^+ dc8wan.de 2 6 377 58 -1487us[-1502us] +/- 48ms
2026-03-06T23:58:18.059 INFO:teuthology.orchestra.run.vm03.stdout:^+ sv5.ggsrv.de 2 6 377 58 +4253us[+4238us] +/- 23ms
2026-03-06T23:58:18.059 INFO:teuthology.orchestra.run.vm03.stdout:^* ntp5.kernfusion.at 2 6 377 57 -1396us[-1411us] +/- 17ms
2026-03-06T23:58:18.072 INFO:teuthology.orchestra.run.vm08.stderr:bash: line 1: ntpq: command not found
2026-03-06T23:58:18.074 INFO:teuthology.orchestra.run.vm08.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-06T23:58:18.074 INFO:teuthology.orchestra.run.vm08.stdout:===============================================================================
2026-03-06T23:58:18.074 INFO:teuthology.orchestra.run.vm08.stdout:^+ sv5.ggsrv.de 2 6 377 58 +4254us[+4257us] +/- 23ms
2026-03-06T23:58:18.075 INFO:teuthology.orchestra.run.vm08.stdout:^* ntp5.kernfusion.at 2 6 377 58 -1363us[-1360us] +/- 17ms
2026-03-06T23:58:18.075 INFO:teuthology.orchestra.run.vm08.stdout:^+ ns1.blazing.de 3 6 377 57 -1279us[-1279us] +/- 17ms
2026-03-06T23:58:18.075 INFO:teuthology.orchestra.run.vm08.stdout:^+ dc8wan.de 2 6 377 58 -1440us[-1440us] +/- 48ms
2026-03-06T23:58:18.075 DEBUG:teuthology.run_tasks:Unwinding manager ansible.cephlab
2026-03-06T23:58:18.078 INFO:teuthology.task.ansible:Skipping ansible cleanup...
2026-03-06T23:58:18.078 DEBUG:teuthology.run_tasks:Unwinding manager selinux
2026-03-06T23:58:18.081 DEBUG:teuthology.run_tasks:Unwinding manager pcp
2026-03-06T23:58:18.084 DEBUG:teuthology.run_tasks:Unwinding manager internal.timer
2026-03-06T23:58:18.087 INFO:teuthology.task.internal:Duration was 435.313899 seconds
2026-03-06T23:58:18.088 DEBUG:teuthology.run_tasks:Unwinding manager internal.syslog
2026-03-06T23:58:18.090 INFO:teuthology.task.internal.syslog:Shutting down syslog monitoring...
2026-03-06T23:58:18.090 DEBUG:teuthology.orchestra.run.vm03:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-06T23:58:18.102 DEBUG:teuthology.orchestra.run.vm08:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-06T23:58:18.139 INFO:teuthology.orchestra.run.vm03.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-06T23:58:18.156 INFO:teuthology.orchestra.run.vm08.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-06T23:58:18.519 INFO:teuthology.task.internal.syslog:Checking logs for errors...
2026-03-06T23:58:18.519 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm03.local
2026-03-06T23:58:18.520 DEBUG:teuthology.orchestra.run.vm03:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-06T23:58:18.567 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm08.local
2026-03-06T23:58:18.567 DEBUG:teuthology.orchestra.run.vm08:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-06T23:58:18.591 INFO:teuthology.task.internal.syslog:Gathering journactl...
2026-03-06T23:58:18.592 DEBUG:teuthology.orchestra.run.vm03:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-06T23:58:18.609 DEBUG:teuthology.orchestra.run.vm08:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-06T23:58:19.082 INFO:teuthology.task.internal.syslog:Compressing syslogs...
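[Editor's note] The grep pipeline above is the syslog error check: it pulls every kern.log line containing BUG, INFO, or DEADLOCK, strips a long allowlist of known-benign matches, and keeps only the first survivor (`head -n 1`); empty output means the check passes. The same logic as a Python sketch (the allowlist is abbreviated here, and the substring matching is a simplification of the mixed fixed-string/regex `grep -v` filters):

    import re

    ALERT = re.compile(r"\bBUG\b|\bINFO\b|\bDEADLOCK\b")
    BENIGN = (
        "lockdep is turned off",
        "ceph-create-keys: INFO",
        "Loaded datasource DataSourceOpenStack",
        # ... remaining `grep -v` patterns from the pipeline above ...
    )

    def first_suspicious_line(path: str):
        """Return the first unexplained BUG/INFO/DEADLOCK line, or None."""
        with open(path, errors="replace") as f:
            for line in f:
                if ALERT.search(line) and not any(b in line for b in BENIGN):
                    return line  # the `head -n 1` equivalent
        return None  # empty result: kern.log is clean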
2026-03-06T23:58:19.082 DEBUG:teuthology.orchestra.run.vm03:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-06T23:58:19.084 DEBUG:teuthology.orchestra.run.vm08:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-06T23:58:19.109 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-06T23:58:19.110 INFO:teuthology.orchestra.run.vm03.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-06T23:58:19.110 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-06T23:58:19.110 INFO:teuthology.orchestra.run.vm08.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-06T23:58:19.110 INFO:teuthology.orchestra.run.vm03.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-06T23:58:19.110 INFO:teuthology.orchestra.run.vm03.stderr: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-06T23:58:19.110 INFO:teuthology.orchestra.run.vm03.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-06T23:58:19.110 INFO:teuthology.orchestra.run.vm08.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: gzip -5 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-06T23:58:19.110 INFO:teuthology.orchestra.run.vm08.stderr: --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-06T23:58:19.110 INFO:teuthology.orchestra.run.vm08.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: /home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-06T23:58:19.241 INFO:teuthology.orchestra.run.vm03.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 97.9% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-06T23:58:19.244 INFO:teuthology.orchestra.run.vm08.stderr: 98.3% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-06T23:58:19.247 DEBUG:teuthology.run_tasks:Unwinding manager internal.sudo
2026-03-06T23:58:19.251 INFO:teuthology.task.internal:Restoring /etc/sudoers...
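[Editor's note] Compression fans out one `gzip -5` per log file via `xargs --max-procs=0` (unlimited parallelism), which is why the stderr entries above interleave mid-message: several gzip processes write verbose output to the same stream at once. The interleaving is cosmetic; each file still ends up replaced by its `.gz`. A sketch of the same per-file fan-out with a thread pool (the relative path is illustrative):

    import gzip
    import shutil
    from concurrent.futures import ThreadPoolExecutor
    from pathlib import Path

    def compress(path: Path) -> None:
        """gzip `path` at level 5 and remove the original, like `gzip -5`."""
        with open(path, "rb") as src, gzip.open(f"{path}.gz", "wb", compresslevel=5) as dst:
            shutil.copyfileobj(src, dst)
        path.unlink()

    logs = list(Path("archive/syslog").rglob("*.log"))
    with ThreadPoolExecutor() as pool:
        list(pool.map(compress, logs))  # one worker per file, like xargs --max-args=1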
2026-03-06T23:58:19.251 DEBUG:teuthology.orchestra.run.vm03:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-06T23:58:19.307 DEBUG:teuthology.orchestra.run.vm08:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-06T23:58:19.332 DEBUG:teuthology.run_tasks:Unwinding manager internal.coredump
2026-03-06T23:58:19.336 DEBUG:teuthology.orchestra.run.vm03:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-06T23:58:19.349 DEBUG:teuthology.orchestra.run.vm08:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-06T23:58:19.371 INFO:teuthology.orchestra.run.vm03.stdout:kernel.core_pattern = core
2026-03-06T23:58:19.398 INFO:teuthology.orchestra.run.vm08.stdout:kernel.core_pattern = core
2026-03-06T23:58:19.411 DEBUG:teuthology.orchestra.run.vm03:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-06T23:58:19.442 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-06T23:58:19.443 DEBUG:teuthology.orchestra.run.vm08:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-06T23:58:19.466 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-06T23:58:19.467 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive
2026-03-06T23:58:19.470 INFO:teuthology.task.internal:Transferring archived files...
2026-03-06T23:58:19.470 DEBUG:teuthology.misc:Transferring archived files from vm03:/home/ubuntu/cephtest/archive to /archive/irq0-2026-03-06_20:21:59-orch:cephadm:smoke-roleless-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/416/remote/vm03
2026-03-06T23:58:19.470 DEBUG:teuthology.orchestra.run.vm03:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-06T23:58:19.512 DEBUG:teuthology.misc:Transferring archived files from vm08:/home/ubuntu/cephtest/archive to /archive/irq0-2026-03-06_20:21:59-orch:cephadm:smoke-roleless-cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps/416/remote/vm08
2026-03-06T23:58:19.512 DEBUG:teuthology.orchestra.run.vm08:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-06T23:58:19.542 INFO:teuthology.task.internal:Removing archive directory...
2026-03-06T23:58:19.542 DEBUG:teuthology.orchestra.run.vm03:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-06T23:58:19.553 DEBUG:teuthology.orchestra.run.vm08:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-06T23:58:19.599 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive_upload
2026-03-06T23:58:19.603 INFO:teuthology.task.internal:Not uploading archives.
2026-03-06T23:58:19.603 DEBUG:teuthology.run_tasks:Unwinding manager internal.base
2026-03-06T23:58:19.605 INFO:teuthology.task.internal:Tidying up after the test...
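[Editor's note] The coredump unwind resets `kernel.core_pattern`, deletes any cores that `file` attributes to systemd-sysusers (a known-noisy crasher that would otherwise taint clean runs), and removes the directory if nothing else remains. The subsequent `test -e` exiting 1 on both hosts means no real cores survived, so there is nothing to archive. A Python sketch of that filter (not teuthology's code; the path comes from the log):

    import subprocess
    from pathlib import Path

    core_dir = Path("/home/ubuntu/cephtest/archive/coredump")
    for f in core_dir.glob("**/*"):
        if f.is_file():
            kind = subprocess.run(["file", str(f)], capture_output=True, text=True).stdout
            if "systemd-sysusers" in kind:
                f.unlink()  # drop cores from the known-benign crasher
    try:
        core_dir.rmdir()  # succeeds only when empty, like rmdir --ignore-fail-on-non-empty
    except OSError:
        pass  # real cores remain and would be flagged for the run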
2026-03-06T23:58:19.606 DEBUG:teuthology.orchestra.run.vm03:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-06T23:58:19.609 DEBUG:teuthology.orchestra.run.vm08:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-06T23:58:19.624 INFO:teuthology.orchestra.run.vm03.stdout: 8532145      0 drwxr-xr-x   2 ubuntu   ubuntu          6 Mar  6 23:58 /home/ubuntu/cephtest
2026-03-06T23:58:19.657 INFO:teuthology.orchestra.run.vm08.stdout: 8532144      0 drwxr-xr-x   2 ubuntu   ubuntu          6 Mar  6 23:58 /home/ubuntu/cephtest
2026-03-06T23:58:19.657 DEBUG:teuthology.run_tasks:Unwinding manager console_log
2026-03-06T23:58:19.663 INFO:teuthology.run:Summary data:
description: orch:cephadm:smoke-roleless/{0-distro/centos_9.stream 1-start 2-services/nfs-ingress-rgw-user 3-final}
duration: 435.313898563385
owner: irq0
success: true

2026-03-06T23:58:19.664 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-06T23:58:19.681 INFO:teuthology.run:pass
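[Editor's note] The run closes by pushing the summary (description, duration, owner, success) to the paddles report server at http://localhost:8080, the same target the job info was pushed to throughout the run. A hedged sketch of that final update; the `/runs/<name>/jobs/<id>/` route is an assumption based on paddles' documented API, not something shown in this log:

    import requests

    RUN = ("irq0-2026-03-06_20:21:59-orch:cephadm:smoke-roleless-"
           "cobaltcore-storage-v19.2.3-fasttrack-5-none-default-vps")
    summary = {
        "description": "orch:cephadm:smoke-roleless/{0-distro/centos_9.stream "
                       "1-start 2-services/nfs-ingress-rgw-user 3-final}",
        "duration": 435.313898563385,
        "owner": "irq0",
        "success": True,
        "status": "pass",
    }
    # Assumed paddles route; adjust to the deployment's actual API.
    requests.put(f"http://localhost:8080/runs/{RUN}/jobs/416/", json=summary, timeout=30)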