====================================================================================================
========================================= Pod describe =========================================
====================================================================================================
Name:         lodemon-6455d5ff7c-zzndq
Namespace:    xlou
Priority:     0
Node:         gke-xlou-cdm-default-pool-f05840a3-s6rh/10.142.0.104
Start Time:   Tue, 13 Jun 2023 15:31:52 +0000
Labels:       app=lodemon
              app.kubernetes.io/name=lodemon
              pod-template-hash=6455d5ff7c
              skaffold.dev/run-id=61c6006f-2295-4e3b-8e9b-5232ff175ccb
Annotations:  <none>
Status:       Running
IP:           10.106.46.66
IPs:
  IP:  10.106.46.66
Controlled By:  ReplicaSet/lodemon-6455d5ff7c
Containers:
  lodemon:
    Container ID:  containerd://f89040a033c11d2f0f937447fb2b1a6aabd9a416a0a5840e22d9ee20d6f8b045
    Image:         gcr.io/engineeringpit/lodestar-images/lodestarbox:master-stable
    Image ID:      gcr.io/engineeringpit/lodestar-images/lodestarbox@sha256:825a0581d069cb8056effcc909c4108144200594e5f34977359ab4df875932ac
    Port:          8080/TCP
    Host Port:     0/TCP
    Command:
      python3
    Args:
      /lodestar/scripts/lodemon_run.py
      -W
      default
    State:          Running
      Started:      Tue, 13 Jun 2023 15:32:09 +0000
    Ready:          True
    Restart Count:  0
    Limits:
      cpu:     2
      memory:  2Gi
    Requests:
      cpu:     1
      memory:  1Gi
    Liveness:   exec [cat /tmp/lodemon_alive] delay=20s timeout=1s period=10s #success=1 #failure=3
    Readiness:  exec [cat /tmp/lodemon_alive] delay=20s timeout=1s period=10s #success=1 #failure=3
    Environment:
      SKAFFOLD_PROFILE:  medium
    Mounts:
      /lodestar/config/config.yaml from config (rw,path="config.yaml")
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-q7lql (ro)
Conditions:
  Type              Status
  Initialized       True
  Ready             True
  ContainersReady   True
  PodScheduled      True
Volumes:
  config:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      lodemon-config
    Optional:  false
  kube-api-access-q7lql:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   Burstable
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type    Reason     Age   From               Message
  ----    ------     ----  ----               -------
  Normal  Scheduled  51s   default-scheduler  Successfully assigned xlou/lodemon-6455d5ff7c-zzndq to gke-xlou-cdm-default-pool-f05840a3-s6rh
  Normal  Pulling    51s   kubelet            Pulling image "gcr.io/engineeringpit/lodestar-images/lodestarbox:master-stable"
  Normal  Pulled     34s   kubelet            Successfully pulled image "gcr.io/engineeringpit/lodestar-images/lodestarbox:master-stable" in 17.073103826s
  Normal  Created    34s   kubelet            Created container lodemon
  Normal  Started    34s   kubelet            Started container lodemon
====================================================================================================
=========================================== Pod logs ===========================================
====================================================================================================
16:32:10 INFO
16:32:10 INFO --------------------- Get expected number of pods ---------------------
16:32:10 INFO
16:32:10 INFO [loop_until]: kubectl --namespace=xlou get deployments --selector app=am --output jsonpath={.items[*].spec.replicas}
16:32:10 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0]
16:32:10 INFO [loop_until]: OK (rc = 0)
16:32:10 DEBUG --- stdout ---
16:32:10 DEBUG 3
16:32:10 DEBUG --- stderr ---
16:32:10 DEBUG
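Every check in this log is driven by the same retry primitive: run a kubectl command, compare its return code (and, for the grep-style checks, its output) against what is expected, and retry on an interval until max_time runs out. The sketch below shows what such a helper could look like; the name loop_until, its signature, and its logging are assumptions inferred from the log format above, not lodemon's actual implementation.

# Hypothetical retry helper matching the "[loop_until]" entries in this log.
# Everything here is inferred from the log format, not taken from lodemon's source.
import subprocess
import time

def loop_until(command, max_time=180, interval=5, expected_rc=(0,)):
    """Run a shell command repeatedly until its return code is in expected_rc."""
    deadline = time.monotonic() + max_time
    while True:
        result = subprocess.run(command, shell=True, capture_output=True, text=True)
        if result.returncode in expected_rc:
            print(f"[loop_until]: OK (rc = {result.returncode})")
            return result
        if time.monotonic() >= deadline:
            raise TimeoutError(f"[loop_until]: no expected rc within {max_time}s: {command}")
        time.sleep(interval)

# Example: the replica-count query from the entries above.
replicas = loop_until(
    "kubectl --namespace=xlou get deployments --selector app=am "
    "--output jsonpath={.items[*].spec.replicas}",
    max_time=180, interval=5,
).stdout.strip()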
16:32:10 INFO
16:32:10 INFO ---------------------------- Get pod list ----------------------------
16:32:10 INFO
16:32:10 INFO [loop_until]: kubectl --namespace=xlou get pods --selector app=am --output jsonpath={.items[*].metadata.name}
16:32:10 INFO [loop_until]: (max_time=180, interval=10, expected_rc=[0]
16:32:10 INFO [loop_until]: OK (rc = 0)
16:32:10 DEBUG --- stdout ---
16:32:10 DEBUG am-7849cf7bdb-lzlql am-7849cf7bdb-ntqzs am-7849cf7bdb-xrngh
16:32:10 DEBUG --- stderr ---
16:32:10 DEBUG
16:32:10 INFO
16:32:10 INFO -------------- Check pod am-7849cf7bdb-lzlql is running --------------
16:32:10 INFO
16:32:10 INFO [loop_until]: kubectl --namespace=xlou get pods am-7849cf7bdb-lzlql -o=jsonpath={.status.phase} | grep "Running"
16:32:10 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:10 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:10 INFO [loop_until]: OK (rc = 0)
16:32:10 DEBUG --- stdout ---
16:32:10 DEBUG Running
16:32:10 DEBUG --- stderr ---
16:32:10 DEBUG
16:32:10 INFO
16:32:10 INFO [loop_until]: kubectl --namespace=xlou get pods am-7849cf7bdb-lzlql -o=jsonpath={.status.containerStatuses[*].ready} | grep "true"
16:32:10 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:11 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:11 INFO [loop_until]: OK (rc = 0)
16:32:11 DEBUG --- stdout ---
16:32:11 DEBUG true
16:32:11 DEBUG --- stderr ---
16:32:11 DEBUG
16:32:11 INFO
16:32:11 INFO [loop_until]: kubectl --namespace=xlou get pod am-7849cf7bdb-lzlql --output jsonpath={.status.startTime}
16:32:11 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0]
16:32:11 INFO [loop_until]: OK (rc = 0)
16:32:11 DEBUG --- stdout ---
16:32:11 DEBUG 2023-06-13T15:28:13Z
16:32:11 DEBUG --- stderr ---
16:32:11 DEBUG
16:32:11 INFO
16:32:11 INFO ------- Check pod am-7849cf7bdb-lzlql filesystem is accessible -------
16:32:11 INFO
16:32:11 INFO [loop_until]: kubectl --namespace=xlou exec am-7849cf7bdb-lzlql --container openam -- ls / | grep "bin"
16:32:11 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:11 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:11 INFO [loop_until]: OK (rc = 0)
16:32:11 DEBUG --- stdout ---
16:32:11 DEBUG bin boot dev etc home lib lib32 lib64 libx32 media mnt opt proc root run sbin srv sys tmp usr var
16:32:11 DEBUG --- stderr ---
16:32:11 DEBUG
16:32:11 INFO
16:32:11 INFO ------------- Check pod am-7849cf7bdb-lzlql restart count -------------
16:32:11 INFO
16:32:11 INFO [loop_until]: kubectl --namespace=xlou get pod am-7849cf7bdb-lzlql --output jsonpath={.status.containerStatuses[*].restartCount}
16:32:11 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0]
16:32:11 INFO [loop_until]: OK (rc = 0)
16:32:11 DEBUG --- stdout ---
16:32:11 DEBUG 0
16:32:11 DEBUG --- stderr ---
16:32:11 DEBUG
16:32:11 INFO Pod am-7849cf7bdb-lzlql has been restarted 0 times.
16:32:11 INFO
16:32:11 INFO -------------- Check pod am-7849cf7bdb-ntqzs is running --------------
16:32:11 INFO
16:32:11 INFO [loop_until]: kubectl --namespace=xlou get pods am-7849cf7bdb-ntqzs -o=jsonpath={.status.phase} | grep "Running"
16:32:11 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:11 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:11 INFO [loop_until]: OK (rc = 0)
16:32:11 DEBUG --- stdout ---
16:32:11 DEBUG Running
16:32:11 DEBUG --- stderr ---
16:32:11 DEBUG
16:32:11 INFO
16:32:11 INFO [loop_until]: kubectl --namespace=xlou get pods am-7849cf7bdb-ntqzs -o=jsonpath={.status.containerStatuses[*].ready} | grep "true"
16:32:11 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:11 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:11 INFO [loop_until]: OK (rc = 0)
16:32:11 DEBUG --- stdout ---
16:32:11 DEBUG true
16:32:11 DEBUG --- stderr ---
16:32:11 DEBUG
16:32:11 INFO
16:32:11 INFO [loop_until]: kubectl --namespace=xlou get pod am-7849cf7bdb-ntqzs --output jsonpath={.status.startTime}
16:32:11 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0]
16:32:11 INFO [loop_until]: OK (rc = 0)
16:32:11 DEBUG --- stdout ---
16:32:11 DEBUG 2023-06-13T15:28:13Z
16:32:11 DEBUG --- stderr ---
16:32:11 DEBUG
16:32:11 INFO
16:32:11 INFO ------- Check pod am-7849cf7bdb-ntqzs filesystem is accessible -------
16:32:11 INFO
16:32:11 INFO [loop_until]: kubectl --namespace=xlou exec am-7849cf7bdb-ntqzs --container openam -- ls / | grep "bin"
16:32:11 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:11 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:11 INFO [loop_until]: OK (rc = 0)
16:32:11 DEBUG --- stdout ---
16:32:11 DEBUG bin boot dev etc home lib lib32 lib64 libx32 media mnt opt proc root run sbin srv sys tmp usr var
16:32:11 DEBUG --- stderr ---
16:32:11 DEBUG
16:32:11 INFO
16:32:11 INFO ------------- Check pod am-7849cf7bdb-ntqzs restart count -------------
16:32:11 INFO
16:32:11 INFO [loop_until]: kubectl --namespace=xlou get pod am-7849cf7bdb-ntqzs --output jsonpath={.status.containerStatuses[*].restartCount}
16:32:11 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0]
16:32:11 INFO [loop_until]: OK (rc = 0)
16:32:11 DEBUG --- stdout ---
16:32:11 DEBUG 0
16:32:11 DEBUG --- stderr ---
16:32:11 DEBUG
16:32:11 INFO Pod am-7849cf7bdb-ntqzs has been restarted 0 times.
16:32:11 INFO
16:32:11 INFO -------------- Check pod am-7849cf7bdb-xrngh is running --------------
16:32:11 INFO
16:32:11 INFO [loop_until]: kubectl --namespace=xlou get pods am-7849cf7bdb-xrngh -o=jsonpath={.status.phase} | grep "Running"
16:32:11 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:11 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:11 INFO [loop_until]: OK (rc = 0)
16:32:11 DEBUG --- stdout ---
16:32:11 DEBUG Running
16:32:11 DEBUG --- stderr ---
16:32:11 DEBUG
16:32:11 INFO
16:32:11 INFO [loop_until]: kubectl --namespace=xlou get pods am-7849cf7bdb-xrngh -o=jsonpath={.status.containerStatuses[*].ready} | grep "true"
16:32:11 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:11 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:11 INFO [loop_until]: OK (rc = 0)
16:32:11 DEBUG --- stdout ---
16:32:11 DEBUG true
16:32:11 DEBUG --- stderr ---
16:32:11 DEBUG
16:32:11 INFO
16:32:11 INFO [loop_until]: kubectl --namespace=xlou get pod am-7849cf7bdb-xrngh --output jsonpath={.status.startTime}
16:32:11 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0]
16:32:11 INFO [loop_until]: OK (rc = 0)
16:32:11 DEBUG --- stdout ---
16:32:11 DEBUG 2023-06-13T15:28:13Z
16:32:11 DEBUG --- stderr ---
16:32:11 DEBUG
16:32:11 INFO
16:32:11 INFO ------- Check pod am-7849cf7bdb-xrngh filesystem is accessible -------
16:32:11 INFO
16:32:11 INFO [loop_until]: kubectl --namespace=xlou exec am-7849cf7bdb-xrngh --container openam -- ls / | grep "bin"
16:32:11 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:11 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:11 INFO [loop_until]: OK (rc = 0)
16:32:11 DEBUG --- stdout ---
16:32:11 DEBUG bin boot dev etc home lib lib32 lib64 libx32 media mnt opt proc root run sbin srv sys tmp usr var
16:32:11 DEBUG --- stderr ---
16:32:11 DEBUG
16:32:11 INFO
16:32:11 INFO ------------- Check pod am-7849cf7bdb-xrngh restart count -------------
16:32:11 INFO
16:32:11 INFO [loop_until]: kubectl --namespace=xlou get pod am-7849cf7bdb-xrngh --output jsonpath={.status.containerStatuses[*].restartCount}
16:32:11 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0]
16:32:11 INFO [loop_until]: OK (rc = 0)
16:32:11 DEBUG --- stdout ---
16:32:11 DEBUG 0
16:32:11 DEBUG --- stderr ---
16:32:11 DEBUG
16:32:11 INFO Pod am-7849cf7bdb-xrngh has been restarted 0 times.
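Each pod above is verified with the same five queries: phase, container readiness, start time, a directory listing inside the container, and the restart count. A compact sketch of that sequence, reusing the hypothetical loop_until helper from the earlier example; the pod, namespace, and container names are simply the ones seen in this log, and the function itself is not lodemon's actual code.

# Sketch of the per-pod verification sequence shown above, built on the
# hypothetical loop_until helper sketched earlier.
def check_pod(namespace, pod, container):
    base = f"kubectl --namespace={namespace}"
    loop_until(f'{base} get pods {pod} -o=jsonpath={{.status.phase}} | grep "Running"',
               max_time=360, interval=5)
    loop_until(f'{base} get pods {pod} -o=jsonpath={{.status.containerStatuses[*].ready}} | grep "true"',
               max_time=360, interval=5)
    loop_until(f"{base} get pod {pod} --output jsonpath={{.status.startTime}}")
    loop_until(f'{base} exec {pod} --container {container} -- ls / | grep "bin"',
               max_time=360, interval=5)
    restarts = loop_until(
        f"{base} get pod {pod} --output jsonpath={{.status.containerStatuses[*].restartCount}}"
    ).stdout.strip()
    print(f"Pod {pod} has been restarted {restarts} times.")

for name in ("am-7849cf7bdb-lzlql", "am-7849cf7bdb-ntqzs", "am-7849cf7bdb-xrngh"):
    check_pod("xlou", name, "openam")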
16:32:11 INFO
16:32:11 INFO --------------------- Get expected number of pods ---------------------
16:32:11 INFO
16:32:11 INFO [loop_until]: kubectl --namespace=xlou get deployment --selector app=idm --output jsonpath={.items[*].spec.replicas}
16:32:11 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0]
16:32:11 INFO [loop_until]: OK (rc = 0)
16:32:11 DEBUG --- stdout ---
16:32:11 DEBUG 2
16:32:11 DEBUG --- stderr ---
16:32:11 DEBUG
16:32:11 INFO
16:32:11 INFO ---------------------------- Get pod list ----------------------------
16:32:11 INFO
16:32:11 INFO [loop_until]: kubectl --namespace=xlou get pods --selector app=idm --output jsonpath={.items[*].metadata.name}
16:32:11 INFO [loop_until]: (max_time=180, interval=10, expected_rc=[0]
16:32:11 INFO [loop_until]: OK (rc = 0)
16:32:11 DEBUG --- stdout ---
16:32:11 DEBUG idm-58dc667486-ql6zm idm-58dc667486-rgz9j
16:32:11 DEBUG --- stderr ---
16:32:11 DEBUG
16:32:11 INFO
16:32:11 INFO -------------- Check pod idm-58dc667486-ql6zm is running --------------
16:32:11 INFO
16:32:11 INFO [loop_until]: kubectl --namespace=xlou get pods idm-58dc667486-ql6zm -o=jsonpath={.status.phase} | grep "Running"
16:32:11 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:11 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:11 INFO [loop_until]: OK (rc = 0)
16:32:11 DEBUG --- stdout ---
16:32:11 DEBUG Running
16:32:11 DEBUG --- stderr ---
16:32:11 DEBUG
16:32:11 INFO
16:32:11 INFO [loop_until]: kubectl --namespace=xlou get pods idm-58dc667486-ql6zm -o=jsonpath={.status.containerStatuses[*].ready} | grep "true"
16:32:11 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:11 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:11 INFO [loop_until]: OK (rc = 0)
16:32:11 DEBUG --- stdout ---
16:32:11 DEBUG true
16:32:11 DEBUG --- stderr ---
16:32:11 DEBUG
16:32:11 INFO
16:32:11 INFO [loop_until]: kubectl --namespace=xlou get pod idm-58dc667486-ql6zm --output jsonpath={.status.startTime}
16:32:11 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0]
16:32:11 INFO [loop_until]: OK (rc = 0)
16:32:11 DEBUG --- stdout ---
16:32:11 DEBUG 2023-06-13T15:28:14Z
16:32:11 DEBUG --- stderr ---
16:32:11 DEBUG
16:32:11 INFO
16:32:11 INFO ------- Check pod idm-58dc667486-ql6zm filesystem is accessible -------
16:32:11 INFO
16:32:11 INFO [loop_until]: kubectl --namespace=xlou exec idm-58dc667486-ql6zm --container openidm -- ls / | grep "bin"
16:32:11 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:12 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:12 INFO [loop_until]: OK (rc = 0)
16:32:12 DEBUG --- stdout ---
16:32:12 DEBUG Dockerfile.java-11 bin boot dev etc home lib lib64 media mnt opt proc root run sbin srv sys tmp usr var
16:32:12 DEBUG --- stderr ---
16:32:12 DEBUG
16:32:12 INFO
16:32:12 INFO ------------ Check pod idm-58dc667486-ql6zm restart count ------------
16:32:12 INFO
16:32:12 INFO [loop_until]: kubectl --namespace=xlou get pod idm-58dc667486-ql6zm --output jsonpath={.status.containerStatuses[*].restartCount}
16:32:12 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0]
16:32:12 INFO [loop_until]: OK (rc = 0)
16:32:12 DEBUG --- stdout ---
16:32:12 DEBUG 0
16:32:12 DEBUG --- stderr ---
16:32:12 DEBUG
16:32:12 INFO Pod idm-58dc667486-ql6zm has been restarted 0 times.
16:32:12 INFO
16:32:12 INFO -------------- Check pod idm-58dc667486-rgz9j is running --------------
16:32:12 INFO
16:32:12 INFO [loop_until]: kubectl --namespace=xlou get pods idm-58dc667486-rgz9j -o=jsonpath={.status.phase} | grep "Running"
16:32:12 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:12 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:12 INFO [loop_until]: OK (rc = 0)
16:32:12 DEBUG --- stdout ---
16:32:12 DEBUG Running
16:32:12 DEBUG --- stderr ---
16:32:12 DEBUG
16:32:12 INFO
16:32:12 INFO [loop_until]: kubectl --namespace=xlou get pods idm-58dc667486-rgz9j -o=jsonpath={.status.containerStatuses[*].ready} | grep "true"
16:32:12 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:12 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:12 INFO [loop_until]: OK (rc = 0)
16:32:12 DEBUG --- stdout ---
16:32:12 DEBUG true
16:32:12 DEBUG --- stderr ---
16:32:12 DEBUG
16:32:12 INFO
16:32:12 INFO [loop_until]: kubectl --namespace=xlou get pod idm-58dc667486-rgz9j --output jsonpath={.status.startTime}
16:32:12 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0]
16:32:12 INFO [loop_until]: OK (rc = 0)
16:32:12 DEBUG --- stdout ---
16:32:12 DEBUG 2023-06-13T15:28:13Z
16:32:12 DEBUG --- stderr ---
16:32:12 DEBUG
16:32:12 INFO
16:32:12 INFO ------- Check pod idm-58dc667486-rgz9j filesystem is accessible -------
16:32:12 INFO
16:32:12 INFO [loop_until]: kubectl --namespace=xlou exec idm-58dc667486-rgz9j --container openidm -- ls / | grep "bin"
16:32:12 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:12 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:12 INFO [loop_until]: OK (rc = 0)
16:32:12 DEBUG --- stdout ---
16:32:12 DEBUG Dockerfile.java-11 bin boot dev etc home lib lib64 media mnt opt proc root run sbin srv sys tmp usr var
16:32:12 DEBUG --- stderr ---
16:32:12 DEBUG
16:32:12 INFO
16:32:12 INFO ------------ Check pod idm-58dc667486-rgz9j restart count ------------
16:32:12 INFO
16:32:12 INFO [loop_until]: kubectl --namespace=xlou get pod idm-58dc667486-rgz9j --output jsonpath={.status.containerStatuses[*].restartCount}
16:32:12 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0]
16:32:12 INFO [loop_until]: OK (rc = 0)
16:32:12 DEBUG --- stdout ---
16:32:12 DEBUG 0
16:32:12 DEBUG --- stderr ---
16:32:12 DEBUG
16:32:12 INFO Pod idm-58dc667486-rgz9j has been restarted 0 times.
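The same pattern repeats per component: first read the declared replica count from the Deployment (or StatefulSet), then list the pods that actually match the selector, then check each pod in turn. A hedged sketch of that cross-check follows, again using the hypothetical loop_until helper; the selector and namespace are the ones from this log, and the final comparison is an assumption about what the monitor does with these two values.

# Sketch: compare the declared replica count with the pods actually present.
expected = int(loop_until(
    "kubectl --namespace=xlou get deployment --selector app=idm "
    "--output jsonpath={.items[*].spec.replicas}",
    max_time=180, interval=5,
).stdout.strip())

pods = loop_until(
    "kubectl --namespace=xlou get pods --selector app=idm "
    "--output jsonpath={.items[*].metadata.name}",
    max_time=180, interval=10,
).stdout.split()

assert len(pods) == expected, f"expected {expected} idm pods, found {len(pods)}: {pods}"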
16:32:12 INFO
16:32:12 INFO --------------------- Get expected number of pods ---------------------
16:32:12 INFO
16:32:12 INFO [loop_until]: kubectl --namespace=xlou get statefulsets --selector app=ds-idrepo --output jsonpath={.items[*].spec.replicas}
16:32:12 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0]
16:32:12 INFO [loop_until]: OK (rc = 0)
16:32:12 DEBUG --- stdout ---
16:32:12 DEBUG 3
16:32:12 DEBUG --- stderr ---
16:32:12 DEBUG
16:32:12 INFO
16:32:12 INFO ---------------------------- Get pod list ----------------------------
16:32:12 INFO
16:32:12 INFO [loop_until]: kubectl --namespace=xlou get pods --selector app=ds-idrepo --output jsonpath={.items[*].metadata.name}
16:32:12 INFO [loop_until]: (max_time=180, interval=10, expected_rc=[0]
16:32:12 INFO [loop_until]: OK (rc = 0)
16:32:12 DEBUG --- stdout ---
16:32:12 DEBUG ds-idrepo-0 ds-idrepo-1 ds-idrepo-2
16:32:12 DEBUG --- stderr ---
16:32:12 DEBUG
16:32:12 INFO
16:32:12 INFO ------------------ Check pod ds-idrepo-0 is running ------------------
16:32:12 INFO
16:32:12 INFO [loop_until]: kubectl --namespace=xlou get pods ds-idrepo-0 -o=jsonpath={.status.phase} | grep "Running"
16:32:12 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:12 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:12 INFO [loop_until]: OK (rc = 0)
16:32:12 DEBUG --- stdout ---
16:32:12 DEBUG Running
16:32:12 DEBUG --- stderr ---
16:32:12 DEBUG
16:32:12 INFO
16:32:12 INFO [loop_until]: kubectl --namespace=xlou get pods ds-idrepo-0 -o=jsonpath={.status.containerStatuses[*].ready} | grep "true"
16:32:12 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:12 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:12 INFO [loop_until]: OK (rc = 0)
16:32:12 DEBUG --- stdout ---
16:32:12 DEBUG true
16:32:12 DEBUG --- stderr ---
16:32:12 DEBUG
16:32:12 INFO
16:32:12 INFO [loop_until]: kubectl --namespace=xlou get pod ds-idrepo-0 --output jsonpath={.status.startTime}
16:32:12 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0]
16:32:12 INFO [loop_until]: OK (rc = 0)
16:32:12 DEBUG --- stdout ---
16:32:12 DEBUG 2023-06-13T14:32:28Z
16:32:12 DEBUG --- stderr ---
16:32:12 DEBUG
16:32:12 INFO
16:32:12 INFO ----------- Check pod ds-idrepo-0 filesystem is accessible -----------
16:32:12 INFO
16:32:12 INFO [loop_until]: kubectl --namespace=xlou exec ds-idrepo-0 --container ds -- ls / | grep "bin"
16:32:12 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:12 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:12 INFO [loop_until]: OK (rc = 0)
16:32:12 DEBUG --- stdout ---
16:32:12 DEBUG Dockerfile.java-17 bin boot dev etc home lib lib64 media mnt opt proc root run sbin srv sys tmp usr var
16:32:12 DEBUG --- stderr ---
16:32:12 DEBUG
16:32:12 INFO
16:32:12 INFO ----------------- Check pod ds-idrepo-0 restart count -----------------
16:32:12 INFO
16:32:12 INFO [loop_until]: kubectl --namespace=xlou get pod ds-idrepo-0 --output jsonpath={.status.containerStatuses[*].restartCount}
16:32:12 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0]
16:32:12 INFO [loop_until]: OK (rc = 0)
16:32:12 DEBUG --- stdout ---
16:32:12 DEBUG 0
16:32:12 DEBUG --- stderr ---
16:32:12 DEBUG
16:32:12 INFO Pod ds-idrepo-0 has been restarted 0 times.
16:32:12 INFO
16:32:12 INFO ------------------ Check pod ds-idrepo-1 is running ------------------
16:32:12 INFO
16:32:12 INFO [loop_until]: kubectl --namespace=xlou get pods ds-idrepo-1 -o=jsonpath={.status.phase} | grep "Running"
16:32:12 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:12 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:12 INFO [loop_until]: OK (rc = 0)
16:32:12 DEBUG --- stdout ---
16:32:12 DEBUG Running
16:32:12 DEBUG --- stderr ---
16:32:12 DEBUG
16:32:12 INFO
16:32:12 INFO [loop_until]: kubectl --namespace=xlou get pods ds-idrepo-1 -o=jsonpath={.status.containerStatuses[*].ready} | grep "true"
16:32:12 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:12 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:12 INFO [loop_until]: OK (rc = 0)
16:32:12 DEBUG --- stdout ---
16:32:12 DEBUG true
16:32:12 DEBUG --- stderr ---
16:32:12 DEBUG
16:32:12 INFO
16:32:12 INFO [loop_until]: kubectl --namespace=xlou get pod ds-idrepo-1 --output jsonpath={.status.startTime}
16:32:12 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0]
16:32:12 INFO [loop_until]: OK (rc = 0)
16:32:12 DEBUG --- stdout ---
16:32:12 DEBUG 2023-06-13T14:55:43Z
16:32:12 DEBUG --- stderr ---
16:32:12 DEBUG
16:32:12 INFO
16:32:12 INFO ----------- Check pod ds-idrepo-1 filesystem is accessible -----------
16:32:12 INFO
16:32:12 INFO [loop_until]: kubectl --namespace=xlou exec ds-idrepo-1 --container ds -- ls / | grep "bin"
16:32:12 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:13 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:13 INFO [loop_until]: OK (rc = 0)
16:32:13 DEBUG --- stdout ---
16:32:13 DEBUG Dockerfile.java-17 bin boot dev etc home lib lib64 media mnt opt proc root run sbin srv sys tmp usr var
16:32:13 DEBUG --- stderr ---
16:32:13 DEBUG
16:32:13 INFO
16:32:13 INFO ----------------- Check pod ds-idrepo-1 restart count -----------------
16:32:13 INFO
16:32:13 INFO [loop_until]: kubectl --namespace=xlou get pod ds-idrepo-1 --output jsonpath={.status.containerStatuses[*].restartCount}
16:32:13 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0]
16:32:13 INFO [loop_until]: OK (rc = 0)
16:32:13 DEBUG --- stdout ---
16:32:13 DEBUG 0
16:32:13 DEBUG --- stderr ---
16:32:13 DEBUG
16:32:13 INFO Pod ds-idrepo-1 has been restarted 0 times.
16:32:13 INFO
16:32:13 INFO ------------------ Check pod ds-idrepo-2 is running ------------------
16:32:13 INFO
16:32:13 INFO [loop_until]: kubectl --namespace=xlou get pods ds-idrepo-2 -o=jsonpath={.status.phase} | grep "Running"
16:32:13 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:13 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:13 INFO [loop_until]: OK (rc = 0)
16:32:13 DEBUG --- stdout ---
16:32:13 DEBUG Running
16:32:13 DEBUG --- stderr ---
16:32:13 DEBUG
16:32:13 INFO
16:32:13 INFO [loop_until]: kubectl --namespace=xlou get pods ds-idrepo-2 -o=jsonpath={.status.containerStatuses[*].ready} | grep "true"
16:32:13 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:13 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:13 INFO [loop_until]: OK (rc = 0)
16:32:13 DEBUG --- stdout ---
16:32:13 DEBUG true
16:32:13 DEBUG --- stderr ---
16:32:13 DEBUG
16:32:13 INFO
16:32:13 INFO [loop_until]: kubectl --namespace=xlou get pod ds-idrepo-2 --output jsonpath={.status.startTime}
16:32:13 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0]
16:32:13 INFO [loop_until]: OK (rc = 0)
16:32:13 DEBUG --- stdout ---
16:32:13 DEBUG 2023-06-13T15:12:17Z
16:32:13 DEBUG --- stderr ---
16:32:13 DEBUG
16:32:13 INFO
16:32:13 INFO ----------- Check pod ds-idrepo-2 filesystem is accessible -----------
16:32:13 INFO
16:32:13 INFO [loop_until]: kubectl --namespace=xlou exec ds-idrepo-2 --container ds -- ls / | grep "bin"
16:32:13 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:13 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:13 INFO [loop_until]: OK (rc = 0)
16:32:13 DEBUG --- stdout ---
16:32:13 DEBUG Dockerfile.java-17 bin boot dev etc home lib lib64 media mnt opt proc root run sbin srv sys tmp usr var
16:32:13 DEBUG --- stderr ---
16:32:13 DEBUG
16:32:13 INFO
16:32:13 INFO ----------------- Check pod ds-idrepo-2 restart count -----------------
16:32:13 INFO
16:32:13 INFO [loop_until]: kubectl --namespace=xlou get pod ds-idrepo-2 --output jsonpath={.status.containerStatuses[*].restartCount}
16:32:13 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0]
16:32:13 INFO [loop_until]: OK (rc = 0)
16:32:13 DEBUG --- stdout ---
16:32:13 DEBUG 0
16:32:13 DEBUG --- stderr ---
16:32:13 DEBUG
16:32:13 INFO Pod ds-idrepo-2 has been restarted 0 times.
16:32:13 INFO
16:32:13 INFO --------------------- Get expected number of pods ---------------------
16:32:13 INFO
16:32:13 INFO [loop_until]: kubectl --namespace=xlou get statefulsets --selector app=ds-cts --output jsonpath={.items[*].spec.replicas}
16:32:13 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0]
16:32:13 INFO [loop_until]: OK (rc = 0)
16:32:13 DEBUG --- stdout ---
16:32:13 DEBUG 3
16:32:13 DEBUG --- stderr ---
16:32:13 DEBUG
16:32:13 INFO
16:32:13 INFO ---------------------------- Get pod list ----------------------------
16:32:13 INFO
16:32:13 INFO [loop_until]: kubectl --namespace=xlou get pods --selector app=ds-cts --output jsonpath={.items[*].metadata.name}
16:32:13 INFO [loop_until]: (max_time=180, interval=10, expected_rc=[0]
16:32:13 INFO [loop_until]: OK (rc = 0)
16:32:13 DEBUG --- stdout ---
16:32:13 DEBUG ds-cts-0 ds-cts-1 ds-cts-2
16:32:13 DEBUG --- stderr ---
16:32:13 DEBUG
16:32:13 INFO
16:32:13 INFO -------------------- Check pod ds-cts-0 is running --------------------
16:32:13 INFO
16:32:13 INFO [loop_until]: kubectl --namespace=xlou get pods ds-cts-0 -o=jsonpath={.status.phase} | grep "Running"
16:32:13 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:13 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:13 INFO [loop_until]: OK (rc = 0)
16:32:13 DEBUG --- stdout ---
16:32:13 DEBUG Running
16:32:13 DEBUG --- stderr ---
16:32:13 DEBUG
16:32:13 INFO
16:32:13 INFO [loop_until]: kubectl --namespace=xlou get pods ds-cts-0 -o=jsonpath={.status.containerStatuses[*].ready} | grep "true"
16:32:13 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:13 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:13 INFO [loop_until]: OK (rc = 0)
16:32:13 DEBUG --- stdout ---
16:32:13 DEBUG true
16:32:13 DEBUG --- stderr ---
16:32:13 DEBUG
16:32:13 INFO
16:32:13 INFO [loop_until]: kubectl --namespace=xlou get pod ds-cts-0 --output jsonpath={.status.startTime}
16:32:13 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0]
16:32:13 INFO [loop_until]: OK (rc = 0)
16:32:13 DEBUG --- stdout ---
16:32:13 DEBUG 2023-06-13T14:32:28Z
16:32:13 DEBUG --- stderr ---
16:32:13 DEBUG
16:32:13 INFO
16:32:13 INFO ------------- Check pod ds-cts-0 filesystem is accessible -------------
16:32:13 INFO
16:32:13 INFO [loop_until]: kubectl --namespace=xlou exec ds-cts-0 --container ds -- ls / | grep "bin"
16:32:13 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:13 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:13 INFO [loop_until]: OK (rc = 0)
16:32:13 DEBUG --- stdout ---
16:32:13 DEBUG Dockerfile.java-17 bin boot dev etc home lib lib64 media mnt opt proc root run sbin srv sys tmp usr var
16:32:13 DEBUG --- stderr ---
16:32:13 DEBUG
16:32:13 INFO
16:32:13 INFO ------------------ Check pod ds-cts-0 restart count ------------------
16:32:13 INFO
16:32:13 INFO [loop_until]: kubectl --namespace=xlou get pod ds-cts-0 --output jsonpath={.status.containerStatuses[*].restartCount}
16:32:13 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0]
16:32:13 INFO [loop_until]: OK (rc = 0)
16:32:13 DEBUG --- stdout ---
16:32:13 DEBUG 0
16:32:13 DEBUG --- stderr ---
16:32:13 DEBUG
16:32:13 INFO Pod ds-cts-0 has been restarted 0 times.
16:32:13 INFO
16:32:13 INFO -------------------- Check pod ds-cts-1 is running --------------------
16:32:13 INFO
16:32:13 INFO [loop_until]: kubectl --namespace=xlou get pods ds-cts-1 -o=jsonpath={.status.phase} | grep "Running"
16:32:13 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:13 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:13 INFO [loop_until]: OK (rc = 0)
16:32:13 DEBUG --- stdout ---
16:32:13 DEBUG Running
16:32:13 DEBUG --- stderr ---
16:32:13 DEBUG
16:32:13 INFO
16:32:13 INFO [loop_until]: kubectl --namespace=xlou get pods ds-cts-1 -o=jsonpath={.status.containerStatuses[*].ready} | grep "true"
16:32:13 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:13 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:13 INFO [loop_until]: OK (rc = 0)
16:32:13 DEBUG --- stdout ---
16:32:13 DEBUG true
16:32:13 DEBUG --- stderr ---
16:32:13 DEBUG
16:32:13 INFO
16:32:13 INFO [loop_until]: kubectl --namespace=xlou get pod ds-cts-1 --output jsonpath={.status.startTime}
16:32:13 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0]
16:32:13 INFO [loop_until]: OK (rc = 0)
16:32:13 DEBUG --- stdout ---
16:32:13 DEBUG 2023-06-13T14:32:58Z
16:32:13 DEBUG --- stderr ---
16:32:13 DEBUG
16:32:13 INFO
16:32:13 INFO ------------- Check pod ds-cts-1 filesystem is accessible -------------
16:32:13 INFO
16:32:13 INFO [loop_until]: kubectl --namespace=xlou exec ds-cts-1 --container ds -- ls / | grep "bin"
16:32:13 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:13 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:13 INFO [loop_until]: OK (rc = 0)
16:32:13 DEBUG --- stdout ---
16:32:13 DEBUG Dockerfile.java-17 bin boot dev etc home lib lib64 media mnt opt proc root run sbin srv sys tmp usr var
16:32:13 DEBUG --- stderr ---
16:32:13 DEBUG
16:32:13 INFO
16:32:13 INFO ------------------ Check pod ds-cts-1 restart count ------------------
16:32:13 INFO
16:32:13 INFO [loop_until]: kubectl --namespace=xlou get pod ds-cts-1 --output jsonpath={.status.containerStatuses[*].restartCount}
16:32:13 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0]
16:32:14 INFO [loop_until]: OK (rc = 0)
16:32:14 DEBUG --- stdout ---
16:32:14 DEBUG 0
16:32:14 DEBUG --- stderr ---
16:32:14 DEBUG
16:32:14 INFO Pod ds-cts-1 has been restarted 0 times.
16:32:14 INFO
16:32:14 INFO -------------------- Check pod ds-cts-2 is running --------------------
16:32:14 INFO
16:32:14 INFO [loop_until]: kubectl --namespace=xlou get pods ds-cts-2 -o=jsonpath={.status.phase} | grep "Running"
16:32:14 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:14 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:14 INFO [loop_until]: OK (rc = 0)
16:32:14 DEBUG --- stdout ---
16:32:14 DEBUG Running
16:32:14 DEBUG --- stderr ---
16:32:14 DEBUG
16:32:14 INFO
16:32:14 INFO [loop_until]: kubectl --namespace=xlou get pods ds-cts-2 -o=jsonpath={.status.containerStatuses[*].ready} | grep "true"
16:32:14 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:14 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:14 INFO [loop_until]: OK (rc = 0)
16:32:14 DEBUG --- stdout ---
16:32:14 DEBUG true
16:32:14 DEBUG --- stderr ---
16:32:14 DEBUG
16:32:14 INFO
16:32:14 INFO [loop_until]: kubectl --namespace=xlou get pod ds-cts-2 --output jsonpath={.status.startTime}
16:32:14 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0]
16:32:14 INFO [loop_until]: OK (rc = 0)
16:32:14 DEBUG --- stdout ---
16:32:14 DEBUG 2023-06-13T14:33:28Z
16:32:14 DEBUG --- stderr ---
16:32:14 DEBUG
16:32:14 INFO
16:32:14 INFO ------------- Check pod ds-cts-2 filesystem is accessible -------------
16:32:14 INFO
16:32:14 INFO [loop_until]: kubectl --namespace=xlou exec ds-cts-2 --container ds -- ls / | grep "bin"
16:32:14 INFO [loop_until]: (max_time=360, interval=5, expected_rc=[0]
16:32:14 INFO [loop_until]: Function succeeded after 0s (rc=0) - expected pattern found
16:32:14 INFO [loop_until]: OK (rc = 0)
16:32:14 DEBUG --- stdout ---
16:32:14 DEBUG Dockerfile.java-17 bin boot dev etc home lib lib64 media mnt opt proc root run sbin srv sys tmp usr var
16:32:14 DEBUG --- stderr ---
16:32:14 DEBUG
16:32:14 INFO
16:32:14 INFO ------------------ Check pod ds-cts-2 restart count ------------------
16:32:14 INFO
16:32:14 INFO [loop_until]: kubectl --namespace=xlou get pod ds-cts-2 --output jsonpath={.status.containerStatuses[*].restartCount}
16:32:14 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0]
16:32:14 INFO [loop_until]: OK (rc = 0)
16:32:14 DEBUG --- stdout ---
16:32:14 DEBUG 0
16:32:14 DEBUG --- stderr ---
16:32:14 DEBUG
16:32:14 INFO Pod ds-cts-2 has been restarted 0 times.
 * Serving Flask app 'lodemon_run'
 * Debug mode: off
WARNING: This is a development server. Do not use it in a production deployment. Use a production WSGI server instead.
* Running on all addresses (0.0.0.0) * Running on http://127.0.0.1:8080 * Running on http://10.106.46.66:8080 Press CTRL+C to quit 16:32:38 INFO 16:32:38 INFO [loop_until]: kubectl get services -o=jsonpath='{.items[?(@.metadata.labels.app=="kube-prometheus-stack-prometheus")]}' --all-namespaces 16:32:38 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0] 16:32:38 INFO [loop_until]: OK (rc = 0) 16:32:38 DEBUG --- stdout --- 16:32:38 DEBUG {"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"cloud.google.com/neg":"{\"ingress\":true}","meta.helm.sh/release-name":"prometheus-operator","meta.helm.sh/release-namespace":"monitoring"},"creationTimestamp":"2023-05-27T02:35:19Z","labels":{"app":"kube-prometheus-stack-prometheus","app.kubernetes.io/instance":"prometheus-operator","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/part-of":"kube-prometheus-stack","app.kubernetes.io/version":"46.4.1","chart":"kube-prometheus-stack-46.4.1","heritage":"Helm","release":"prometheus-operator","self-monitor":"true"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:meta.helm.sh/release-name":{},"f:meta.helm.sh/release-namespace":{}},"f:labels":{".":{},"f:app":{},"f:app.kubernetes.io/instance":{},"f:app.kubernetes.io/managed-by":{},"f:app.kubernetes.io/part-of":{},"f:app.kubernetes.io/version":{},"f:chart":{},"f:heritage":{},"f:release":{},"f:self-monitor":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":9090,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:selector":{},"f:sessionAffinity":{},"f:type":{}}},"manager":"helm","operation":"Update","time":"2023-05-27T02:35:19Z"}],"name":"prometheus-operator-kube-p-prometheus","namespace":"monitoring","resourceVersion":"7148","uid":"eb1f35e8-cd91-4a12-8fe3-2e8bdf841da5"},"spec":{"clusterIP":"10.106.49.67","clusterIPs":["10.106.49.67"],"internalTrafficPolicy":"Cluster","ipFamilies":["IPv4"],"ipFamilyPolicy":"SingleStack","ports":[{"name":"http-web","port":9090,"protocol":"TCP","targetPort":9090}],"selector":{"app.kubernetes.io/name":"prometheus","prometheus":"prometheus-operator-kube-p-prometheus"},"sessionAffinity":"None","type":"ClusterIP"},"status":{"loadBalancer":{}}} 16:32:38 DEBUG --- stderr --- 16:32:38 DEBUG 16:32:38 INFO 16:32:38 INFO [loop_until]: kubectl get services -o=jsonpath='{.items[?(@.metadata.labels.app=="kube-prometheus-stack-prometheus")]}' --all-namespaces 16:32:38 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0] 16:32:38 INFO [loop_until]: OK (rc = 0) 16:32:38 DEBUG --- stdout --- 16:32:38 DEBUG 
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"cloud.google.com/neg":"{\"ingress\":true}","meta.helm.sh/release-name":"prometheus-operator","meta.helm.sh/release-namespace":"monitoring"},"creationTimestamp":"2023-05-27T02:35:19Z","labels":{"app":"kube-prometheus-stack-prometheus","app.kubernetes.io/instance":"prometheus-operator","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/part-of":"kube-prometheus-stack","app.kubernetes.io/version":"46.4.1","chart":"kube-prometheus-stack-46.4.1","heritage":"Helm","release":"prometheus-operator","self-monitor":"true"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:meta.helm.sh/release-name":{},"f:meta.helm.sh/release-namespace":{}},"f:labels":{".":{},"f:app":{},"f:app.kubernetes.io/instance":{},"f:app.kubernetes.io/managed-by":{},"f:app.kubernetes.io/part-of":{},"f:app.kubernetes.io/version":{},"f:chart":{},"f:heritage":{},"f:release":{},"f:self-monitor":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":9090,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:selector":{},"f:sessionAffinity":{},"f:type":{}}},"manager":"helm","operation":"Update","time":"2023-05-27T02:35:19Z"}],"name":"prometheus-operator-kube-p-prometheus","namespace":"monitoring","resourceVersion":"7148","uid":"eb1f35e8-cd91-4a12-8fe3-2e8bdf841da5"},"spec":{"clusterIP":"10.106.49.67","clusterIPs":["10.106.49.67"],"internalTrafficPolicy":"Cluster","ipFamilies":["IPv4"],"ipFamilyPolicy":"SingleStack","ports":[{"name":"http-web","port":9090,"protocol":"TCP","targetPort":9090}],"selector":{"app.kubernetes.io/name":"prometheus","prometheus":"prometheus-operator-kube-p-prometheus"},"sessionAffinity":"None","type":"ClusterIP"},"status":{"loadBalancer":{}}} 16:32:38 DEBUG --- stderr --- 16:32:38 DEBUG 16:32:38 INFO 16:32:38 INFO [loop_until]: kubectl get services -o=jsonpath='{.items[?(@.metadata.labels.app=="kube-prometheus-stack-prometheus")]}' --all-namespaces 16:32:38 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0] 16:32:38 INFO [loop_until]: OK (rc = 0) 16:32:38 DEBUG --- stdout --- 16:32:38 DEBUG 
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"cloud.google.com/neg":"{\"ingress\":true}","meta.helm.sh/release-name":"prometheus-operator","meta.helm.sh/release-namespace":"monitoring"},"creationTimestamp":"2023-05-27T02:35:19Z","labels":{"app":"kube-prometheus-stack-prometheus","app.kubernetes.io/instance":"prometheus-operator","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/part-of":"kube-prometheus-stack","app.kubernetes.io/version":"46.4.1","chart":"kube-prometheus-stack-46.4.1","heritage":"Helm","release":"prometheus-operator","self-monitor":"true"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:meta.helm.sh/release-name":{},"f:meta.helm.sh/release-namespace":{}},"f:labels":{".":{},"f:app":{},"f:app.kubernetes.io/instance":{},"f:app.kubernetes.io/managed-by":{},"f:app.kubernetes.io/part-of":{},"f:app.kubernetes.io/version":{},"f:chart":{},"f:heritage":{},"f:release":{},"f:self-monitor":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":9090,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:selector":{},"f:sessionAffinity":{},"f:type":{}}},"manager":"helm","operation":"Update","time":"2023-05-27T02:35:19Z"}],"name":"prometheus-operator-kube-p-prometheus","namespace":"monitoring","resourceVersion":"7148","uid":"eb1f35e8-cd91-4a12-8fe3-2e8bdf841da5"},"spec":{"clusterIP":"10.106.49.67","clusterIPs":["10.106.49.67"],"internalTrafficPolicy":"Cluster","ipFamilies":["IPv4"],"ipFamilyPolicy":"SingleStack","ports":[{"name":"http-web","port":9090,"protocol":"TCP","targetPort":9090}],"selector":{"app.kubernetes.io/name":"prometheus","prometheus":"prometheus-operator-kube-p-prometheus"},"sessionAffinity":"None","type":"ClusterIP"},"status":{"loadBalancer":{}}} 16:32:38 DEBUG --- stderr --- 16:32:38 DEBUG 16:32:39 INFO 16:32:39 INFO [loop_until]: kubectl get services -o=jsonpath='{.items[?(@.metadata.labels.app=="kube-prometheus-stack-prometheus")]}' --all-namespaces 16:32:39 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0] 16:32:39 INFO [loop_until]: OK (rc = 0) 16:32:39 DEBUG --- stdout --- 16:32:39 DEBUG 
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"cloud.google.com/neg":"{\"ingress\":true}","meta.helm.sh/release-name":"prometheus-operator","meta.helm.sh/release-namespace":"monitoring"},"creationTimestamp":"2023-05-27T02:35:19Z","labels":{"app":"kube-prometheus-stack-prometheus","app.kubernetes.io/instance":"prometheus-operator","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/part-of":"kube-prometheus-stack","app.kubernetes.io/version":"46.4.1","chart":"kube-prometheus-stack-46.4.1","heritage":"Helm","release":"prometheus-operator","self-monitor":"true"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:meta.helm.sh/release-name":{},"f:meta.helm.sh/release-namespace":{}},"f:labels":{".":{},"f:app":{},"f:app.kubernetes.io/instance":{},"f:app.kubernetes.io/managed-by":{},"f:app.kubernetes.io/part-of":{},"f:app.kubernetes.io/version":{},"f:chart":{},"f:heritage":{},"f:release":{},"f:self-monitor":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":9090,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:selector":{},"f:sessionAffinity":{},"f:type":{}}},"manager":"helm","operation":"Update","time":"2023-05-27T02:35:19Z"}],"name":"prometheus-operator-kube-p-prometheus","namespace":"monitoring","resourceVersion":"7148","uid":"eb1f35e8-cd91-4a12-8fe3-2e8bdf841da5"},"spec":{"clusterIP":"10.106.49.67","clusterIPs":["10.106.49.67"],"internalTrafficPolicy":"Cluster","ipFamilies":["IPv4"],"ipFamilyPolicy":"SingleStack","ports":[{"name":"http-web","port":9090,"protocol":"TCP","targetPort":9090}],"selector":{"app.kubernetes.io/name":"prometheus","prometheus":"prometheus-operator-kube-p-prometheus"},"sessionAffinity":"None","type":"ClusterIP"},"status":{"loadBalancer":{}}} 16:32:39 DEBUG --- stderr --- 16:32:39 DEBUG 16:32:39 INFO 16:32:39 INFO [loop_until]: kubectl get services -o=jsonpath='{.items[?(@.metadata.labels.app=="kube-prometheus-stack-prometheus")]}' --all-namespaces 16:32:39 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0] 16:32:39 INFO [loop_until]: OK (rc = 0) 16:32:39 DEBUG --- stdout --- 16:32:39 DEBUG 
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"cloud.google.com/neg":"{\"ingress\":true}","meta.helm.sh/release-name":"prometheus-operator","meta.helm.sh/release-namespace":"monitoring"},"creationTimestamp":"2023-05-27T02:35:19Z","labels":{"app":"kube-prometheus-stack-prometheus","app.kubernetes.io/instance":"prometheus-operator","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/part-of":"kube-prometheus-stack","app.kubernetes.io/version":"46.4.1","chart":"kube-prometheus-stack-46.4.1","heritage":"Helm","release":"prometheus-operator","self-monitor":"true"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:meta.helm.sh/release-name":{},"f:meta.helm.sh/release-namespace":{}},"f:labels":{".":{},"f:app":{},"f:app.kubernetes.io/instance":{},"f:app.kubernetes.io/managed-by":{},"f:app.kubernetes.io/part-of":{},"f:app.kubernetes.io/version":{},"f:chart":{},"f:heritage":{},"f:release":{},"f:self-monitor":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":9090,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:selector":{},"f:sessionAffinity":{},"f:type":{}}},"manager":"helm","operation":"Update","time":"2023-05-27T02:35:19Z"}],"name":"prometheus-operator-kube-p-prometheus","namespace":"monitoring","resourceVersion":"7148","uid":"eb1f35e8-cd91-4a12-8fe3-2e8bdf841da5"},"spec":{"clusterIP":"10.106.49.67","clusterIPs":["10.106.49.67"],"internalTrafficPolicy":"Cluster","ipFamilies":["IPv4"],"ipFamilyPolicy":"SingleStack","ports":[{"name":"http-web","port":9090,"protocol":"TCP","targetPort":9090}],"selector":{"app.kubernetes.io/name":"prometheus","prometheus":"prometheus-operator-kube-p-prometheus"},"sessionAffinity":"None","type":"ClusterIP"},"status":{"loadBalancer":{}}} 16:32:39 DEBUG --- stderr --- 16:32:39 DEBUG 16:32:39 INFO 16:32:39 INFO [loop_until]: kubectl get services -o=jsonpath='{.items[?(@.metadata.labels.app=="kube-prometheus-stack-prometheus")]}' --all-namespaces 16:32:39 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0] 16:32:39 INFO [loop_until]: OK (rc = 0) 16:32:39 DEBUG --- stdout --- 16:32:39 DEBUG 
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"cloud.google.com/neg":"{\"ingress\":true}","meta.helm.sh/release-name":"prometheus-operator","meta.helm.sh/release-namespace":"monitoring"},"creationTimestamp":"2023-05-27T02:35:19Z","labels":{"app":"kube-prometheus-stack-prometheus","app.kubernetes.io/instance":"prometheus-operator","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/part-of":"kube-prometheus-stack","app.kubernetes.io/version":"46.4.1","chart":"kube-prometheus-stack-46.4.1","heritage":"Helm","release":"prometheus-operator","self-monitor":"true"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:meta.helm.sh/release-name":{},"f:meta.helm.sh/release-namespace":{}},"f:labels":{".":{},"f:app":{},"f:app.kubernetes.io/instance":{},"f:app.kubernetes.io/managed-by":{},"f:app.kubernetes.io/part-of":{},"f:app.kubernetes.io/version":{},"f:chart":{},"f:heritage":{},"f:release":{},"f:self-monitor":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":9090,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:selector":{},"f:sessionAffinity":{},"f:type":{}}},"manager":"helm","operation":"Update","time":"2023-05-27T02:35:19Z"}],"name":"prometheus-operator-kube-p-prometheus","namespace":"monitoring","resourceVersion":"7148","uid":"eb1f35e8-cd91-4a12-8fe3-2e8bdf841da5"},"spec":{"clusterIP":"10.106.49.67","clusterIPs":["10.106.49.67"],"internalTrafficPolicy":"Cluster","ipFamilies":["IPv4"],"ipFamilyPolicy":"SingleStack","ports":[{"name":"http-web","port":9090,"protocol":"TCP","targetPort":9090}],"selector":{"app.kubernetes.io/name":"prometheus","prometheus":"prometheus-operator-kube-p-prometheus"},"sessionAffinity":"None","type":"ClusterIP"},"status":{"loadBalancer":{}}} 16:32:39 DEBUG --- stderr --- 16:32:39 DEBUG 16:32:39 INFO 16:32:39 INFO [loop_until]: kubectl get services -o=jsonpath='{.items[?(@.metadata.labels.app=="kube-prometheus-stack-prometheus")]}' --all-namespaces 16:32:39 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0] 16:32:39 INFO [loop_until]: OK (rc = 0) 16:32:39 DEBUG --- stdout --- 16:32:39 DEBUG 
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"cloud.google.com/neg":"{\"ingress\":true}","meta.helm.sh/release-name":"prometheus-operator","meta.helm.sh/release-namespace":"monitoring"},"creationTimestamp":"2023-05-27T02:35:19Z","labels":{"app":"kube-prometheus-stack-prometheus","app.kubernetes.io/instance":"prometheus-operator","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/part-of":"kube-prometheus-stack","app.kubernetes.io/version":"46.4.1","chart":"kube-prometheus-stack-46.4.1","heritage":"Helm","release":"prometheus-operator","self-monitor":"true"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:meta.helm.sh/release-name":{},"f:meta.helm.sh/release-namespace":{}},"f:labels":{".":{},"f:app":{},"f:app.kubernetes.io/instance":{},"f:app.kubernetes.io/managed-by":{},"f:app.kubernetes.io/part-of":{},"f:app.kubernetes.io/version":{},"f:chart":{},"f:heritage":{},"f:release":{},"f:self-monitor":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":9090,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:selector":{},"f:sessionAffinity":{},"f:type":{}}},"manager":"helm","operation":"Update","time":"2023-05-27T02:35:19Z"}],"name":"prometheus-operator-kube-p-prometheus","namespace":"monitoring","resourceVersion":"7148","uid":"eb1f35e8-cd91-4a12-8fe3-2e8bdf841da5"},"spec":{"clusterIP":"10.106.49.67","clusterIPs":["10.106.49.67"],"internalTrafficPolicy":"Cluster","ipFamilies":["IPv4"],"ipFamilyPolicy":"SingleStack","ports":[{"name":"http-web","port":9090,"protocol":"TCP","targetPort":9090}],"selector":{"app.kubernetes.io/name":"prometheus","prometheus":"prometheus-operator-kube-p-prometheus"},"sessionAffinity":"None","type":"ClusterIP"},"status":{"loadBalancer":{}}} 16:32:39 DEBUG --- stderr --- 16:32:39 DEBUG 16:32:39 INFO 16:32:39 INFO [loop_until]: kubectl get services -o=jsonpath='{.items[?(@.metadata.labels.app=="kube-prometheus-stack-prometheus")]}' --all-namespaces 16:32:39 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0] 16:32:39 INFO [loop_until]: OK (rc = 0) 16:32:39 DEBUG --- stdout --- 16:32:39 DEBUG 
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"cloud.google.com/neg":"{\"ingress\":true}","meta.helm.sh/release-name":"prometheus-operator","meta.helm.sh/release-namespace":"monitoring"},"creationTimestamp":"2023-05-27T02:35:19Z","labels":{"app":"kube-prometheus-stack-prometheus","app.kubernetes.io/instance":"prometheus-operator","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/part-of":"kube-prometheus-stack","app.kubernetes.io/version":"46.4.1","chart":"kube-prometheus-stack-46.4.1","heritage":"Helm","release":"prometheus-operator","self-monitor":"true"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:meta.helm.sh/release-name":{},"f:meta.helm.sh/release-namespace":{}},"f:labels":{".":{},"f:app":{},"f:app.kubernetes.io/instance":{},"f:app.kubernetes.io/managed-by":{},"f:app.kubernetes.io/part-of":{},"f:app.kubernetes.io/version":{},"f:chart":{},"f:heritage":{},"f:release":{},"f:self-monitor":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":9090,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:selector":{},"f:sessionAffinity":{},"f:type":{}}},"manager":"helm","operation":"Update","time":"2023-05-27T02:35:19Z"}],"name":"prometheus-operator-kube-p-prometheus","namespace":"monitoring","resourceVersion":"7148","uid":"eb1f35e8-cd91-4a12-8fe3-2e8bdf841da5"},"spec":{"clusterIP":"10.106.49.67","clusterIPs":["10.106.49.67"],"internalTrafficPolicy":"Cluster","ipFamilies":["IPv4"],"ipFamilyPolicy":"SingleStack","ports":[{"name":"http-web","port":9090,"protocol":"TCP","targetPort":9090}],"selector":{"app.kubernetes.io/name":"prometheus","prometheus":"prometheus-operator-kube-p-prometheus"},"sessionAffinity":"None","type":"ClusterIP"},"status":{"loadBalancer":{}}} 16:32:39 DEBUG --- stderr --- 16:32:39 DEBUG 16:32:39 INFO 16:32:39 INFO [loop_until]: kubectl get services -o=jsonpath='{.items[?(@.metadata.labels.app=="kube-prometheus-stack-prometheus")]}' --all-namespaces 16:32:39 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0] 16:32:39 INFO [loop_until]: OK (rc = 0) 16:32:39 DEBUG --- stdout --- 16:32:39 DEBUG 
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"cloud.google.com/neg":"{\"ingress\":true}","meta.helm.sh/release-name":"prometheus-operator","meta.helm.sh/release-namespace":"monitoring"},"creationTimestamp":"2023-05-27T02:35:19Z","labels":{"app":"kube-prometheus-stack-prometheus","app.kubernetes.io/instance":"prometheus-operator","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/part-of":"kube-prometheus-stack","app.kubernetes.io/version":"46.4.1","chart":"kube-prometheus-stack-46.4.1","heritage":"Helm","release":"prometheus-operator","self-monitor":"true"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:meta.helm.sh/release-name":{},"f:meta.helm.sh/release-namespace":{}},"f:labels":{".":{},"f:app":{},"f:app.kubernetes.io/instance":{},"f:app.kubernetes.io/managed-by":{},"f:app.kubernetes.io/part-of":{},"f:app.kubernetes.io/version":{},"f:chart":{},"f:heritage":{},"f:release":{},"f:self-monitor":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":9090,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:selector":{},"f:sessionAffinity":{},"f:type":{}}},"manager":"helm","operation":"Update","time":"2023-05-27T02:35:19Z"}],"name":"prometheus-operator-kube-p-prometheus","namespace":"monitoring","resourceVersion":"7148","uid":"eb1f35e8-cd91-4a12-8fe3-2e8bdf841da5"},"spec":{"clusterIP":"10.106.49.67","clusterIPs":["10.106.49.67"],"internalTrafficPolicy":"Cluster","ipFamilies":["IPv4"],"ipFamilyPolicy":"SingleStack","ports":[{"name":"http-web","port":9090,"protocol":"TCP","targetPort":9090}],"selector":{"app.kubernetes.io/name":"prometheus","prometheus":"prometheus-operator-kube-p-prometheus"},"sessionAffinity":"None","type":"ClusterIP"},"status":{"loadBalancer":{}}} 16:32:39 DEBUG --- stderr --- 16:32:39 DEBUG 16:32:39 INFO 16:32:39 INFO [loop_until]: kubectl get services -o=jsonpath='{.items[?(@.metadata.labels.app=="kube-prometheus-stack-prometheus")]}' --all-namespaces 16:32:39 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0] 16:32:39 INFO [loop_until]: OK (rc = 0) 16:32:39 DEBUG --- stdout --- 16:32:39 DEBUG 
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"cloud.google.com/neg":"{\"ingress\":true}","meta.helm.sh/release-name":"prometheus-operator","meta.helm.sh/release-namespace":"monitoring"},"creationTimestamp":"2023-05-27T02:35:19Z","labels":{"app":"kube-prometheus-stack-prometheus","app.kubernetes.io/instance":"prometheus-operator","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/part-of":"kube-prometheus-stack","app.kubernetes.io/version":"46.4.1","chart":"kube-prometheus-stack-46.4.1","heritage":"Helm","release":"prometheus-operator","self-monitor":"true"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:meta.helm.sh/release-name":{},"f:meta.helm.sh/release-namespace":{}},"f:labels":{".":{},"f:app":{},"f:app.kubernetes.io/instance":{},"f:app.kubernetes.io/managed-by":{},"f:app.kubernetes.io/part-of":{},"f:app.kubernetes.io/version":{},"f:chart":{},"f:heritage":{},"f:release":{},"f:self-monitor":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":9090,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:selector":{},"f:sessionAffinity":{},"f:type":{}}},"manager":"helm","operation":"Update","time":"2023-05-27T02:35:19Z"}],"name":"prometheus-operator-kube-p-prometheus","namespace":"monitoring","resourceVersion":"7148","uid":"eb1f35e8-cd91-4a12-8fe3-2e8bdf841da5"},"spec":{"clusterIP":"10.106.49.67","clusterIPs":["10.106.49.67"],"internalTrafficPolicy":"Cluster","ipFamilies":["IPv4"],"ipFamilyPolicy":"SingleStack","ports":[{"name":"http-web","port":9090,"protocol":"TCP","targetPort":9090}],"selector":{"app.kubernetes.io/name":"prometheus","prometheus":"prometheus-operator-kube-p-prometheus"},"sessionAffinity":"None","type":"ClusterIP"},"status":{"loadBalancer":{}}} 16:32:39 DEBUG --- stderr --- 16:32:39 DEBUG 16:32:39 INFO 16:32:39 INFO [loop_until]: kubectl get services -o=jsonpath='{.items[?(@.metadata.labels.app=="kube-prometheus-stack-prometheus")]}' --all-namespaces 16:32:39 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0] 16:32:39 INFO [loop_until]: OK (rc = 0) 16:32:39 DEBUG --- stdout --- 16:32:39 DEBUG 
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"cloud.google.com/neg":"{\"ingress\":true}","meta.helm.sh/release-name":"prometheus-operator","meta.helm.sh/release-namespace":"monitoring"},"creationTimestamp":"2023-05-27T02:35:19Z","labels":{"app":"kube-prometheus-stack-prometheus","app.kubernetes.io/instance":"prometheus-operator","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/part-of":"kube-prometheus-stack","app.kubernetes.io/version":"46.4.1","chart":"kube-prometheus-stack-46.4.1","heritage":"Helm","release":"prometheus-operator","self-monitor":"true"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:meta.helm.sh/release-name":{},"f:meta.helm.sh/release-namespace":{}},"f:labels":{".":{},"f:app":{},"f:app.kubernetes.io/instance":{},"f:app.kubernetes.io/managed-by":{},"f:app.kubernetes.io/part-of":{},"f:app.kubernetes.io/version":{},"f:chart":{},"f:heritage":{},"f:release":{},"f:self-monitor":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":9090,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:selector":{},"f:sessionAffinity":{},"f:type":{}}},"manager":"helm","operation":"Update","time":"2023-05-27T02:35:19Z"}],"name":"prometheus-operator-kube-p-prometheus","namespace":"monitoring","resourceVersion":"7148","uid":"eb1f35e8-cd91-4a12-8fe3-2e8bdf841da5"},"spec":{"clusterIP":"10.106.49.67","clusterIPs":["10.106.49.67"],"internalTrafficPolicy":"Cluster","ipFamilies":["IPv4"],"ipFamilyPolicy":"SingleStack","ports":[{"name":"http-web","port":9090,"protocol":"TCP","targetPort":9090}],"selector":{"app.kubernetes.io/name":"prometheus","prometheus":"prometheus-operator-kube-p-prometheus"},"sessionAffinity":"None","type":"ClusterIP"},"status":{"loadBalancer":{}}} 16:32:39 DEBUG --- stderr --- 16:32:39 DEBUG 16:32:40 INFO 16:32:40 INFO [loop_until]: kubectl get services -o=jsonpath='{.items[?(@.metadata.labels.app=="kube-prometheus-stack-prometheus")]}' --all-namespaces 16:32:40 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0] 16:32:40 INFO [loop_until]: OK (rc = 0) 16:32:40 DEBUG --- stdout --- 16:32:40 DEBUG 
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"cloud.google.com/neg":"{\"ingress\":true}","meta.helm.sh/release-name":"prometheus-operator","meta.helm.sh/release-namespace":"monitoring"},"creationTimestamp":"2023-05-27T02:35:19Z","labels":{"app":"kube-prometheus-stack-prometheus","app.kubernetes.io/instance":"prometheus-operator","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/part-of":"kube-prometheus-stack","app.kubernetes.io/version":"46.4.1","chart":"kube-prometheus-stack-46.4.1","heritage":"Helm","release":"prometheus-operator","self-monitor":"true"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:meta.helm.sh/release-name":{},"f:meta.helm.sh/release-namespace":{}},"f:labels":{".":{},"f:app":{},"f:app.kubernetes.io/instance":{},"f:app.kubernetes.io/managed-by":{},"f:app.kubernetes.io/part-of":{},"f:app.kubernetes.io/version":{},"f:chart":{},"f:heritage":{},"f:release":{},"f:self-monitor":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":9090,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:selector":{},"f:sessionAffinity":{},"f:type":{}}},"manager":"helm","operation":"Update","time":"2023-05-27T02:35:19Z"}],"name":"prometheus-operator-kube-p-prometheus","namespace":"monitoring","resourceVersion":"7148","uid":"eb1f35e8-cd91-4a12-8fe3-2e8bdf841da5"},"spec":{"clusterIP":"10.106.49.67","clusterIPs":["10.106.49.67"],"internalTrafficPolicy":"Cluster","ipFamilies":["IPv4"],"ipFamilyPolicy":"SingleStack","ports":[{"name":"http-web","port":9090,"protocol":"TCP","targetPort":9090}],"selector":{"app.kubernetes.io/name":"prometheus","prometheus":"prometheus-operator-kube-p-prometheus"},"sessionAffinity":"None","type":"ClusterIP"},"status":{"loadBalancer":{}}} 16:32:40 DEBUG --- stderr --- 16:32:40 DEBUG 16:32:40 INFO 16:32:40 INFO [loop_until]: kubectl get services -o=jsonpath='{.items[?(@.metadata.labels.app=="kube-prometheus-stack-prometheus")]}' --all-namespaces 16:32:40 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0] 16:32:40 INFO [loop_until]: OK (rc = 0) 16:32:40 DEBUG --- stdout --- 16:32:40 DEBUG 
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"cloud.google.com/neg":"{\"ingress\":true}","meta.helm.sh/release-name":"prometheus-operator","meta.helm.sh/release-namespace":"monitoring"},"creationTimestamp":"2023-05-27T02:35:19Z","labels":{"app":"kube-prometheus-stack-prometheus","app.kubernetes.io/instance":"prometheus-operator","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/part-of":"kube-prometheus-stack","app.kubernetes.io/version":"46.4.1","chart":"kube-prometheus-stack-46.4.1","heritage":"Helm","release":"prometheus-operator","self-monitor":"true"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:meta.helm.sh/release-name":{},"f:meta.helm.sh/release-namespace":{}},"f:labels":{".":{},"f:app":{},"f:app.kubernetes.io/instance":{},"f:app.kubernetes.io/managed-by":{},"f:app.kubernetes.io/part-of":{},"f:app.kubernetes.io/version":{},"f:chart":{},"f:heritage":{},"f:release":{},"f:self-monitor":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":9090,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:selector":{},"f:sessionAffinity":{},"f:type":{}}},"manager":"helm","operation":"Update","time":"2023-05-27T02:35:19Z"}],"name":"prometheus-operator-kube-p-prometheus","namespace":"monitoring","resourceVersion":"7148","uid":"eb1f35e8-cd91-4a12-8fe3-2e8bdf841da5"},"spec":{"clusterIP":"10.106.49.67","clusterIPs":["10.106.49.67"],"internalTrafficPolicy":"Cluster","ipFamilies":["IPv4"],"ipFamilyPolicy":"SingleStack","ports":[{"name":"http-web","port":9090,"protocol":"TCP","targetPort":9090}],"selector":{"app.kubernetes.io/name":"prometheus","prometheus":"prometheus-operator-kube-p-prometheus"},"sessionAffinity":"None","type":"ClusterIP"},"status":{"loadBalancer":{}}} 16:32:40 DEBUG --- stderr --- 16:32:40 DEBUG 16:32:40 INFO 16:32:40 INFO [loop_until]: kubectl get services -o=jsonpath='{.items[?(@.metadata.labels.app=="kube-prometheus-stack-prometheus")]}' --all-namespaces 16:32:40 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0] 16:32:40 INFO [loop_until]: OK (rc = 0) 16:32:40 DEBUG --- stdout --- 16:32:40 DEBUG 
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"cloud.google.com/neg":"{\"ingress\":true}","meta.helm.sh/release-name":"prometheus-operator","meta.helm.sh/release-namespace":"monitoring"},"creationTimestamp":"2023-05-27T02:35:19Z","labels":{"app":"kube-prometheus-stack-prometheus","app.kubernetes.io/instance":"prometheus-operator","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/part-of":"kube-prometheus-stack","app.kubernetes.io/version":"46.4.1","chart":"kube-prometheus-stack-46.4.1","heritage":"Helm","release":"prometheus-operator","self-monitor":"true"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:meta.helm.sh/release-name":{},"f:meta.helm.sh/release-namespace":{}},"f:labels":{".":{},"f:app":{},"f:app.kubernetes.io/instance":{},"f:app.kubernetes.io/managed-by":{},"f:app.kubernetes.io/part-of":{},"f:app.kubernetes.io/version":{},"f:chart":{},"f:heritage":{},"f:release":{},"f:self-monitor":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":9090,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:selector":{},"f:sessionAffinity":{},"f:type":{}}},"manager":"helm","operation":"Update","time":"2023-05-27T02:35:19Z"}],"name":"prometheus-operator-kube-p-prometheus","namespace":"monitoring","resourceVersion":"7148","uid":"eb1f35e8-cd91-4a12-8fe3-2e8bdf841da5"},"spec":{"clusterIP":"10.106.49.67","clusterIPs":["10.106.49.67"],"internalTrafficPolicy":"Cluster","ipFamilies":["IPv4"],"ipFamilyPolicy":"SingleStack","ports":[{"name":"http-web","port":9090,"protocol":"TCP","targetPort":9090}],"selector":{"app.kubernetes.io/name":"prometheus","prometheus":"prometheus-operator-kube-p-prometheus"},"sessionAffinity":"None","type":"ClusterIP"},"status":{"loadBalancer":{}}} 16:32:40 DEBUG --- stderr --- 16:32:40 DEBUG 16:32:40 INFO 16:32:40 INFO [loop_until]: kubectl get services -o=jsonpath='{.items[?(@.metadata.labels.app=="kube-prometheus-stack-prometheus")]}' --all-namespaces 16:32:40 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0] 16:32:40 INFO [loop_until]: OK (rc = 0) 16:32:40 DEBUG --- stdout --- 16:32:40 DEBUG 
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"cloud.google.com/neg":"{\"ingress\":true}","meta.helm.sh/release-name":"prometheus-operator","meta.helm.sh/release-namespace":"monitoring"},"creationTimestamp":"2023-05-27T02:35:19Z","labels":{"app":"kube-prometheus-stack-prometheus","app.kubernetes.io/instance":"prometheus-operator","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/part-of":"kube-prometheus-stack","app.kubernetes.io/version":"46.4.1","chart":"kube-prometheus-stack-46.4.1","heritage":"Helm","release":"prometheus-operator","self-monitor":"true"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:meta.helm.sh/release-name":{},"f:meta.helm.sh/release-namespace":{}},"f:labels":{".":{},"f:app":{},"f:app.kubernetes.io/instance":{},"f:app.kubernetes.io/managed-by":{},"f:app.kubernetes.io/part-of":{},"f:app.kubernetes.io/version":{},"f:chart":{},"f:heritage":{},"f:release":{},"f:self-monitor":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":9090,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:selector":{},"f:sessionAffinity":{},"f:type":{}}},"manager":"helm","operation":"Update","time":"2023-05-27T02:35:19Z"}],"name":"prometheus-operator-kube-p-prometheus","namespace":"monitoring","resourceVersion":"7148","uid":"eb1f35e8-cd91-4a12-8fe3-2e8bdf841da5"},"spec":{"clusterIP":"10.106.49.67","clusterIPs":["10.106.49.67"],"internalTrafficPolicy":"Cluster","ipFamilies":["IPv4"],"ipFamilyPolicy":"SingleStack","ports":[{"name":"http-web","port":9090,"protocol":"TCP","targetPort":9090}],"selector":{"app.kubernetes.io/name":"prometheus","prometheus":"prometheus-operator-kube-p-prometheus"},"sessionAffinity":"None","type":"ClusterIP"},"status":{"loadBalancer":{}}} 16:32:40 DEBUG --- stderr --- 16:32:40 DEBUG 16:32:40 INFO 16:32:40 INFO [loop_until]: kubectl get services -o=jsonpath='{.items[?(@.metadata.labels.app=="kube-prometheus-stack-prometheus")]}' --all-namespaces 16:32:40 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0] 16:32:40 INFO [loop_until]: OK (rc = 0) 16:32:40 DEBUG --- stdout --- 16:32:40 DEBUG 
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"cloud.google.com/neg":"{\"ingress\":true}","meta.helm.sh/release-name":"prometheus-operator","meta.helm.sh/release-namespace":"monitoring"},"creationTimestamp":"2023-05-27T02:35:19Z","labels":{"app":"kube-prometheus-stack-prometheus","app.kubernetes.io/instance":"prometheus-operator","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/part-of":"kube-prometheus-stack","app.kubernetes.io/version":"46.4.1","chart":"kube-prometheus-stack-46.4.1","heritage":"Helm","release":"prometheus-operator","self-monitor":"true"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:meta.helm.sh/release-name":{},"f:meta.helm.sh/release-namespace":{}},"f:labels":{".":{},"f:app":{},"f:app.kubernetes.io/instance":{},"f:app.kubernetes.io/managed-by":{},"f:app.kubernetes.io/part-of":{},"f:app.kubernetes.io/version":{},"f:chart":{},"f:heritage":{},"f:release":{},"f:self-monitor":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":9090,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:selector":{},"f:sessionAffinity":{},"f:type":{}}},"manager":"helm","operation":"Update","time":"2023-05-27T02:35:19Z"}],"name":"prometheus-operator-kube-p-prometheus","namespace":"monitoring","resourceVersion":"7148","uid":"eb1f35e8-cd91-4a12-8fe3-2e8bdf841da5"},"spec":{"clusterIP":"10.106.49.67","clusterIPs":["10.106.49.67"],"internalTrafficPolicy":"Cluster","ipFamilies":["IPv4"],"ipFamilyPolicy":"SingleStack","ports":[{"name":"http-web","port":9090,"protocol":"TCP","targetPort":9090}],"selector":{"app.kubernetes.io/name":"prometheus","prometheus":"prometheus-operator-kube-p-prometheus"},"sessionAffinity":"None","type":"ClusterIP"},"status":{"loadBalancer":{}}} 16:32:40 DEBUG --- stderr --- 16:32:40 DEBUG 16:32:40 INFO 16:32:40 INFO [loop_until]: kubectl get services -o=jsonpath='{.items[?(@.metadata.labels.app=="kube-prometheus-stack-prometheus")]}' --all-namespaces 16:32:40 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0] 16:32:40 INFO [loop_until]: OK (rc = 0) 16:32:40 DEBUG --- stdout --- 16:32:40 DEBUG 
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"cloud.google.com/neg":"{\"ingress\":true}","meta.helm.sh/release-name":"prometheus-operator","meta.helm.sh/release-namespace":"monitoring"},"creationTimestamp":"2023-05-27T02:35:19Z","labels":{"app":"kube-prometheus-stack-prometheus","app.kubernetes.io/instance":"prometheus-operator","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/part-of":"kube-prometheus-stack","app.kubernetes.io/version":"46.4.1","chart":"kube-prometheus-stack-46.4.1","heritage":"Helm","release":"prometheus-operator","self-monitor":"true"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:meta.helm.sh/release-name":{},"f:meta.helm.sh/release-namespace":{}},"f:labels":{".":{},"f:app":{},"f:app.kubernetes.io/instance":{},"f:app.kubernetes.io/managed-by":{},"f:app.kubernetes.io/part-of":{},"f:app.kubernetes.io/version":{},"f:chart":{},"f:heritage":{},"f:release":{},"f:self-monitor":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":9090,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:selector":{},"f:sessionAffinity":{},"f:type":{}}},"manager":"helm","operation":"Update","time":"2023-05-27T02:35:19Z"}],"name":"prometheus-operator-kube-p-prometheus","namespace":"monitoring","resourceVersion":"7148","uid":"eb1f35e8-cd91-4a12-8fe3-2e8bdf841da5"},"spec":{"clusterIP":"10.106.49.67","clusterIPs":["10.106.49.67"],"internalTrafficPolicy":"Cluster","ipFamilies":["IPv4"],"ipFamilyPolicy":"SingleStack","ports":[{"name":"http-web","port":9090,"protocol":"TCP","targetPort":9090}],"selector":{"app.kubernetes.io/name":"prometheus","prometheus":"prometheus-operator-kube-p-prometheus"},"sessionAffinity":"None","type":"ClusterIP"},"status":{"loadBalancer":{}}} 16:32:40 DEBUG --- stderr --- 16:32:40 DEBUG 16:32:40 INFO 16:32:40 INFO [loop_until]: kubectl get services -o=jsonpath='{.items[?(@.metadata.labels.app=="kube-prometheus-stack-prometheus")]}' --all-namespaces 16:32:40 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0] 16:32:40 INFO [loop_until]: OK (rc = 0) 16:32:40 DEBUG --- stdout --- 16:32:40 DEBUG 
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"cloud.google.com/neg":"{\"ingress\":true}","meta.helm.sh/release-name":"prometheus-operator","meta.helm.sh/release-namespace":"monitoring"},"creationTimestamp":"2023-05-27T02:35:19Z","labels":{"app":"kube-prometheus-stack-prometheus","app.kubernetes.io/instance":"prometheus-operator","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/part-of":"kube-prometheus-stack","app.kubernetes.io/version":"46.4.1","chart":"kube-prometheus-stack-46.4.1","heritage":"Helm","release":"prometheus-operator","self-monitor":"true"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:meta.helm.sh/release-name":{},"f:meta.helm.sh/release-namespace":{}},"f:labels":{".":{},"f:app":{},"f:app.kubernetes.io/instance":{},"f:app.kubernetes.io/managed-by":{},"f:app.kubernetes.io/part-of":{},"f:app.kubernetes.io/version":{},"f:chart":{},"f:heritage":{},"f:release":{},"f:self-monitor":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":9090,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:selector":{},"f:sessionAffinity":{},"f:type":{}}},"manager":"helm","operation":"Update","time":"2023-05-27T02:35:19Z"}],"name":"prometheus-operator-kube-p-prometheus","namespace":"monitoring","resourceVersion":"7148","uid":"eb1f35e8-cd91-4a12-8fe3-2e8bdf841da5"},"spec":{"clusterIP":"10.106.49.67","clusterIPs":["10.106.49.67"],"internalTrafficPolicy":"Cluster","ipFamilies":["IPv4"],"ipFamilyPolicy":"SingleStack","ports":[{"name":"http-web","port":9090,"protocol":"TCP","targetPort":9090}],"selector":{"app.kubernetes.io/name":"prometheus","prometheus":"prometheus-operator-kube-p-prometheus"},"sessionAffinity":"None","type":"ClusterIP"},"status":{"loadBalancer":{}}} 16:32:40 DEBUG --- stderr --- 16:32:40 DEBUG 16:32:40 INFO 16:32:40 INFO [loop_until]: kubectl get services -o=jsonpath='{.items[?(@.metadata.labels.app=="kube-prometheus-stack-prometheus")]}' --all-namespaces 16:32:40 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0] 16:32:40 INFO [loop_until]: OK (rc = 0) 16:32:40 DEBUG --- stdout --- 16:32:40 DEBUG 
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"cloud.google.com/neg":"{\"ingress\":true}","meta.helm.sh/release-name":"prometheus-operator","meta.helm.sh/release-namespace":"monitoring"},"creationTimestamp":"2023-05-27T02:35:19Z","labels":{"app":"kube-prometheus-stack-prometheus","app.kubernetes.io/instance":"prometheus-operator","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/part-of":"kube-prometheus-stack","app.kubernetes.io/version":"46.4.1","chart":"kube-prometheus-stack-46.4.1","heritage":"Helm","release":"prometheus-operator","self-monitor":"true"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:meta.helm.sh/release-name":{},"f:meta.helm.sh/release-namespace":{}},"f:labels":{".":{},"f:app":{},"f:app.kubernetes.io/instance":{},"f:app.kubernetes.io/managed-by":{},"f:app.kubernetes.io/part-of":{},"f:app.kubernetes.io/version":{},"f:chart":{},"f:heritage":{},"f:release":{},"f:self-monitor":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":9090,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:selector":{},"f:sessionAffinity":{},"f:type":{}}},"manager":"helm","operation":"Update","time":"2023-05-27T02:35:19Z"}],"name":"prometheus-operator-kube-p-prometheus","namespace":"monitoring","resourceVersion":"7148","uid":"eb1f35e8-cd91-4a12-8fe3-2e8bdf841da5"},"spec":{"clusterIP":"10.106.49.67","clusterIPs":["10.106.49.67"],"internalTrafficPolicy":"Cluster","ipFamilies":["IPv4"],"ipFamilyPolicy":"SingleStack","ports":[{"name":"http-web","port":9090,"protocol":"TCP","targetPort":9090}],"selector":{"app.kubernetes.io/name":"prometheus","prometheus":"prometheus-operator-kube-p-prometheus"},"sessionAffinity":"None","type":"ClusterIP"},"status":{"loadBalancer":{}}} 16:32:40 DEBUG --- stderr --- 16:32:40 DEBUG 16:32:40 INFO 16:32:40 INFO [loop_until]: kubectl get services -o=jsonpath='{.items[?(@.metadata.labels.app=="kube-prometheus-stack-prometheus")]}' --all-namespaces 16:32:40 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0] 16:32:41 INFO [loop_until]: OK (rc = 0) 16:32:41 DEBUG --- stdout --- 16:32:41 DEBUG 
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"cloud.google.com/neg":"{\"ingress\":true}","meta.helm.sh/release-name":"prometheus-operator","meta.helm.sh/release-namespace":"monitoring"},"creationTimestamp":"2023-05-27T02:35:19Z","labels":{"app":"kube-prometheus-stack-prometheus","app.kubernetes.io/instance":"prometheus-operator","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/part-of":"kube-prometheus-stack","app.kubernetes.io/version":"46.4.1","chart":"kube-prometheus-stack-46.4.1","heritage":"Helm","release":"prometheus-operator","self-monitor":"true"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:meta.helm.sh/release-name":{},"f:meta.helm.sh/release-namespace":{}},"f:labels":{".":{},"f:app":{},"f:app.kubernetes.io/instance":{},"f:app.kubernetes.io/managed-by":{},"f:app.kubernetes.io/part-of":{},"f:app.kubernetes.io/version":{},"f:chart":{},"f:heritage":{},"f:release":{},"f:self-monitor":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":9090,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:selector":{},"f:sessionAffinity":{},"f:type":{}}},"manager":"helm","operation":"Update","time":"2023-05-27T02:35:19Z"}],"name":"prometheus-operator-kube-p-prometheus","namespace":"monitoring","resourceVersion":"7148","uid":"eb1f35e8-cd91-4a12-8fe3-2e8bdf841da5"},"spec":{"clusterIP":"10.106.49.67","clusterIPs":["10.106.49.67"],"internalTrafficPolicy":"Cluster","ipFamilies":["IPv4"],"ipFamilyPolicy":"SingleStack","ports":[{"name":"http-web","port":9090,"protocol":"TCP","targetPort":9090}],"selector":{"app.kubernetes.io/name":"prometheus","prometheus":"prometheus-operator-kube-p-prometheus"},"sessionAffinity":"None","type":"ClusterIP"},"status":{"loadBalancer":{}}} 16:32:41 DEBUG --- stderr --- 16:32:41 DEBUG 16:32:41 INFO 16:32:41 INFO [loop_until]: kubectl get services -o=jsonpath='{.items[?(@.metadata.labels.app=="kube-prometheus-stack-prometheus")]}' --all-namespaces 16:32:41 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0] 16:32:41 INFO [loop_until]: OK (rc = 0) 16:32:41 DEBUG --- stdout --- 16:32:41 DEBUG 
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"cloud.google.com/neg":"{\"ingress\":true}","meta.helm.sh/release-name":"prometheus-operator","meta.helm.sh/release-namespace":"monitoring"},"creationTimestamp":"2023-05-27T02:35:19Z","labels":{"app":"kube-prometheus-stack-prometheus","app.kubernetes.io/instance":"prometheus-operator","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/part-of":"kube-prometheus-stack","app.kubernetes.io/version":"46.4.1","chart":"kube-prometheus-stack-46.4.1","heritage":"Helm","release":"prometheus-operator","self-monitor":"true"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:meta.helm.sh/release-name":{},"f:meta.helm.sh/release-namespace":{}},"f:labels":{".":{},"f:app":{},"f:app.kubernetes.io/instance":{},"f:app.kubernetes.io/managed-by":{},"f:app.kubernetes.io/part-of":{},"f:app.kubernetes.io/version":{},"f:chart":{},"f:heritage":{},"f:release":{},"f:self-monitor":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":9090,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:selector":{},"f:sessionAffinity":{},"f:type":{}}},"manager":"helm","operation":"Update","time":"2023-05-27T02:35:19Z"}],"name":"prometheus-operator-kube-p-prometheus","namespace":"monitoring","resourceVersion":"7148","uid":"eb1f35e8-cd91-4a12-8fe3-2e8bdf841da5"},"spec":{"clusterIP":"10.106.49.67","clusterIPs":["10.106.49.67"],"internalTrafficPolicy":"Cluster","ipFamilies":["IPv4"],"ipFamilyPolicy":"SingleStack","ports":[{"name":"http-web","port":9090,"protocol":"TCP","targetPort":9090}],"selector":{"app.kubernetes.io/name":"prometheus","prometheus":"prometheus-operator-kube-p-prometheus"},"sessionAffinity":"None","type":"ClusterIP"},"status":{"loadBalancer":{}}} 16:32:41 DEBUG --- stderr --- 16:32:41 DEBUG 16:32:41 INFO 16:32:41 INFO [loop_until]: kubectl get services -o=jsonpath='{.items[?(@.metadata.labels.app=="kube-prometheus-stack-prometheus")]}' --all-namespaces 16:32:41 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0] 16:32:41 INFO [loop_until]: OK (rc = 0) 16:32:41 DEBUG --- stdout --- 16:32:41 DEBUG 
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"cloud.google.com/neg":"{\"ingress\":true}","meta.helm.sh/release-name":"prometheus-operator","meta.helm.sh/release-namespace":"monitoring"},"creationTimestamp":"2023-05-27T02:35:19Z","labels":{"app":"kube-prometheus-stack-prometheus","app.kubernetes.io/instance":"prometheus-operator","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/part-of":"kube-prometheus-stack","app.kubernetes.io/version":"46.4.1","chart":"kube-prometheus-stack-46.4.1","heritage":"Helm","release":"prometheus-operator","self-monitor":"true"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:meta.helm.sh/release-name":{},"f:meta.helm.sh/release-namespace":{}},"f:labels":{".":{},"f:app":{},"f:app.kubernetes.io/instance":{},"f:app.kubernetes.io/managed-by":{},"f:app.kubernetes.io/part-of":{},"f:app.kubernetes.io/version":{},"f:chart":{},"f:heritage":{},"f:release":{},"f:self-monitor":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":9090,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:selector":{},"f:sessionAffinity":{},"f:type":{}}},"manager":"helm","operation":"Update","time":"2023-05-27T02:35:19Z"}],"name":"prometheus-operator-kube-p-prometheus","namespace":"monitoring","resourceVersion":"7148","uid":"eb1f35e8-cd91-4a12-8fe3-2e8bdf841da5"},"spec":{"clusterIP":"10.106.49.67","clusterIPs":["10.106.49.67"],"internalTrafficPolicy":"Cluster","ipFamilies":["IPv4"],"ipFamilyPolicy":"SingleStack","ports":[{"name":"http-web","port":9090,"protocol":"TCP","targetPort":9090}],"selector":{"app.kubernetes.io/name":"prometheus","prometheus":"prometheus-operator-kube-p-prometheus"},"sessionAffinity":"None","type":"ClusterIP"},"status":{"loadBalancer":{}}} 16:32:41 DEBUG --- stderr --- 16:32:41 DEBUG 16:32:41 INFO 16:32:41 INFO [loop_until]: kubectl get services -o=jsonpath='{.items[?(@.metadata.labels.app=="kube-prometheus-stack-prometheus")]}' --all-namespaces 16:32:41 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0] 16:32:41 INFO [loop_until]: OK (rc = 0) 16:32:41 DEBUG --- stdout --- 16:32:41 DEBUG 
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"cloud.google.com/neg":"{\"ingress\":true}","meta.helm.sh/release-name":"prometheus-operator","meta.helm.sh/release-namespace":"monitoring"},"creationTimestamp":"2023-05-27T02:35:19Z","labels":{"app":"kube-prometheus-stack-prometheus","app.kubernetes.io/instance":"prometheus-operator","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/part-of":"kube-prometheus-stack","app.kubernetes.io/version":"46.4.1","chart":"kube-prometheus-stack-46.4.1","heritage":"Helm","release":"prometheus-operator","self-monitor":"true"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:meta.helm.sh/release-name":{},"f:meta.helm.sh/release-namespace":{}},"f:labels":{".":{},"f:app":{},"f:app.kubernetes.io/instance":{},"f:app.kubernetes.io/managed-by":{},"f:app.kubernetes.io/part-of":{},"f:app.kubernetes.io/version":{},"f:chart":{},"f:heritage":{},"f:release":{},"f:self-monitor":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":9090,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:selector":{},"f:sessionAffinity":{},"f:type":{}}},"manager":"helm","operation":"Update","time":"2023-05-27T02:35:19Z"}],"name":"prometheus-operator-kube-p-prometheus","namespace":"monitoring","resourceVersion":"7148","uid":"eb1f35e8-cd91-4a12-8fe3-2e8bdf841da5"},"spec":{"clusterIP":"10.106.49.67","clusterIPs":["10.106.49.67"],"internalTrafficPolicy":"Cluster","ipFamilies":["IPv4"],"ipFamilyPolicy":"SingleStack","ports":[{"name":"http-web","port":9090,"protocol":"TCP","targetPort":9090}],"selector":{"app.kubernetes.io/name":"prometheus","prometheus":"prometheus-operator-kube-p-prometheus"},"sessionAffinity":"None","type":"ClusterIP"},"status":{"loadBalancer":{}}} 16:32:41 DEBUG --- stderr --- 16:32:41 DEBUG 16:32:41 INFO 16:32:41 INFO [loop_until]: kubectl get services -o=jsonpath='{.items[?(@.metadata.labels.app=="kube-prometheus-stack-prometheus")]}' --all-namespaces 16:32:41 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0] 16:32:41 INFO [loop_until]: OK (rc = 0) 16:32:41 DEBUG --- stdout --- 16:32:41 DEBUG 
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"cloud.google.com/neg":"{\"ingress\":true}","meta.helm.sh/release-name":"prometheus-operator","meta.helm.sh/release-namespace":"monitoring"},"creationTimestamp":"2023-05-27T02:35:19Z","labels":{"app":"kube-prometheus-stack-prometheus","app.kubernetes.io/instance":"prometheus-operator","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/part-of":"kube-prometheus-stack","app.kubernetes.io/version":"46.4.1","chart":"kube-prometheus-stack-46.4.1","heritage":"Helm","release":"prometheus-operator","self-monitor":"true"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:meta.helm.sh/release-name":{},"f:meta.helm.sh/release-namespace":{}},"f:labels":{".":{},"f:app":{},"f:app.kubernetes.io/instance":{},"f:app.kubernetes.io/managed-by":{},"f:app.kubernetes.io/part-of":{},"f:app.kubernetes.io/version":{},"f:chart":{},"f:heritage":{},"f:release":{},"f:self-monitor":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":9090,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:selector":{},"f:sessionAffinity":{},"f:type":{}}},"manager":"helm","operation":"Update","time":"2023-05-27T02:35:19Z"}],"name":"prometheus-operator-kube-p-prometheus","namespace":"monitoring","resourceVersion":"7148","uid":"eb1f35e8-cd91-4a12-8fe3-2e8bdf841da5"},"spec":{"clusterIP":"10.106.49.67","clusterIPs":["10.106.49.67"],"internalTrafficPolicy":"Cluster","ipFamilies":["IPv4"],"ipFamilyPolicy":"SingleStack","ports":[{"name":"http-web","port":9090,"protocol":"TCP","targetPort":9090}],"selector":{"app.kubernetes.io/name":"prometheus","prometheus":"prometheus-operator-kube-p-prometheus"},"sessionAffinity":"None","type":"ClusterIP"},"status":{"loadBalancer":{}}} 16:32:41 DEBUG --- stderr --- 16:32:41 DEBUG 16:32:41 INFO 16:32:41 INFO [loop_until]: kubectl get services -o=jsonpath='{.items[?(@.metadata.labels.app=="kube-prometheus-stack-prometheus")]}' --all-namespaces 16:32:41 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0] 16:32:41 INFO [loop_until]: OK (rc = 0) 16:32:41 DEBUG --- stdout --- 16:32:41 DEBUG 
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"cloud.google.com/neg":"{\"ingress\":true}","meta.helm.sh/release-name":"prometheus-operator","meta.helm.sh/release-namespace":"monitoring"},"creationTimestamp":"2023-05-27T02:35:19Z","labels":{"app":"kube-prometheus-stack-prometheus","app.kubernetes.io/instance":"prometheus-operator","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/part-of":"kube-prometheus-stack","app.kubernetes.io/version":"46.4.1","chart":"kube-prometheus-stack-46.4.1","heritage":"Helm","release":"prometheus-operator","self-monitor":"true"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:meta.helm.sh/release-name":{},"f:meta.helm.sh/release-namespace":{}},"f:labels":{".":{},"f:app":{},"f:app.kubernetes.io/instance":{},"f:app.kubernetes.io/managed-by":{},"f:app.kubernetes.io/part-of":{},"f:app.kubernetes.io/version":{},"f:chart":{},"f:heritage":{},"f:release":{},"f:self-monitor":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":9090,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:selector":{},"f:sessionAffinity":{},"f:type":{}}},"manager":"helm","operation":"Update","time":"2023-05-27T02:35:19Z"}],"name":"prometheus-operator-kube-p-prometheus","namespace":"monitoring","resourceVersion":"7148","uid":"eb1f35e8-cd91-4a12-8fe3-2e8bdf841da5"},"spec":{"clusterIP":"10.106.49.67","clusterIPs":["10.106.49.67"],"internalTrafficPolicy":"Cluster","ipFamilies":["IPv4"],"ipFamilyPolicy":"SingleStack","ports":[{"name":"http-web","port":9090,"protocol":"TCP","targetPort":9090}],"selector":{"app.kubernetes.io/name":"prometheus","prometheus":"prometheus-operator-kube-p-prometheus"},"sessionAffinity":"None","type":"ClusterIP"},"status":{"loadBalancer":{}}} 16:32:41 DEBUG --- stderr --- 16:32:41 DEBUG 16:32:41 INFO 16:32:41 INFO [loop_until]: kubectl get services -o=jsonpath='{.items[?(@.metadata.labels.app=="kube-prometheus-stack-prometheus")]}' --all-namespaces 16:32:41 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0] 16:32:41 INFO [loop_until]: OK (rc = 0) 16:32:41 DEBUG --- stdout --- 16:32:41 DEBUG 
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"cloud.google.com/neg":"{\"ingress\":true}","meta.helm.sh/release-name":"prometheus-operator","meta.helm.sh/release-namespace":"monitoring"},"creationTimestamp":"2023-05-27T02:35:19Z","labels":{"app":"kube-prometheus-stack-prometheus","app.kubernetes.io/instance":"prometheus-operator","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/part-of":"kube-prometheus-stack","app.kubernetes.io/version":"46.4.1","chart":"kube-prometheus-stack-46.4.1","heritage":"Helm","release":"prometheus-operator","self-monitor":"true"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:meta.helm.sh/release-name":{},"f:meta.helm.sh/release-namespace":{}},"f:labels":{".":{},"f:app":{},"f:app.kubernetes.io/instance":{},"f:app.kubernetes.io/managed-by":{},"f:app.kubernetes.io/part-of":{},"f:app.kubernetes.io/version":{},"f:chart":{},"f:heritage":{},"f:release":{},"f:self-monitor":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":9090,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:selector":{},"f:sessionAffinity":{},"f:type":{}}},"manager":"helm","operation":"Update","time":"2023-05-27T02:35:19Z"}],"name":"prometheus-operator-kube-p-prometheus","namespace":"monitoring","resourceVersion":"7148","uid":"eb1f35e8-cd91-4a12-8fe3-2e8bdf841da5"},"spec":{"clusterIP":"10.106.49.67","clusterIPs":["10.106.49.67"],"internalTrafficPolicy":"Cluster","ipFamilies":["IPv4"],"ipFamilyPolicy":"SingleStack","ports":[{"name":"http-web","port":9090,"protocol":"TCP","targetPort":9090}],"selector":{"app.kubernetes.io/name":"prometheus","prometheus":"prometheus-operator-kube-p-prometheus"},"sessionAffinity":"None","type":"ClusterIP"},"status":{"loadBalancer":{}}} 16:32:41 DEBUG --- stderr --- 16:32:41 DEBUG 16:32:41 INFO 16:32:41 INFO [loop_until]: kubectl get services -o=jsonpath='{.items[?(@.metadata.labels.app=="kube-prometheus-stack-prometheus")]}' --all-namespaces 16:32:41 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0] 16:32:41 INFO [loop_until]: OK (rc = 0) 16:32:41 DEBUG --- stdout --- 16:32:41 DEBUG 
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"cloud.google.com/neg":"{\"ingress\":true}","meta.helm.sh/release-name":"prometheus-operator","meta.helm.sh/release-namespace":"monitoring"},"creationTimestamp":"2023-05-27T02:35:19Z","labels":{"app":"kube-prometheus-stack-prometheus","app.kubernetes.io/instance":"prometheus-operator","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/part-of":"kube-prometheus-stack","app.kubernetes.io/version":"46.4.1","chart":"kube-prometheus-stack-46.4.1","heritage":"Helm","release":"prometheus-operator","self-monitor":"true"},"managedFields":[{"apiVersion":"v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:meta.helm.sh/release-name":{},"f:meta.helm.sh/release-namespace":{}},"f:labels":{".":{},"f:app":{},"f:app.kubernetes.io/instance":{},"f:app.kubernetes.io/managed-by":{},"f:app.kubernetes.io/part-of":{},"f:app.kubernetes.io/version":{},"f:chart":{},"f:heritage":{},"f:release":{},"f:self-monitor":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":9090,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:selector":{},"f:sessionAffinity":{},"f:type":{}}},"manager":"helm","operation":"Update","time":"2023-05-27T02:35:19Z"}],"name":"prometheus-operator-kube-p-prometheus","namespace":"monitoring","resourceVersion":"7148","uid":"eb1f35e8-cd91-4a12-8fe3-2e8bdf841da5"},"spec":{"clusterIP":"10.106.49.67","clusterIPs":["10.106.49.67"],"internalTrafficPolicy":"Cluster","ipFamilies":["IPv4"],"ipFamilyPolicy":"SingleStack","ports":[{"name":"http-web","port":9090,"protocol":"TCP","targetPort":9090}],"selector":{"app.kubernetes.io/name":"prometheus","prometheus":"prometheus-operator-kube-p-prometheus"},"sessionAffinity":"None","type":"ClusterIP"},"status":{"loadBalancer":{}}} 16:32:41 DEBUG --- stderr --- 16:32:41 DEBUG 16:32:41 INFO Initializing monitoring instance threads 16:32:41 DEBUG Monitoring instance thread list: [, , , , , , , , , , , , , , , , , , , , , , , , , , , , ] 16:32:41 INFO Starting instance threads 16:32:41 INFO 16:32:41 INFO Thread started 16:32:41 INFO [loop_until]: kubectl --namespace=xlou top node 16:32:41 INFO 16:32:41 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0] 16:32:41 INFO Thread started 16:32:41 INFO [loop_until]: kubectl --namespace=xlou top pods 16:32:41 INFO [http_cmd]: curl --insecure -L --request GET "http://prometheus-operator-kube-p-prometheus.monitoring.svc.cluster.local:9090/api/v1/query?query=sum%28rate%28container_cpu_usage_seconds_total%7Bnamespace%3D%27xlou%27%7D%5B60s%5D%29%29by%28pod%29&time=1686670361" 16:32:41 INFO [loop_until]: (max_time=180, interval=5, expected_rc=[0] 16:32:41 INFO Thread started 16:32:41 INFO [http_cmd]: curl --insecure -L --request GET "http://prometheus-operator-kube-p-prometheus.monitoring.svc.cluster.local:9090/api/v1/query?query=sum%28rate%28container_memory_usage_bytes%7Bnamespace%3D%27xlou%27%7D%5B60s%5D%29%29by%28pod%29&time=1686670361" 16:32:41 INFO Thread started 16:32:41 INFO [http_cmd]: curl --insecure -L --request GET "http://prometheus-operator-kube-p-prometheus.monitoring.svc.cluster.local:9090/api/v1/query?query=sum%28rate%28container_fs_reads_total%7Bnamespace%3D%27xlou%27%2Cjob%3D%27kubelet%27%2Cmetrics_path%3D%27/metrics/cadvisor%27%7D%5B60s%5D%29%29by%28pod%29&time=1686670361" 16:32:41 INFO Thread started 16:32:41 INFO [http_cmd]: curl --insecure -L --request GET 
"http://prometheus-operator-kube-p-prometheus.monitoring.svc.cluster.local:9090/api/v1/query?query=sum%28rate%28container_fs_writes_total%7Bnamespace%3D%27xlou%27%2Cjob%3D%27kubelet%27%2Cmetrics_path%3D%27/metrics/cadvisor%27%7D%5B60s%5D%29%29by%28pod%29&time=1686670361" 16:32:41 INFO Thread started 16:32:41 INFO [http_cmd]: curl --insecure -L --request GET "http://prometheus-operator-kube-p-prometheus.monitoring.svc.cluster.local:9090/api/v1/query?query=sum%28node_namespace_pod_container%3Acontainer_cpu_usage_seconds_total%3Asum_irate%29by%28node%29&time=1686670361" 16:32:41 INFO Thread started 16:32:41 INFO [http_cmd]: curl --insecure -L --request GET "http://prometheus-operator-kube-p-prometheus.monitoring.svc.cluster.local:9090/api/v1/query?query=sum%28node_namespace_pod_container%3Acontainer_memory_working_set_bytes%29by%28node%29&time=1686670361" 16:32:41 INFO Thread started 16:32:41 INFO [http_cmd]: curl --insecure -L --request GET "http://prometheus-operator-kube-p-prometheus.monitoring.svc.cluster.local:9090/api/v1/query?query=sum%28rate%28node_cpu_seconds_total%7Bmode%3D%27iowait%27%7D%5B60s%5D%29%29by%28instance%29&time=1686670361" 16:32:41 INFO Thread started 16:32:41 INFO [http_cmd]: curl --insecure -L --request GET "http://prometheus-operator-kube-p-prometheus.monitoring.svc.cluster.local:9090/api/v1/query?query=sum%28rate%28node_network_receive_bytes_total%7Bjob%3D%27node-exporter%27%2Cdevice%21%3D%27lo%27%7D%5B60s%5D%29%29by%28instance%29&time=1686670361" 16:32:41 INFO Thread started 16:32:41 INFO [http_cmd]: curl --insecure -L --request GET "http://prometheus-operator-kube-p-prometheus.monitoring.svc.cluster.local:9090/api/v1/query?query=sum%28rate%28node_network_transmit_bytes_total%7Bjob%3D%27node-exporter%27%2Cdevice%21%3D%27lo%27%7D%5B60s%5D%29%29by%28instance%29&time=1686670361" 16:32:41 INFO Thread started 16:32:41 INFO [http_cmd]: curl --insecure -L --request GET "http://prometheus-operator-kube-p-prometheus.monitoring.svc.cluster.local:9090/api/v1/query?query=sum%28rate%28am_cts_task_count%7Btoken_type%3D%27session%27%2Cnamespace%3D%27xlou%27%7D%5B60s%5D%29%29by%28pod%29&time=1686670361" 16:32:41 INFO Thread started 16:32:41 INFO [http_cmd]: http status code OK 16:32:41 INFO [http_cmd]: curl --insecure -L --request GET "http://prometheus-operator-kube-p-prometheus.monitoring.svc.cluster.local:9090/api/v1/query?query=sum%28irate%28am_session_count%7Bsession_type%3D~%27authentication-.%2A%27%2Coperation%3D%27create%27%2Cnamespace%3D%27xlou%27%2Coutcome%3D%27success%27%7D%5B60s%5D%29%29by%28pod%29&time=1686670361" 16:32:41 INFO [http_cmd]: http status code OK 16:32:41 INFO [http_cmd]: http status code OK 16:32:41 INFO [http_cmd]: http status code OK 16:32:41 INFO Thread started 16:32:41 DEBUG --- status code --- 16:32:41 INFO [http_cmd]: http status code OK 16:32:41 DEBUG --- status code --- 16:32:41 INFO [http_cmd]: http status code OK 16:32:41 INFO [http_cmd]: http status code OK 16:32:41 DEBUG --- status code --- 16:32:41 INFO [http_cmd]: http status code OK 16:32:41 DEBUG --- status code --- 16:32:41 INFO [http_cmd]: curl --insecure -L --request GET "http://prometheus-operator-kube-p-prometheus.monitoring.svc.cluster.local:9090/api/v1/query?query=sum%28rate%28ds_backend_db_cache_misses_internal_nodes%7Bbackend%3D%27amCts%27%2Cnamespace%3D%27xlou%27%7D%5B60s%5D%29%29by%28pod%29&time=1686670361" 16:32:41 INFO Thread started 16:32:41 DEBUG http status code is 200 (expected 200) 16:32:41 INFO [http_cmd]: http status code OK 16:32:41 INFO [http_cmd]: http status code 
OK 16:32:41 DEBUG --- status code --- 16:32:41 DEBUG http status code is 200 (expected 200) 16:32:41 DEBUG --- status code --- 16:32:41 DEBUG --- status code --- 16:32:41 DEBUG http status code is 200 (expected 200) 16:32:41 DEBUG --- status code --- 16:32:41 DEBUG http status code is 200 (expected 200) 16:32:41 INFO [http_cmd]: curl --insecure -L --request GET "http://prometheus-operator-kube-p-prometheus.monitoring.svc.cluster.local:9090/api/v1/query?query=sum%28rate%28ds_backend_db_cache_misses_internal_nodes%7Bbackend%3D%27amIdentityStore%27%2Cnamespace%3D%27xlou%27%7D%5B60s%5D%29%29by%28pod%29&time=1686670361" 16:32:41 DEBUG --- http response --- 16:32:41 INFO Thread started 16:32:41 DEBUG --- status code --- 16:32:41 DEBUG --- status code --- 16:32:41 DEBUG http status code is 200 (expected 200) 16:32:41 DEBUG --- http response --- 16:32:41 DEBUG http status code is 200 (expected 200) 16:32:41 DEBUG http status code is 200 (expected 200) 16:32:41 DEBUG --- http response --- 16:32:41 DEBUG http status code is 200 (expected 200) 16:32:41 DEBUG --- http response --- 16:32:41 INFO [http_cmd]: http status code OK 16:32:41 DEBUG [print_head_tail]: Print head (10 lines) and tail (10 lines) of input string (143 lines): 16:32:41 INFO [http_cmd]: curl --insecure -L --request GET "http://prometheus-operator-kube-p-prometheus.monitoring.svc.cluster.local:9090/api/v1/query?query=sum%28rate%28ds_backend_db_cache_misses_internal_nodes%7Bbackend%3D%27cfgStore%27%2Cnamespace%3D%27xlou%27%7D%5B60s%5D%29%29by%28pod%29&time=1686670361" 16:32:41 DEBUG http status code is 200 (expected 200) 16:32:41 INFO Thread started 16:32:41 DEBUG http status code is 200 (expected 200) 16:32:41 DEBUG --- http response --- 16:32:41 DEBUG [print_head_tail]: Print head (10 lines) and tail (10 lines) of input string (143 lines): 16:32:41 DEBUG --- http response --- 16:32:41 DEBUG --- http response --- 16:32:41 DEBUG [print_head_tail]: Print head (10 lines) and tail (10 lines) of input string (134 lines): 16:32:41 DEBUG --- http response --- 16:32:41 DEBUG --- status code --- 16:32:41 DEBUG [print_head_tail]: Print head (10 lines) and tail (10 lines) of input string (134 lines): 16:32:41 INFO [http_cmd]: http status code OK 16:32:41 DEBUG ----- output ----- 16:32:41 DEBUG --- http response --- 16:32:41 INFO [http_cmd]: curl --insecure -L --request GET "http://prometheus-operator-kube-p-prometheus.monitoring.svc.cluster.local:9090/api/v1/query?query=sum%28rate%28ds_backend_db_cache_misses_internal_nodes%7Bbackend%3D%27idmRepo%27%2Cnamespace%3D%27xlou%27%7D%5B60s%5D%29%29by%28pod%29&time=1686670361" 16:32:41 DEBUG --- http response --- 16:32:41 INFO Thread started 16:32:41 DEBUG [print_head_tail]: Print head (10 lines) and tail (10 lines) of input string (116 lines): 16:32:41 DEBUG ----- output ----- 16:32:41 DEBUG [print_head_tail]: Print head (10 lines) and tail (10 lines) of input string (134 lines): 16:32:41 DEBUG [print_head_tail]: Print head (10 lines) and tail (10 lines) of input string (134 lines): 16:32:41 DEBUG ----- output ----- 16:32:41 DEBUG [print_head_tail]: Print head (10 lines) and tail (10 lines) of input string (116 lines): 16:32:41 DEBUG http status code is 200 (expected 200) 16:32:41 DEBUG ----- output ----- 16:32:41 DEBUG --- status code --- 16:32:41 DEBUG { 16:32:41 DEBUG [print_head_tail]: Print head (10 lines) and tail (10 lines) of input string (134 lines): 16:32:41 INFO [http_cmd]: http status code OK 16:32:41 DEBUG [print_head_tail]: Print head (10 lines) and tail (10 lines) of input string (35 
lines): 16:32:41 INFO [http_cmd]: curl --insecure -L --request GET "http://prometheus-operator-kube-p-prometheus.monitoring.svc.cluster.local:9090/api/v1/query?query=sum%28rate%28am_authentication_count%7Bnamespace%3D%27xlou%27%7D%5B60s%5D%29%29by%28pod%29&time=1686670361" 16:32:41 DEBUG ----- output ----- 16:32:41 INFO Thread started 16:32:41 DEBUG { 16:32:41 DEBUG ----- output ----- 16:32:41 DEBUG ----- output ----- 16:32:41 DEBUG { 16:32:41 DEBUG ----- output ----- 16:32:41 DEBUG --- http response --- 16:32:41 DEBUG { 16:32:41 DEBUG http status code is 200 (expected 200) 16:32:41 DEBUG "status": "success", 16:32:41 DEBUG ----- output ----- 16:32:41 DEBUG --- status code --- 16:32:41 DEBUG ----- output ----- 16:32:41 DEBUG { 16:32:41 INFO [loop_until]: OK (rc = 0) 16:32:41 INFO [http_cmd]: curl --insecure -L --request GET "http://prometheus-operator-kube-p-prometheus.monitoring.svc.cluster.local:9090/api/v1/query?query=sum%28rate%28am_cts_reaper_search_count%7Bnamespace%3D%27xlou%27%7D%5B60s%5D%29%29by%28pod%29&time=1686670361" 16:32:41 DEBUG "status": "success", 16:32:41 INFO Thread started 16:32:41 INFO [http_cmd]: http status code OK 16:32:41 DEBUG { 16:32:41 DEBUG { 16:32:41 DEBUG "status": "success", 16:32:41 DEBUG { 16:32:41 DEBUG [print_head_tail]: Print head (10 lines) and tail (10 lines) of input string (35 lines): 16:32:41 DEBUG "status": "success", 16:32:41 DEBUG --- http response --- 16:32:41 DEBUG "data": { 16:32:41 DEBUG { 16:32:41 DEBUG http status code is 200 (expected 200) 16:32:41 DEBUG { 16:32:41 DEBUG "status": "success", 16:32:41 DEBUG --- stdout --- 16:32:41 DEBUG "data": { 16:32:41 INFO [http_cmd]: http status code OK 16:32:41 INFO [http_cmd]: curl --insecure -L --request GET "http://prometheus-operator-kube-p-prometheus.monitoring.svc.cluster.local:9090/api/v1/query?query=sum%28rate%28am_oauth2_grant_count%7Bnamespace%3D%27xlou%27%2Cgrant_type%3D~%27authorization-code%27%7D%5B60s%5D%29%29by%28pod%29&time=1686670361" 16:32:41 DEBUG --- status code --- 16:32:41 INFO Thread started 16:32:41 DEBUG "status": "success", 16:32:41 DEBUG "status": "success", 16:32:41 DEBUG "data": { 16:32:41 DEBUG "status": "success", 16:32:41 DEBUG ----- output ----- 16:32:41 DEBUG "data": { 16:32:41 DEBUG [print_head_tail]: Print head (10 lines) and tail (10 lines) of input string (62 lines): 16:32:41 DEBUG "resultType": "vector", 16:32:41 DEBUG "status": "success", 16:32:41 DEBUG --- http response --- 16:32:41 DEBUG "status": "success", 16:32:41 DEBUG "data": { 16:32:41 DEBUG NAME CPU(cores) MEMORY(bytes) admin-ui-7657fd47d8-q5vwf 1m 4Mi am-7849cf7bdb-lzlql 31m 2610Mi am-7849cf7bdb-ntqzs 18m 4416Mi am-7849cf7bdb-xrngh 16m 4410Mi ds-cts-0 15m 379Mi ds-cts-1 16m 384Mi ds-cts-2 15m 369Mi ds-idrepo-0 30m 3516Mi ds-idrepo-1 58m 3243Mi ds-idrepo-2 45m 3244Mi end-user-ui-5c589c665b-7tbkq 1m 3Mi idm-58dc667486-ql6zm 11m 3452Mi idm-58dc667486-rgz9j 9m 3415Mi lodemon-6455d5ff7c-zzndq 414m 60Mi login-ui-55f8bc5458-9zpvt 1m 3Mi overseer-0-7bb59f88f9-gcrrq 1m 15Mi 16:32:41 DEBUG "resultType": "vector", 16:32:41 DEBUG --- status code --- 16:32:41 DEBUG http status code is 200 (expected 200) 16:32:41 INFO [http_cmd]: http status code OK 16:32:41 INFO [http_cmd]: curl --insecure -L --request GET "http://prometheus-operator-kube-p-prometheus.monitoring.svc.cluster.local:9090/api/v1/query?query=sum%28rate%28ds_backend_ttl_entries_deleted_count%7Bnamespace%3D%27xlou%27%2Cbackend%3D~%27amCts%27%7D%5B60s%5D%29%29by%28pod%29&time=1686670361" 16:32:41 DEBUG "data": { 16:32:41 INFO Thread started 16:32:41 
DEBUG "data": { 16:32:41 DEBUG "resultType": "vector", 16:32:41 DEBUG "data": { 16:32:41 DEBUG { 16:32:41 DEBUG "resultType": "vector", 16:32:41 DEBUG ----- output ----- 16:32:41 DEBUG "result": [ 16:32:41 DEBUG "data": { 16:32:41 DEBUG [print_head_tail]: Print head (10 lines) and tail (10 lines) of input string (35 lines): 16:32:41 DEBUG "data": { 16:32:41 DEBUG "resultType": "vector", 16:32:41 DEBUG --- stderr --- 16:32:41 DEBUG "result": [ 16:32:41 DEBUG http status code is 200 (expected 200) 16:32:41 DEBUG --- http response --- 16:32:41 DEBUG --- status code --- 16:32:41 DEBUG "resultType": "vector", 16:32:41 INFO [http_cmd]: curl --insecure -L --request GET "http://prometheus-operator-kube-p-prometheus.monitoring.svc.cluster.local:9090/api/v1/query?query=sum%28rate%28ds_replication_replica_remote_replicas_receive_delay_seconds+%7Bnamespace%3D%27xlou%27%2Cdomain_name%3D%27ou%3Dtokens%27%7D%5B60s%5D%29%29by%28pod%29&time=1686670361" 16:32:41 DEBUG "resultType": "vector", 16:32:41 INFO Thread started 16:32:41 DEBUG "result": [ Exception in thread Thread-23: Traceback (most recent call last): 16:32:41 DEBUG "resultType": "vector", File "/usr/local/lib/python3.9/threading.py", line 973, in _bootstrap_inner 16:32:41 DEBUG "status": "success", 16:32:41 DEBUG "data": { 16:32:41 DEBUG "result": [ self.run() 16:32:41 DEBUG { File "/usr/local/lib/python3.9/threading.py", line 910, in run 16:32:41 DEBUG "status": "success", 16:32:41 DEBUG { self._target(*self._args, **self._kwargs) File "/home/jenkins/lodestar/shared/lib/monitoring/lodemon_service.py", line 144, in execute_monitoring_instance_in_loop instance.run() File "/home/jenkins/lodestar/shared/lib/monitoring/monitoring.py", line 284, in run if self.prom_data['functions']: KeyError: 'functions' 16:32:41 DEBUG "resultType": "vector", 16:32:41 DEBUG ----- output ----- 16:32:41 DEBUG "resultType": "vector", 16:32:41 DEBUG "result": [ 16:32:41 DEBUG 16:32:41 DEBUG { 16:32:41 DEBUG --- http response --- 16:32:41 DEBUG [print_head_tail]: Print head (10 lines) and tail (10 lines) of input string (35 lines): 16:32:41 DEBUG http status code is 200 (expected 200) 16:32:41 DEBUG "result": [ 16:32:41 DEBUG "result": [ 16:32:41 DEBUG { 16:32:41 INFO Thread started 16:32:41 DEBUG "result": [ Exception in thread Thread-24: Traceback (most recent call last): 16:32:41 DEBUG "resultType": "vector", File "/usr/local/lib/python3.9/threading.py", line 973, in _bootstrap_inner 16:32:41 DEBUG { 16:32:41 INFO [http_cmd]: http status code OK self.run() File "/usr/local/lib/python3.9/threading.py", line 910, in run 16:32:41 DEBUG "data": { 16:32:41 INFO [http_cmd]: http status code OK self._target(*self._args, **self._kwargs) File "/home/jenkins/lodestar/shared/lib/monitoring/lodemon_service.py", line 144, in execute_monitoring_instance_in_loop 16:32:41 INFO [loop_until]: OK (rc = 0) 16:32:41 DEBUG "metric": { instance.run() File "/home/jenkins/lodestar/shared/lib/monitoring/monitoring.py", line 284, in run 16:32:41 DEBUG "result": [ if self.prom_data['functions']: 16:32:41 DEBUG { KeyError: 'functions' 16:32:41 DEBUG "result": [ 16:32:41 DEBUG { 16:32:41 DEBUG "metric": { 16:32:41 DEBUG [print_head_tail]: Print head (10 lines) and tail (10 lines) of input string (35 lines): 16:32:41 INFO [http_cmd]: http status code OK 16:32:41 DEBUG ----- output ----- 16:32:41 DEBUG --- http response --- 16:32:41 DEBUG { 16:32:41 DEBUG { 16:32:41 DEBUG "metric": { 16:32:41 DEBUG { 16:32:41 INFO Thread started 16:32:41 DEBUG "result": [ Exception in thread Thread-25: Traceback 
(most recent call last): 16:32:41 DEBUG "metric": { File "/usr/local/lib/python3.9/threading.py", line 973, in _bootstrap_inner 16:32:41 DEBUG --- status code --- 16:32:41 DEBUG "resultType": "vector", self.run() File "/usr/local/lib/python3.9/threading.py", line 910, in run 16:32:41 DEBUG --- status code --- 16:32:41 DEBUG --- stdout --- self._target(*self._args, **self._kwargs) File "/home/jenkins/lodestar/shared/lib/monitoring/lodemon_service.py", line 144, in execute_monitoring_instance_in_loop 16:32:41 DEBUG "pod": "admin-ui-7657fd47d8-q5vwf" instance.run() File "/home/jenkins/lodestar/shared/lib/monitoring/monitoring.py", line 284, in run 16:32:41 DEBUG { 16:32:41 DEBUG "status": "success", if self.prom_data['functions']: 16:32:41 DEBUG { KeyError: 'functions' 16:32:41 DEBUG "metric": { 16:32:41 DEBUG "pod": "admin-ui-7657fd47d8-q5vwf" 16:32:41 DEBUG ----- output ----- 16:32:41 DEBUG --- status code --- 16:32:41 DEBUG { 16:32:41 DEBUG [print_head_tail]: Print head (10 lines) and tail (10 lines) of input string (35 lines): 16:32:41 DEBUG "metric": { 16:32:41 DEBUG "metric": { 16:32:41 DEBUG "node": "gke-xlou-cdm-default-pool-f05840a3-s6rh" 16:32:41 DEBUG "metric": { 16:32:41 DEBUG { 16:32:41 INFO Thread started 16:32:41 DEBUG "node": "gke-xlou-cdm-default-pool-f05840a3-s6rh" 16:32:41 DEBUG http status code is 200 (expected 200) 16:32:41 DEBUG "result": [ 16:32:41 DEBUG http status code is 200 (expected 200) 16:32:41 DEBUG NAME CPU(cores) CPU% MEMORY(bytes) MEMORY% gke-xlou-cdm-default-pool-f05840a3-5mhf 71m 0% 4431Mi 7% gke-xlou-cdm-default-pool-f05840a3-6hll 95m 0% 3717Mi 6% gke-xlou-cdm-default-pool-f05840a3-83ll 84m 0% 5534Mi 9% gke-xlou-cdm-default-pool-f05840a3-8bck 79m 0% 4444Mi 7% gke-xlou-cdm-default-pool-f05840a3-b4tr 82m 0% 5670Mi 9% gke-xlou-cdm-default-pool-f05840a3-m0lk 232m 1% 3087Mi 5% gke-xlou-cdm-default-pool-f05840a3-s6rh 383m 2% 1482Mi 2% gke-xlou-cdm-ds-32e4dcb1-015h 72m 0% 1137Mi 1% gke-xlou-cdm-ds-32e4dcb1-0hks 76m 0% 1155Mi 1% gke-xlou-cdm-ds-32e4dcb1-5msf 106m 0% 3966Mi 6% gke-xlou-cdm-ds-32e4dcb1-jr09 93m 0% 4236Mi 7% gke-xlou-cdm-ds-32e4dcb1-vg18 76m 0% 1125Mi 1% gke-xlou-cdm-ds-32e4dcb1-vhcr 114m 0% 3988Mi 6% gke-xlou-cdm-frontend-a8771548-h3n6 85m 0% 1311Mi 2% 16:32:41 DEBUG }, 16:32:41 DEBUG "metric": { 16:32:41 DEBUG "data": { 16:32:41 DEBUG "metric": { 16:32:41 INFO [http_cmd]: http status code OK 16:32:41 DEBUG "pod": "admin-ui-7657fd47d8-q5vwf" 16:32:41 DEBUG }, 16:32:41 DEBUG { 16:32:41 DEBUG http status code is 200 (expected 200) 16:32:41 DEBUG "status": "success", 16:32:41 DEBUG ----- output ----- 16:32:41 DEBUG "instance": "10.142.0.100:9100" 16:32:41 DEBUG "instance": "10.142.0.100:9100" 16:32:41 DEBUG }, 16:32:41 DEBUG "pod": "admin-ui-7657fd47d8-q5vwf" 16:32:41 DEBUG "metric": { 16:32:41 INFO [http_cmd]: curl --insecure -L --request GET "http://prometheus-operator-kube-p-prometheus.monitoring.svc.cluster.local:9090/api/v1/query?query=sum%28rate%28node_disk_read_bytes_total%7Bjob%3D%27node-exporter%27%2Cdevice%3D~%27nvme.%2B%7Crbd.%2B%7Csd.%2B%7Cvd.%2B%7Cxvd.%2B%7Cdasd.%2B%27%7D%29%29by%28node%29&time=1686670361" 16:32:41 DEBUG }, 16:32:41 INFO Thread started 16:32:41 DEBUG --- http response --- 16:32:41 DEBUG { 16:32:41 DEBUG --- http response --- 16:32:41 DEBUG --- stderr --- 16:32:41 DEBUG "value": [ 16:32:41 DEBUG "instance": "10.142.0.100:9100" 16:32:41 DEBUG "resultType": "vector", 16:32:41 DEBUG "pod": "am-7849cf7bdb-ntqzs" 16:32:41 DEBUG --- status code --- 16:32:41 DEBUG }, 16:32:41 DEBUG "value": [ 16:32:41 DEBUG "status": "success", 
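---------------- Note: decoding the Prometheus API queries ----------------

Each [http_cmd] entry in this capture is a Prometheus HTTP API instant query whose PromQL is URL-encoded in the query parameter; for example, the per-pod CPU query issued by the first thread decodes to sum(rate(container_cpu_usage_seconds_total{namespace='xlou'}[60s]))by(pod). A minimal decoding sketch using only the standard library (the URL is copied verbatim from the log):

    from urllib.parse import urlparse, parse_qs

    # Per-pod CPU query URL taken from the 16:32:41 [http_cmd] entry above.
    url = ("http://prometheus-operator-kube-p-prometheus.monitoring.svc.cluster.local:9090/api/v1/query"
           "?query=sum%28rate%28container_cpu_usage_seconds_total%7Bnamespace%3D%27xlou%27%7D%5B60s%5D%29%29by%28pod%29"
           "&time=1686670361")

    params = parse_qs(urlparse(url).query)
    print(params["query"][0])  # sum(rate(container_cpu_usage_seconds_total{namespace='xlou'}[60s]))by(pod)
    print(params["time"][0])   # 1686670361, the Unix timestamp of the evaluation instant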
16:32:41 DEBUG --- http response --- 16:32:41 DEBUG "data": { 16:32:41 DEBUG { 16:32:41 DEBUG }, 16:32:41 DEBUG }, 16:32:41 DEBUG "value": [ 16:32:41 DEBUG }, 16:32:41 DEBUG "pod": "am-7849cf7bdb-ntqzs" 16:32:41 DEBUG "value": [ 16:32:41 INFO [http_cmd]: curl --insecure -L --request GET "http://prometheus-operator-kube-p-prometheus.monitoring.svc.cluster.local:9090/api/v1/query?query=sum%28avg_over_time%28node_disk_written_bytes_total%7Bjob%3D%27node-exporter%27%2Cdevice%3D~%27nvme.%2B%7Crbd.%2B%7Csd.%2B%7Cvd.%2B%7Cxvd.%2B%7Cdasd.%2B%27%7D%5B60s%5D%29%29by%28node%29&time=1686670361" 16:32:41 INFO Thread started 16:32:41 DEBUG [print_head_tail]: Print head (10 lines) and tail (10 lines) of input string (35 lines): Exception in thread Thread-28: Traceback (most recent call last): 16:32:41 DEBUG "metric": { File "/usr/local/lib/python3.9/threading.py", line 973, in _bootstrap_inner 16:32:41 DEBUG [print_head_tail]: Print head (10 lines) and tail (10 lines) of input string (35 lines): 16:32:41 DEBUG self.run() 16:32:41 DEBUG File "/usr/local/lib/python3.9/threading.py", line 910, in run 16:32:41 DEBUG }, self._target(*self._args, **self._kwargs) 16:32:41 DEBUG "result": [ File "/home/jenkins/lodestar/shared/lib/monitoring/lodemon_service.py", line 144, in execute_monitoring_instance_in_loop 16:32:41 DEBUG }, 16:32:41 DEBUG http status code is 200 (expected 200) instance.run() File "/home/jenkins/lodestar/shared/lib/monitoring/monitoring.py", line 284, in run 16:32:41 DEBUG "value": [ if self.prom_data['functions']: 16:32:41 DEBUG KeyError: 'functions' 16:32:41 DEBUG "data": { 16:32:41 DEBUG [print_head_tail]: Print head (10 lines) and tail (10 lines) of input string (62 lines): 16:32:41 DEBUG "resultType": "vector", 16:32:41 DEBUG "status": "success", 16:32:41 DEBUG "value": [ 16:32:41 DEBUG "value": [ 16:32:41 DEBUG 16:32:41 DEBUG "value": [ 16:32:41 DEBUG }, 16:32:41 DEBUG 16:32:41 DEBUG ----- output ----- 16:32:41 INFO Thread started 16:32:41 DEBUG "pod": "ds-cts-0" 16:32:41 DEBUG ----- output ----- 16:32:41 DEBUG 16:32:41 DEBUG "value": [ 16:32:41 DEBUG { 16:32:41 DEBUG "value": [ 16:32:41 DEBUG --- http response --- 16:32:41 DEBUG 16:32:41 DEBUG 16:32:41 DEBUG "resultType": "vector", 16:32:41 DEBUG ----- output ----- 16:32:41 DEBUG "result": [ 16:32:41 ERROR [http_cmd]: ERROR 16:32:41 DEBUG "data": { 16:32:41 DEBUG 16:32:41 DEBUG 16:32:41 DEBUG 16:32:41 DEBUG 16:32:41 DEBUG "value": [ 16:32:41 DEBUG 16:32:41 DEBUG { 16:32:41 INFO [http_cmd]: curl --insecure -L --request GET "http://prometheus-operator-kube-p-prometheus.monitoring.svc.cluster.local:9090/api/v1/query?query=sum%28rate%28node_disk_io_time_seconds_total%7Bjob%3D%27node-exporter%27%2Cdevice%3D~%27nvme.%2B%7Crbd.%2B%7Csd.%2B%7Cvd.%2B%7Cxvd.%2B%7Cdasd.%2B%27%7D%5B60s%5D%29%29by%28instance%29&time=1686670361" 16:32:41 DEBUG }, 16:32:41 INFO Thread started 16:32:41 DEBUG { 16:32:41 DEBUG 16:32:41 DEBUG 16:32:41 DEBUG "metric": { 16:32:41 DEBUG 16:32:41 DEBUG [print_head_tail]: Print head (10 lines) and tail (10 lines) of input string (53 lines): 16:32:41 DEBUG 16:32:41 DEBUG 16:32:41 DEBUG "result": [ 16:32:41 DEBUG { 16:32:41 DEBUG { 16:32:41 ERROR --- status code --- 16:32:41 DEBUG "resultType": "vector", 16:32:41 DEBUG 16:32:41 DEBUG 16:32:41 DEBUG 16:32:41 DEBUG 16:32:41 DEBUG 16:32:41 DEBUG 16:32:41 DEBUG "status": "success", 16:32:41 DEBUG "value": [ 16:32:41 INFO All threads has been started 127.0.0.1 - - [13/Jun/2023 16:32:41] "GET /monitoring/start HTTP/1.1" 200 - 16:32:41 DEBUG "status": "success", 16:32:41 DEBUG [...] 
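---------------- Note: KeyError 'functions' in monitoring.py ----------------

The repeated "KeyError: 'functions'" tracebacks (Thread-23, -24, -25, -28) all originate from monitoring.py line 284, where self.prom_data['functions'] is read unconditionally; some monitoring instances evidently have no 'functions' entry in their query configuration. A minimal defensive sketch, with the class and data layout inferred from the traceback rather than taken from the lodestar source:

    # Hypothetical excerpt; only the attribute and key names visible in the traceback are real.
    class MonitoringInstance:
        def __init__(self, prom_data: dict):
            self.prom_data = prom_data

        def run(self):
            # monitoring.py:284 does `if self.prom_data['functions']:` and raises KeyError
            # when the key is missing; .get() treats an absent key like an empty list.
            for fn in self.prom_data.get('functions', []):
                ...  # apply the configured aggregation function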
16:32:41 DEBUG 16:32:41 DEBUG "pod": "ds-idrepo-2" 16:32:41 DEBUG 16:32:41 DEBUG ----- output ----- 16:32:41 DEBUG 16:32:41 DEBUG [...] 16:32:41 DEBUG { 16:32:41 DEBUG "status": "success", 16:32:41 DEBUG "metric": { 16:32:41 ERROR http status code is 400 (expected 200) 16:32:41 DEBUG "result": [ 16:32:41 INFO [http_cmd]: http status code OK 16:32:41 DEBUG 16:32:41 DEBUG 16:32:41 DEBUG [...] 16:32:41 DEBUG 16:32:41 DEBUG 16:32:41 DEBUG [...] 16:32:41 DEBUG "data": { 16:32:41 DEBUG 16:32:41 DEBUG "data": { 16:32:41 DEBUG 16:32:41 DEBUG 16:32:41 DEBUG }, 16:32:41 DEBUG 16:32:41 DEBUG { 16:32:41 DEBUG [...] 16:32:41 DEBUG 16:32:41 DEBUG "metric": { 16:32:41 DEBUG "data": { 16:32:41 DEBUG "pod": "ds-idrepo-2" 16:32:41 ERROR --- http response --- 16:32:41 DEBUG { 16:32:41 DEBUG --- status code --- 16:32:41 DEBUG [...] 16:32:41 DEBUG [...] 16:32:41 DEBUG 16:32:41 DEBUG [...] 16:32:41 DEBUG 16:32:41 DEBUG 16:32:41 DEBUG "resultType": "vector", 16:32:41 DEBUG 16:32:41 DEBUG "resultType": "vector", 16:32:41 DEBUG 16:32:41 DEBUG [...] 16:32:41 DEBUG "value": [ 16:32:41 DEBUG [...] 16:32:41 DEBUG "status": "success", 16:32:41 DEBUG 16:32:41 DEBUG 16:32:41 DEBUG "pod": "ds-idrepo-2" 16:32:41 DEBUG "resultType": "vector", 16:32:41 DEBUG }, 16:32:41 DEBUG "metric": { 16:32:41 DEBUG { "status": "error", "errorType": "bad_data", "error": "invalid parameter \"query\": 1:10: parse error: expected type range vector in call to function \"rate\", got instant vector" } Exception in thread Thread-26: Traceback (most recent call last): 16:32:41 DEBUG http status code is 200 (expected 200) File "/home/jenkins/lodestar/shared/lib/monitoring/monitoring.py", line 298, in run 16:32:41 DEBUG 16:32:41 DEBUG response = http_cmd.get(url=url_encoded, retries=5) File "/home/jenkins/lodestar/shared/lib/utils/HttpCmd.py", line 277, in get 16:32:41 DEBUG 16:32:41 DEBUG 16:32:41 DEBUG [...] 
Exception in thread Thread-26:
Traceback (most recent call last):
  File "/home/jenkins/lodestar/shared/lib/monitoring/monitoring.py", line 298, in run
    response = http_cmd.get(url=url_encoded, retries=5)
  File "/home/jenkins/lodestar/shared/lib/utils/HttpCmd.py", line 277, in get
    return self.request_cmd(url=url, **kwargs)
  File "/home/jenkins/lodestar/shared/lib/utils/HttpCmd.py", line 383, in request_cmd
    check_response(response, expected_status=expected_status, expected_string=expected_string,
  File "/home/jenkins/lodestar/shared/lib/utils/HttpCmd.py", line 241, in check_response
    check_http_status(http_result=http_result, expected_status=expected_status, verbose=verbose)
  File "/home/jenkins/lodestar/shared/lib/utils/HttpCmd.py", line 204, in check_http_status
    raise FailException(message='[http_cmd] request returns wrong status code',
shared.lib.utils.exception.FailException: [http_cmd] request returns wrong status code

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/usr/local/lib/python3.9/threading.py", line 973, in _bootstrap_inner
    self.run()
  File "/usr/local/lib/python3.9/threading.py", line 910, in run
    self._target(*self._args, **self._kwargs)
  File "/home/jenkins/lodestar/shared/lib/monitoring/lodemon_service.py", line 144, in execute_monitoring_instance_in_loop
    instance.run()
  File "/home/jenkins/lodestar/shared/lib/monitoring/monitoring.py", line 314, in run
    self.logger(f'Query: {query} failed with: {e}')
TypeError: 'LodestarLogger' object is not callable
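The chained TypeError is a secondary bug: once the 400 response makes http_cmd.get() raise FailException, the handler at monitoring.py line 314 calls self.logger(...) as if the logger were a function, and LodestarLogger is not callable, so the original failure is masked by a second exception. A sketch of that branch with the logger invoked through a level method; .error() is an assumed equivalent, since the LodestarLogger interface is not visible in this log:

    import logging

    # Illustrative wrapper around the failing branch (monitoring.py lines 298-314).
    # http_cmd, url_encoded and query come from the traceback; logging.getLogger()
    # stands in for LodestarLogger, whose real API is not shown here.
    def run_query(http_cmd, url_encoded, query, logger=logging.getLogger("lodemon")):
        try:
            return http_cmd.get(url=url_encoded, retries=5)
        except Exception as e:
            # Original: self.logger(f'Query: {query} failed with: {e}') -> TypeError.
            # Calling a level method logs the failure without raising a second error.
            logger.error('Query: %s failed with: %s', query, e)
            raise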
16:32:42 DEBUG { 16:32:42 DEBUG "pod": "ds-idrepo-2" 16:32:42 DEBUG 16:32:42 DEBUG 16:32:42 DEBUG 16:32:42 DEBUG "resultType": "vector", 16:32:42 DEBUG 16:32:42 DEBUG "pod": "ds-idrepo-2" 16:32:42 DEBUG "value": [ 16:32:42 DEBUG { 16:32:42 DEBUG 16:32:42 DEBUG }, 16:32:42 DEBUG { "status": "success", "data": { "resultType": "vector", "result": [ { "metric": {}, "value": [ 1686670361, "673415917568" ] } ] } } Exception in thread Thread-27: 16:32:42 DEBUG Traceback (most recent call last): File "/usr/local/lib/python3.9/threading.py", line 973, in _bootstrap_inner 16:32:42 DEBUG 16:32:42 DEBUG "node": "gke-xlou-cdm-frontend-a8771548-h3n6" self.run() 16:32:42 DEBUG File "/usr/local/lib/python3.9/threading.py", line 910, in run 16:32:42 DEBUG self._target(*self._args, **self._kwargs) File "/home/jenkins/lodestar/shared/lib/monitoring/lodemon_service.py", line 144, in execute_monitoring_instance_in_loop 16:32:42 DEBUG "node": "gke-xlou-cdm-ds-32e4dcb1-5msf" 16:32:42 DEBUG --- status code --- instance.run() 16:32:42 DEBUG "metric": { File "/home/jenkins/lodestar/shared/lib/monitoring/monitoring.py", line 302, in run 16:32:42 DEBUG component_name = metric["metric"][f'{component}'] KeyError: 'node' 16:32:42 DEBUG "metric": { 16:32:42 DEBUG }, 16:32:42 DEBUG 16:32:42 DEBUG 16:32:42 DEBUG 16:32:42 DEBUG "result": [ 16:32:42 DEBUG "pod": "ds-idrepo-2" 16:32:42 DEBUG }, 16:32:42 DEBUG 16:32:42 DEBUG "metric": { 16:32:42 DEBUG 16:32:42 DEBUG "value": [ 16:32:42 DEBUG "instance": "10.142.0.99:9100" 16:32:42 DEBUG "instance": "10.142.0.99:9100" 16:32:42 DEBUG }, 16:32:42 DEBUG "pod": "ds-idrepo-2" 16:32:42 DEBUG 16:32:42 DEBUG }, 16:32:42 DEBUG http status code is 200 (expected 200) 16:32:42 DEBUG "pod": "am-7849cf7bdb-ntqzs" 16:32:42 DEBUG 16:32:42 DEBUG "pod": "am-7849cf7bdb-ntqzs" 16:32:42 DEBUG "value": [ 16:32:42 DEBUG "instance": "10.142.0.99:9100" 16:32:42 DEBUG [...] 16:32:42 DEBUG "pod": "am-7849cf7bdb-xrngh" 16:32:42 DEBUG { 16:32:42 DEBUG }, 16:32:42 DEBUG "value": [ 16:32:42 DEBUG 16:32:42 DEBUG "pod": "ds-cts-0" 16:32:42 DEBUG 16:32:42 DEBUG 16:32:42 DEBUG }, 16:32:42 DEBUG }, 16:32:42 DEBUG "value": [ 16:32:42 DEBUG }, 16:32:42 DEBUG "pod": "am-7849cf7bdb-xrngh" 16:32:42 DEBUG "value": [ 16:32:42 DEBUG --- http response --- 16:32:42 DEBUG }, 16:32:42 DEBUG 16:32:42 DEBUG }, 16:32:42 DEBUG 1686670361, 16:32:42 DEBUG }, 16:32:42 DEBUG 16:32:42 DEBUG }, 16:32:42 DEBUG "metric": { 16:32:42 DEBUG "value": [ 16:32:42 DEBUG 1686670361, 16:32:42 DEBUG 16:32:42 DEBUG }, 16:32:42 DEBUG [...] 16:32:42 DEBUG 16:32:42 DEBUG "value": [ 16:32:42 DEBUG "value": [ 16:32:42 DEBUG 1686670361, 16:32:42 DEBUG "value": [ 16:32:42 DEBUG }, 16:32:42 DEBUG 1686670361, 16:32:42 DEBUG [print_head_tail]: Print head (10 lines) and tail (10 lines) of input string (134 lines): 16:32:42 DEBUG "value": [ 16:32:42 DEBUG "pod": "ds-idrepo-0" 16:32:42 DEBUG "value": [ 16:32:42 DEBUG "123472.120445007" 16:32:42 DEBUG "value": [ 16:32:42 DEBUG 16:32:42 DEBUG "value": [ 16:32:42 DEBUG "pod": "ds-cts-0" 16:32:42 DEBUG 1686670361, 16:32:42 DEBUG "0.04509070158550678" 16:32:42 DEBUG [...] 
16:32:42 DEBUG "value": [ 16:32:42 DEBUG 16:32:42 DEBUG 16:32:42 DEBUG 1686670361, 16:32:42 DEBUG 1686670361, 16:32:42 DEBUG "0.029769163585538494" 16:32:42 DEBUG 1686670361, 16:32:42 DEBUG "value": [ 16:32:42 DEBUG "4514840576" 16:32:42 DEBUG ----- output ----- 16:32:42 DEBUG 16:32:42 DEBUG }, 16:32:42 DEBUG 16:32:42 DEBUG ] 16:32:42 DEBUG 1686670361, 16:32:42 DEBUG 16:32:42 DEBUG 1686670361, 16:32:42 DEBUG }, 16:32:42 DEBUG "0" 16:32:42 DEBUG ] 16:32:42 DEBUG 16:32:42 DEBUG 16:32:42 DEBUG 16:32:42 DEBUG [...] 16:32:42 DEBUG "6645.7" 16:32:42 DEBUG "0" 16:32:42 DEBUG ] 16:32:42 DEBUG "0" 16:32:42 DEBUG 1686670361, 16:32:42 DEBUG ] 16:32:42 DEBUG { 16:32:42 DEBUG 16:32:42 DEBUG "value": [ 16:32:42 DEBUG 16:32:42 DEBUG } 16:32:42 DEBUG "6590.566666666667" 16:32:42 DEBUG "pod": "ds-idrepo-0" 16:32:42 DEBUG "0" 16:32:42 DEBUG "value": [ 16:32:42 DEBUG ] 16:32:42 DEBUG } 16:32:42 DEBUG 16:32:42 DEBUG 16:32:42 DEBUG 16:32:42 DEBUG 16:32:42 DEBUG ] 16:32:42 DEBUG ] 16:32:42 DEBUG } 16:32:42 DEBUG ] 16:32:42 DEBUG "0" 16:32:42 DEBUG } 16:32:42 DEBUG "status": "success", 16:32:42 DEBUG 16:32:42 DEBUG 1686670361, 16:32:42 DEBUG 16:32:42 DEBUG ] 16:32:42 DEBUG ] 16:32:42 DEBUG }, 16:32:42 DEBUG ] 16:32:42 DEBUG 16:32:42 DEBUG } 16:32:42 DEBUG ] 16:32:42 DEBUG 16:32:42 DEBUG 16:32:42 DEBUG "pod": "ds-idrepo-0" 16:32:42 DEBUG 16:32:42 DEBUG } 16:32:42 DEBUG } 16:32:42 DEBUG ] 16:32:42 DEBUG } 16:32:42 DEBUG ] 16:32:42 DEBUG ] 16:32:42 DEBUG "data": { 16:32:42 DEBUG [...] 16:32:42 DEBUG "0" 16:32:42 DEBUG [...] 16:32:42 DEBUG } 16:32:42 DEBUG } 16:32:42 DEBUG "value": [ 16:32:42 DEBUG } 16:32:42 DEBUG 16:32:42 DEBUG ] 16:32:42 DEBUG } 16:32:42 DEBUG "pod": "ds-idrepo-0" 16:32:42 DEBUG [...] 16:32:42 DEBUG }, 16:32:42 DEBUG 16:32:42 DEBUG ] 16:32:42 DEBUG ] 16:32:42 DEBUG } 16:32:42 DEBUG ] 16:32:42 DEBUG } 16:32:42 DEBUG } 16:32:42 DEBUG "resultType": "vector", 16:32:42 DEBUG 16:32:42 DEBUG ] 16:32:42 DEBUG 16:32:42 DEBUG } 16:32:42 DEBUG ] 16:32:42 DEBUG 1686670361, 16:32:42 DEBUG ] 16:32:42 DEBUG 16:32:42 DEBUG } 16:32:42 DEBUG } 16:32:42 DEBUG }, 16:32:42 DEBUG 16:32:42 DEBUG "value": [ 16:32:42 DEBUG "pod": "am-7849cf7bdb-xrngh" 16:32:42 DEBUG } 16:32:42 DEBUG } 16:32:42 DEBUG } 16:32:42 DEBUG } 16:32:42 DEBUG ] 16:32:42 DEBUG } 16:32:42 DEBUG "result": [ 16:32:42 DEBUG 16:32:42 DEBUG } 16:32:42 DEBUG 16:32:42 DEBUG } 16:32:42 DEBUG "0" 16:32:42 DEBUG } 16:32:42 DEBUG [...] 
16:32:42 DEBUG } 16:32:42 DEBUG "value": [ 16:32:42 DEBUG 16:32:42 DEBUG 1686670361, 16:32:42 DEBUG }, 16:32:42 DEBUG } 16:32:42 DEBUG } 16:32:42 DEBUG } 16:32:42 DEBUG } 16:32:42 DEBUG { 16:32:42 DEBUG 16:32:42 DEBUG ] 16:32:42 DEBUG 16:32:42 DEBUG } 16:32:42 DEBUG ] 16:32:42 DEBUG } 16:32:42 DEBUG 16:32:42 DEBUG 1686670361, 16:32:42 DEBUG 16:32:42 DEBUG "0" 16:32:42 DEBUG "value": [ 16:32:42 DEBUG } 16:32:42 DEBUG "metric": { 16:32:42 DEBUG "pod": "am-7849cf7bdb-xrngh" 16:32:42 DEBUG } 16:32:42 DEBUG "pod": "am-7849cf7bdb-xrngh" 16:32:42 DEBUG } 16:32:42 DEBUG 16:32:42 DEBUG "0" 16:32:42 DEBUG "pod": "ds-idrepo-0" 16:32:42 DEBUG ] 16:32:42 DEBUG 1686670361, 16:32:42 DEBUG "instance": "10.142.0.100:9100" 16:32:42 DEBUG }, 16:32:42 DEBUG } 16:32:42 DEBUG }, 16:32:42 DEBUG ] 16:32:42 DEBUG 16:32:42 DEBUG ] 16:32:42 DEBUG }, 16:32:42 DEBUG } 16:32:42 DEBUG "0" 16:32:42 DEBUG }, 16:32:42 DEBUG "value": [ 16:32:42 DEBUG "value": [ 16:32:42 DEBUG } 16:32:42 DEBUG "pod": "ds-cts-2" 16:32:42 DEBUG } 16:32:42 DEBUG "value": [ 16:32:42 DEBUG ] 16:32:42 DEBUG ] 16:32:42 DEBUG "value": [ 16:32:42 DEBUG 1686670361, 16:32:42 DEBUG 1686670361, 16:32:42 DEBUG } 16:32:42 DEBUG }, 16:32:42 DEBUG ] 16:32:42 DEBUG 1686670361, 16:32:42 DEBUG } 16:32:42 DEBUG } 16:32:42 DEBUG 16:32:42 DEBUG "0" 16:32:42 DEBUG "0.2" 16:32:42 DEBUG "value": [ 16:32:42 DEBUG } 16:32:42 DEBUG "0" 16:32:42 DEBUG } 16:32:42 DEBUG ] 16:32:42 DEBUG 16:32:42 DEBUG ] 16:32:42 DEBUG ] 16:32:42 DEBUG 1686670361, 16:32:42 DEBUG } 16:32:42 DEBUG ] 16:32:42 DEBUG } 16:32:42 DEBUG 16:32:42 DEBUG } 16:32:42 DEBUG } 16:32:42 DEBUG "0" 16:32:42 DEBUG } 16:32:42 DEBUG } 16:32:42 DEBUG [...] 16:32:42 DEBUG ] 16:32:42 DEBUG ] 16:32:42 DEBUG ] 16:32:42 DEBUG ] 16:32:42 DEBUG 16:32:42 DEBUG } 16:32:42 DEBUG } 16:32:42 DEBUG } 16:32:42 DEBUG } 16:32:42 DEBUG 16:32:42 DEBUG } 16:32:42 DEBUG } 16:32:42 DEBUG ] 16:32:42 DEBUG } 16:32:42 DEBUG 16:32:42 DEBUG } 16:32:42 DEBUG "instance": "10.142.0.99:9100" 16:32:42 DEBUG } 16:32:42 DEBUG }, 16:32:42 DEBUG "value": [ 16:32:42 DEBUG 1686670361, 16:32:42 DEBUG "0.0003333333333330302" 16:32:42 DEBUG ] 16:32:42 DEBUG } 16:32:42 DEBUG ] 16:32:42 DEBUG } 16:32:42 DEBUG }
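Thread-27 fails with KeyError: 'node' at monitoring.py line 302 because the response printed above contains a series with an empty label set ("metric": {}), so metric["metric"]['node'] has nothing to look up. A minimal sketch of a label lookup that tolerates unlabelled series, reusing the payload shown above; the fallback string is illustrative:

    # Payload copied from the response above: the aggregated series carries no
    # labels, so its "metric" object is an empty dict.
    response_json = {
        "status": "success",
        "data": {
            "resultType": "vector",
            "result": [
                {"metric": {}, "value": [1686670361, "673415917568"]},
            ],
        },
    }

    component = "node"
    for metric in response_json["data"]["result"]:
        # .get() returns a fallback instead of raising KeyError for label-less series.
        component_name = metric["metric"].get(component, "<unlabelled>")
        timestamp, value = metric["value"]
        print(component_name, timestamp, value)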