Solved

ifsapp-odata pod pending

  • 28 December 2023
  • 2 replies
  • 186 views

The ifsapp-odata pod status is Pending.

I checked the pod describe:

PS C:\ifsroot\deliveries\23R1\InstallationFiles\ifsinstaller> kubectl describe pod ifsapp-odata-74c7754c5f-nfr9t -n prd
Name:                 ifsapp-odata-74c7754c5f-nfr9t
Namespace:            prd
Priority:             19000000
Priority Class Name:  ifs-global-default
Service Account:      ifs-service-account
Node:                 <none>
Labels:               app=ifsapp-odata
                      customercode=sng
                      do-auth=true
                      env-crystal=true
                      env-database=true
                      env-demand=true
                      environmenttype=prd
                      internetaccess=true
                      linkerd.io/control-plane-ns=ifs-ingress
                      linkerd.io/proxy-deployment=ifsapp-odata
                      linkerd.io/workload-ns=prd
                      pod-template-hash=74c7754c5f
                      tier=backend
                      version=23.1.0.20230518150529.0
Annotations:          config.linkerd.io/proxy-cpu-limit: 1000m
                      config.linkerd.io/proxy-cpu-request: 20m
                      config.linkerd.io/proxy-memory-limit: 128Mi
                      config.linkerd.io/proxy-memory-request: 32Mi
                      config.linkerd.io/skip-outbound-ports: 20,21,1025-8079,8081-9099,9101-9989,9991-65535
                      linkerd.io/created-by: linkerd/proxy-injector stable-2.12.4
                      linkerd.io/inject: enabled
                      linkerd.io/proxy-version: stable-2.12.4
                      linkerd.io/trust-root-sha256: d81c8ef4ab27b3702f661c8d90e6e80d449bfd068ce0870738c7553549733a28
                      prometheus.io/port: 8080
                      prometheus.io/scrape: true
Status:               Pending
IP:
IPs:                  <none>
Controlled By:        ReplicaSet/ifsapp-odata-74c7754c5f
Init Containers:
  linkerd-init:
    Image:      ifscloud.jfrog.io/docker/linkerd/proxy-init:v2.0.0
    Port:       <none>
    Host Port:  <none>
    Args:
      --incoming-proxy-port
      4143
      --outgoing-proxy-port
      4140
      --proxy-uid
      2102
      --inbound-ports-to-ignore
      4190,4191,4567,4568
      --outbound-ports-to-ignore
      20,21,1025-8079,8081-9099,9101-9989,9991-65535
    Limits:
      cpu:     100m
      memory:  20Mi
    Requests:
      cpu:        100m
      memory:     20Mi
    Environment:  <none>
    Mounts:
      /run from linkerd-proxy-init-xtables-lock (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-rb92v (ro)
Containers:
  linkerd-proxy:
    Image:       ifscloud.jfrog.io/docker/linkerd/proxy:stable-2.12.4
    Ports:       4143/TCP, 4191/TCP
    Host Ports:  0/TCP, 0/TCP
    Limits:
      cpu:     1
      memory:  128Mi
    Requests:
      cpu:      20m
      memory:   32Mi
    Liveness:   http-get http://:4191/live delay=10s timeout=1s period=10s #success=1 #failure=3
    Readiness:  http-get http://:4191/ready delay=2s timeout=1s period=10s #success=1 #failure=3
    Environment:
      _pod_name:                                                ifsapp-odata-74c7754c5f-nfr9t (v1:metadata.name)
      _pod_ns:                                                  prd (v1:metadata.namespace)
      _pod_nodeName:                                             (v1:spec.nodeName)
      LINKERD2_PROXY_CORES:                                     1
      LINKERD2_PROXY_LOG:                                       warn,linkerd=info
      LINKERD2_PROXY_LOG_FORMAT:                                plain
      LINKERD2_PROXY_DESTINATION_SVC_ADDR:                      linkerd-dst-headless.ifs-ingress.svc.cluster.local.:8086
      LINKERD2_PROXY_DESTINATION_PROFILE_NETWORKS:              10.0.0.0/8,100.64.0.0/10,172.16.0.0/12,192.168.0.0/16
      LINKERD2_PROXY_POLICY_SVC_ADDR:                           linkerd-policy.ifs-ingress.svc.cluster.local.:8090
      LINKERD2_PROXY_POLICY_WORKLOAD:                           $(_pod_ns):$(_pod_name)
      LINKERD2_PROXY_INBOUND_DEFAULT_POLICY:                    all-unauthenticated
      LINKERD2_PROXY_POLICY_CLUSTER_NETWORKS:                   10.0.0.0/8,100.64.0.0/10,172.16.0.0/12,192.168.0.0/16
      LINKERD2_PROXY_INBOUND_CONNECT_TIMEOUT:                   100ms
      LINKERD2_PROXY_OUTBOUND_CONNECT_TIMEOUT:                  1000ms
      LINKERD2_PROXY_CONTROL_LISTEN_ADDR:                       0.0.0.0:4190
      LINKERD2_PROXY_ADMIN_LISTEN_ADDR:                         0.0.0.0:4191
      LINKERD2_PROXY_OUTBOUND_LISTEN_ADDR:                      127.0.0.1:4140
      LINKERD2_PROXY_INBOUND_LISTEN_ADDR:                       0.0.0.0:4143
      LINKERD2_PROXY_INBOUND_IPS:                                (v1:status.podIPs)
      LINKERD2_PROXY_INBOUND_PORTS:                             8080,8787
      LINKERD2_PROXY_DESTINATION_PROFILE_SUFFIXES:              svc.cluster.local.
      LINKERD2_PROXY_INBOUND_ACCEPT_KEEPALIVE:                  10000ms
      LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE:                10000ms
      LINKERD2_PROXY_INBOUND_PORTS_DISABLE_PROTOCOL_DETECTION:  25,587,3306,4444,5432,6379,9300,11211
      LINKERD2_PROXY_DESTINATION_CONTEXT:                       {"ns":"$(_pod_ns)", "nodeName":"$(_pod_nodeName)"}

      _pod_sa:                                                   (v1:spec.serviceAccountName)
      _l5d_ns:                                                  ifs-ingress
      _l5d_trustdomain:                                         cluster.local
      LINKERD2_PROXY_IDENTITY_DIR:                              /var/run/linkerd/identity/end-entity
      LINKERD2_PROXY_IDENTITY_TRUST_ANCHORS:                    -----BEGIN CERTIFICATE-----
                                                                MIIBnTCCAUOgAwIBAgIQHWsDtf6TWE80p9UNOX/8lDAKBggqhkjOPQQDAjAtMSsw
                                                                KQYDVQQDEyJpZGVudGl0eS5pZnMtaW5ncmVzcy5jbHVzdGVyLmxvY2FsMB4XDTIz
                                                                MTIyODEzMDY0OVoXDTMzMTIyNTEzMDY0OVowLTErMCkGA1UEAxMiaWRlbnRpdHku
                                                                aWZzLWluZ3Jlc3MuY2x1c3Rlci5sb2NhbDBZMBMGByqGSM49AgEGCCqGSM49AwEH
                                                                A0IABGcMTV5aDUkMBhEXUrv+vbTiL5A/0frVd44QfBNH7w8ImSkYwVG7NNqL4Cuf
                                                                OcRp6Rn8DuZwtZlXjspceKgfpAijRTBDMA4GA1UdDwEB/wQEAwIBBjASBgNVHRMB
                                                                Af8ECDAGAQH/AgEBMB0GA1UdDgQWBBQtjVnFIPJTFwFKeVshid+0c/5J2jAKBggq
                                                                hkjOPQQDAgNIADBFAiBgT8x6cJNARrs2oD6muhY167twST7XTnFy4Cb5L4FXfQIh
                                                                AOmxS3jaEwTMZ+1kRFWfVBpzHME5V55Jf0b/UOvjUWb1
                                                                -----END CERTIFICATE-----

      LINKERD2_PROXY_IDENTITY_TOKEN_FILE:                       /var/run/secrets/tokens/linkerd-identity-token
      LINKERD2_PROXY_IDENTITY_SVC_ADDR:                         linkerd-identity-headless.ifs-ingress.svc.cluster.local.:8080
      LINKERD2_PROXY_IDENTITY_LOCAL_NAME:                       $(_pod_sa).$(_pod_ns).serviceaccount.identity.ifs-ingress.cluster.local
      LINKERD2_PROXY_IDENTITY_SVC_NAME:                         linkerd-identity.ifs-ingress.serviceaccount.identity.ifs-ingress.cluster.local
      LINKERD2_PROXY_DESTINATION_SVC_NAME:                      linkerd-destination.ifs-ingress.serviceaccount.identity.ifs-ingress.cluster.local
      LINKERD2_PROXY_POLICY_SVC_NAME:                           linkerd-destination.ifs-ingress.serviceaccount.identity.ifs-ingress.cluster.local
    Mounts:
      /var/run/linkerd/identity/end-entity from linkerd-identity-end-entity (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-rb92v (ro)
      /var/run/secrets/tokens from linkerd-identity-token (rw)
  ifsapp-odata:
    Image:       ifscloud.jfrog.io/docker/ifs/ifsapp-odata:23.1.0.20230518150529.0
    Ports:       8080/TCP, 8787/TCP
    Host Ports:  0/TCP, 0/TCP
    Limits:
      cpu:                10
      ephemeral-storage:  1G
      memory:             2500M
    Requests:
      cpu:                700m
      ephemeral-storage:  750M
      memory:             2500M
    Readiness:            http-get http://:8080/health/ready delay=5s timeout=5s period=20s #success=1 #failure=3
    Startup:              exec [bash /opt/ifs/startupProbeCheck.sh] delay=90s timeout=20s period=60s #success=1 #failure=30
    Environment:
      RELEASE:                            23.1
      SERVICE_UPDATE:                     23.1.0
      INSTALLATIONID:                     DEV_INST
      DEPLOYMENTMODEL:                    REMOTE
      ODP_JAVA_OPTS:                      -XX:MaxRAMPercentage=92.0
                                          -XX:-OmitStackTraceInFastThrow
                                          -Dodp.log.level=warn
                                          -Dodp.projection.cache.limit=275

      IFS_PROXY_URL:                      http://ifsapp-proxy
      IFS_DOMAIN_NAME:                    https://prd.dlhaxk233baevh3oanvsquyuke.bx.internal.cloudapp.net
      IFS_REALM_NAME:                     prd
      IFS_USERNAME_ATTRIBUTE:             preferred_username
      IFS_MAX_POOL_SIZE:                  25
      IFS_MIN_POOL_SIZE:                  1
      IFS_QUERY_TIMEOUT:                  299s
      IFS_MAX_QUERY_TIMEOUT:              5m
      IFS_STATEMENT_TIMEOUT:              15m
      IFS_INTEGRATION_QUERY_TIMEOUT:      10m
      IFS_INTEGRATION_MAX_QUERY_TIMEOUT:  15m
      IFS_INTEGRATION_STATEMENT_TIMEOUT:  30m
    Mounts:
      /etc/podinfo from labelinfo (rw)
      /opt/ifs/.secrets/secrets from secrets (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-rb92v (ro)
Conditions:
  Type           Status
  PodScheduled   False
Volumes:
  labelinfo:
    Type:  DownwardAPI (a volume populated by information about the pod)
    Items:
      metadata.labels -> labels
  secrets:
    Type:                Projected (a volume that contains injected data from multiple sources)
    SecretName:          ifssys-user
    SecretOptionalName:  <nil>
    SecretName:          ifssys-password
    SecretOptionalName:  <nil>
    SecretName:          ifsapp-user
    SecretOptionalName:  <nil>
    SecretName:          ifs-jdbc-url
    SecretOptionalName:  <nil>
    SecretName:          ifsiam-admin
    SecretOptionalName:  <nil>
    SecretName:          ifsiam-admin-pw
    SecretOptionalName:  <nil>
    SecretName:          ifs-dcat-client
    SecretOptionalName:  <nil>
    SecretName:          ifs-dcat-secret
    SecretOptionalName:  <nil>
    SecretName:          ifs-dcat-account
    SecretOptionalName:  <nil>
    SecretName:          ifs-tenant-id
    SecretOptionalName:  <nil>
    SecretName:          mob-secret
    SecretOptionalName:  <nil>
    SecretName:          rem-secret
    SecretOptionalName:  <nil>
    SecretName:          scim-secret
    SecretOptionalName:  <nil>
    SecretName:          crt-secret
    SecretOptionalName:  <nil>
    SecretName:          am-customer-type
    SecretOptionalName:  <nil>
    SecretName:          am-ifs-data-src
    SecretOptionalName:  <nil>
    SecretName:          am-ifsinfo-un
    SecretOptionalName:  <nil>
    SecretName:          am-ifsinfo-pwd
    SecretOptionalName:  <nil>
    SecretName:          am-sql-srv-type
    SecretOptionalName:  <nil>
    SecretName:          am-sql-srv-name
    SecretOptionalName:  <nil>
    SecretName:          am-sql-db-name
    SecretOptionalName:  <nil>
    SecretName:          am-sql-agent-name
    SecretOptionalName:  <nil>
    SecretName:          am-sql-un
    SecretOptionalName:  <nil>
    SecretName:          am-sql-pwd
    SecretOptionalName:  <nil>
    SecretName:          am-sql-ssisdb-name
    SecretOptionalName:  <nil>
    SecretName:          am-ssis-proj-name
    SecretOptionalName:  <nil>
    SecretName:          am-ssis-fold-name
    SecretOptionalName:  <nil>
    SecretName:          am-ssas-srv-name
    SecretOptionalName:  <nil>
    SecretName:          am-ssas-un
    SecretOptionalName:  <nil>
    SecretName:          am-ssas-pwd
    SecretOptionalName:  <nil>
    SecretName:          symmetric-key
    SecretOptionalName:  <nil>
    SecretName:          fss-secret
    SecretOptionalName:  <nil>
  kube-api-access-rb92v:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
  linkerd-proxy-init-xtables-lock:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:
    SizeLimit:  <unset>
  linkerd-identity-end-entity:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     Memory
    SizeLimit:  <unset>
  linkerd-identity-token:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  86400
QoS Class:                   Burstable
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason            Age    From               Message
  ----     ------            ----   ----               -------
  Warning  FailedScheduling  4m48s  default-scheduler  0/1 nodes are available: 1 Insufficient cpu. preemption: not eligible due to preemptionPolicy=Never..

Best answer by Charith Epitawatta 28 December 2023, 17:27

2 replies

Hi @mihiran.p,

A Pod stays in the Pending state when the scheduler cannot find a node with enough free CPU/memory to place it. As you can see under the Events section in the Pod description, the reason here is that there are not enough CPU resources in the cluster to schedule the Pod.

“Warning  FailedScheduling  4m48s  default-scheduler  0/1 nodes are available: 1 Insufficient cpu.”.

Please make sure there are enough CPU and memory resources allocated to the cluster to run all the Pods. 
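
For reference, the describe output above shows that each ifsapp-odata replica requests 700m CPU for the application container plus 20m for the linkerd-proxy sidecar, so the node needs roughly 720m of unreserved CPU just for this Pod. A quick way to list what every Pod in the namespace is requesting (a minimal sketch using standard kubectl custom-columns and the prd namespace from this thread):

# CPU and memory requests of every pod in the prd namespace
kubectl get pods -n prd -o custom-columns=NAME:.metadata.name,CPU_REQ:.spec.containers[*].resources.requests.cpu,MEM_REQ:.spec.containers[*].resources.requests.memory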

I also noticed that the Pods that show as Running are not in a fully READY state either, since most of them show 1/2 READY or similar. Almost all Pods in IFS Cloud run 2 containers, and 1/2 means only one of them is ready. This is probably due to the same reason, and you should be able to see the cause in the Pod description and the logs.
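
To narrow that down, a minimal sketch (the container names are taken from the describe output above; replace <pod-name> with the Pod that is not fully READY):

# List the Pods and their READY counts in the prd namespace
kubectl get pods -n prd

# Check the events and container statuses of a specific Pod
kubectl describe pod <pod-name> -n prd

# Read the logs of each container, e.g. the application container and the linkerd sidecar
kubectl logs <pod-name> -n prd -c ifsapp-odata
kubectl logs <pod-name> -n prd -c linkerd-proxy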

Hope this helps!

Hi Mihiran,

To add to Charith’s response, you can also “describe” the node itself to find more detailed resource (CPU & memory) information.

kubectl describe node

In particular, the last few sections of that output (Non-terminated Pods, Allocated resources and Events) are quite relevant.
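
For example, to jump straight to those sections from the same PowerShell session used above (the Select-String filtering is just a convenience; plain kubectl describe node works as well):

# Show how much CPU/memory the node can allocate and how much is already requested
kubectl describe node | Select-String -Pattern 'Allocatable','Allocated resources' -Context 0,8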

Best regards -- Ben
