Workaround for logging issues with ICP

This works around errors hit with the version of Kubernetes shipped in
ICP. Logging broke in ICP v1.2 with the update to Kubernetes 1.6.2:
tailing the logs would hang after the container finished. In ICP v2.1
logging is unreliable when retrieving the logs of multiple pods
simultaneously. As a workaround, add a new variable that launches an
alternate "workaround" template and, instead of tailing the logs, run
the build script via kubectl exec to get its output.
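
For example, since the new variable follows "log" by default, it can be
overridden explicitly from the environment (illustrative invocation
only; the other variables the script expects are omitted here):

    # Force the workaround off once ICP ships a fixed Kubernetes
    workaround=false log=true purge=true ./kubernetes/kubernetes-launch.sh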

How the workaround works: instead of launching directly into the build
script at container start, the container sleeps for two hours. The
logging section of kubernetes-launch.sh then starts the script with an
exec command rather than tailing the logs, which streams the script's
output to whatever shell launched kubernetes-launch.sh. The "purge"
variable is then relied on to clear out the container once the script
completes.
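
With the workaround enabled, the flow for a single pod is roughly the
following (pod name, namespace, and script path are placeholders; the
actual commands live in kubernetes-launch.sh):

    # 1. Create the pod from the "-v2" template; its container only sleeps.
    kubectl create -f - <<< "${yamlfile}"
    # 2. Once the pod reports Running, start the build script via exec,
    #    streaming its output to the invoking shell instead of the log.
    kubectl exec -it ${podname} -n ${namespace} ${WORKSPACE}/build.sh
    # 3. With purge=true the created object is deleted afterwards.
    kubectl delete pod ${podname} -n ${namespace}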

Change-Id: I23e2ef48731bfc4c9ea8e0a3128fa32ca1901d9f
Signed-off-by: Alanny Lopez <alanny.lopez@ibm.com>
diff --git a/kubernetes/Templates/OpenBMC-build-job-v2.yaml b/kubernetes/Templates/OpenBMC-build-job-v2.yaml
new file mode 100644
index 0000000..902246c
--- /dev/null
+++ b/kubernetes/Templates/OpenBMC-build-job-v2.yaml
@@ -0,0 +1,48 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: openbmc${BUILD_ID}-${target}
+  namespace: ${namespace}
+  labels:
+    app: openbmc
+    stage: build
+spec:
+  template:
+    metadata:
+      name: ${podname}
+      labels:
+        target: ${target}
+    spec:
+      nodeSelector:
+        beta.kubernetes.io/arch: ${ARCH}
+      volumes:
+      - name: home
+        persistentVolumeClaim:
+          claimName: ${hclaim}
+      - name: sscdir
+        persistentVolumeClaim:
+          claimName: ${sclaim}
+      restartPolicy: Never
+      hostNetwork: True
+      containers:
+      - image: ${imgname}
+        name: builder
+        command: ["/bin/bash","-c"]
+        args: ["sleep 2h"]
+        workingDir: ${HOME}
+        env:
+        - name: WORKSPACE
+          value: ${WORKSPACE}
+        - name: builddir
+          value: ${builddir}
+        securityContext:
+          capabilities:
+            add:
+            - SYS_ADMIN
+        volumeMounts:
+        - name: home
+          mountPath: ${HOME}
+        - name: sscdir
+          mountPath: ${sscdir}
+      imagePullSecrets:
+      - name: ${imgplsec}
\ No newline at end of file
diff --git a/kubernetes/Templates/OpenBMC-build-pod-v2.yaml b/kubernetes/Templates/OpenBMC-build-pod-v2.yaml
new file mode 100644
index 0000000..4072892
--- /dev/null
+++ b/kubernetes/Templates/OpenBMC-build-pod-v2.yaml
@@ -0,0 +1,39 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: ${podname}
+  namespace: ${namespace}
+spec:
+  nodeSelector:
+    beta.kubernetes.io/arch: ${ARCH}
+  volumes:
+  - name: home
+    persistentVolumeClaim:
+      claimName: ${hclaim}
+  - name: sscdir
+    persistentVolumeClaim:
+      claimName: ${sclaim}
+  hostNetwork: True
+  containers:
+  - image: ${imgname}
+    name: builder
+    command: ["/bin/bash","-c"]
+    args: ["sleep 2h"]
+    workingDir: ${HOME}
+    env:
+    - name: WORKSPACE
+      value: ${WORKSPACE}
+    - name: builddir
+      value: ${builddir}
+    securityContext:
+      capabilities:
+        add:
+        - SYS_ADMIN
+    volumeMounts:
+    - name: home
+      mountPath: ${HOME}
+    - name: sscdir
+      mountPath: ${sscdir}
+  restartPolicy: Never
+  imagePullSecrets:
+  - name: ${imgplsec}
\ No newline at end of file
diff --git a/kubernetes/kubernetes-launch.sh b/kubernetes/kubernetes-launch.sh
index 65dc313..93bae3d 100755
--- a/kubernetes/kubernetes-launch.sh
+++ b/kubernetes/kubernetes-launch.sh
@@ -43,7 +43,13 @@
 #  purge        = set to true delete the created object once script completes
 #  launch       = used to determine the template for YAML file, Usually brought
 #                 in by sourcing from another script but can be declared
-#
+#  workaround   = Used to enable the logging workaround, when set will launch a
+#                 modified template that waits for a command. In most cases it
+#                 will be waiting to have a script run via kubectl exec. Needed
+#                 when using a version of Kubernetes that has known issues that
+#                 impact the retrieval of container logs when using kubectl.
+#                 Defaulting to be true whenever logging is enabled until ICP
+#                 upgrades their Kubernetes version.
 ###############################################################################
 
 # Kubernetes Variables
@@ -58,6 +64,7 @@
 log=${log:-${2}}
 purge=${purge:-${3}}
 launch=${launch:-${4}}
+workaround=${workaround:-${log}}
 
 # Set the variables for the specific invoker to fill in the YAML template
 # Other variables in the template not declared here are declared by invoker
@@ -103,7 +110,12 @@
   ARCH=amd64
 fi
 
-yamlfile=$(eval "echo \"$(<./kubernetes/Templates/${invoker}-${launch}.yaml)\"")
+extras=""
+if [[ "${workaround}" == "true" ]]; then
+  extras+="-v2"
+fi
+
+yamlfile=$(eval "echo \"$(<./kubernetes/Templates/${invoker}-${launch}${extras}.yaml)\"")
 kubectl create -f - <<< "${yamlfile}"
 
 # If launch is a job we have to find the podname with identifiers
@@ -141,8 +153,12 @@
     fi
     status=$( ${checkstatus} | grep Status: )
   done
-  # Tail the logs of the pod
-  kubectl logs -f ${podname} -n ${namespace}
+  # Tail the logs of the pod, if workaround enabled start executing build script instead.
+  if [[ "${workaround}" == "true" ]]; then
+    kubectl exec -it ${podname} -n ${namespace} ${WORKSPACE}/build.sh
+  else
+    kubectl logs -f ${podname} -n ${namespace}
+  fi
 fi
 
 # Delete the object if purge is true