diff --git a/pkg/oc/cli/admin/diagnostics/diagnostics/cluster/app_create/app.go b/pkg/oc/cli/admin/diagnostics/diagnostics/cluster/app_create/app.go
index 9594a33b129c..5e8419dcf57a 100644
--- a/pkg/oc/cli/admin/diagnostics/diagnostics/cluster/app_create/app.go
+++ b/pkg/oc/cli/admin/diagnostics/diagnostics/cluster/app_create/app.go
@@ -5,8 +5,12 @@ import (
 	"time"
 
 	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/apimachinery/pkg/watch"
+	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/kubectl"
 
 	appsv1 "github.com/openshift/api/apps/v1"
@@ -100,7 +104,7 @@ This may be a transient error. Check the master API logs for anomalies near this
 	}
 	defer stopWatcher(watcher)
 	for event := range watcher.ResultChan() {
-		running, err := kubectl.PodContainerRunning(d.appName)(event)
+		running, err := podContainerRunning(d.appName)(event)
 		if err != nil {
 			d.out.Error("DCluAC009", err, fmt.Sprintf(`
 %s: Error while watching for app pod to deploy:
@@ -124,3 +128,44 @@ There are many reasons why this can occur; for example:
 `, now(), d.deployTimeout))
 	return false
 }
+
+// podContainerRunning returns false until the named container has ContainerStatus running (at least once),
+// and will return an error if the pod is deleted, runs to completion, or the container pod is not available.
+func podContainerRunning(containerName string) watch.ConditionFunc {
+	return func(event watch.Event) (bool, error) {
+		switch event.Type {
+		case watch.Deleted:
+			return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, "")
+		}
+		switch t := event.Object.(type) {
+		case *api.Pod:
+			switch t.Status.Phase {
+			case api.PodRunning, api.PodPending:
+			case api.PodFailed, api.PodSucceeded:
+				return false, kubectl.ErrPodCompleted
+			default:
+				return false, nil
+			}
+			for _, s := range t.Status.ContainerStatuses {
+				if s.Name != containerName {
+					continue
+				}
+				if s.State.Terminated != nil {
+					return false, kubectl.ErrContainerTerminated
+				}
+				return s.State.Running != nil, nil
+			}
+			for _, s := range t.Status.InitContainerStatuses {
+				if s.Name != containerName {
+					continue
+				}
+				if s.State.Terminated != nil {
+					return false, kubectl.ErrContainerTerminated
+				}
+				return s.State.Running != nil, nil
+			}
+			return false, nil
+		}
+		return false, nil
+	}
+}
diff --git a/pkg/oc/cli/debug/debug.go b/pkg/oc/cli/debug/debug.go
index 2acf18cc1c01..4fe8a4b83c25 100644
--- a/pkg/oc/cli/debug/debug.go
+++ b/pkg/oc/cli/debug/debug.go
@@ -46,6 +46,7 @@ import (
 	appsutil "github.com/openshift/origin/pkg/apps/util"
 	imageapi "github.com/openshift/origin/pkg/image/apis/image"
 	imageutil "github.com/openshift/origin/pkg/image/util"
+	conditions "github.com/openshift/origin/pkg/oc/lib/conditions"
 	generateapp "github.com/openshift/origin/pkg/oc/lib/newapp/app"
 	utilenv "github.com/openshift/origin/pkg/oc/util/env"
 )
@@ -103,8 +104,9 @@ type DebugOptions struct {
 	AppsClient  appsv1client.AppsV1Interface
 	ImageClient imagev1client.ImageV1Interface
 
-	Printer       printers.ResourcePrinter
-	LogsForObject polymorphichelpers.LogsForObjectFunc
+	Printer          printers.ResourcePrinter
+	LogsForObject    polymorphichelpers.LogsForObjectFunc
+	RESTClientGetter genericclioptions.RESTClientGetter
 
 	NoStdin    bool
 	ForceTTY   bool
@@ -211,6 +213,7 @@ func (o *DebugOptions) Complete(cmd *cobra.Command, f kcmdutil.Factory, args []s
 		return kcmdutil.UsageErrorf(cmd, "all resources must be specified before environment changes: %s", strings.Join(args, " "))
 	}
 	o.Resources = resources
+	o.RESTClientGetter = f
 
 	switch {
 	case o.ForceTTY && o.NoStdin:
@@ -432,7 +435,7 @@ func (o *DebugOptions) RunDebug() error {
 	}
 
 	fmt.Fprintf(o.ErrOut, "Waiting for pod to start ...\n")
-	switch containerRunningEvent, err := watch.Until(o.Timeout, w, kubectl.PodContainerRunning(o.Attach.ContainerName)); {
+	switch containerRunningEvent, err := watch.Until(o.Timeout, w, conditions.PodContainerRunning(o.Attach.ContainerName)); {
 	// api didn't error right away but the pod wasn't even created
 	case kapierrors.IsNotFound(err):
 		msg := fmt.Sprintf("unable to create the debug pod %q", pod.Name)
@@ -444,12 +447,14 @@
 	case err == kubectl.ErrPodCompleted, err == kubectl.ErrContainerTerminated, !o.Attach.Stdin:
 		return kcmd.LogsOptions{
 			Object: pod,
-			Options: &kapi.PodLogOptions{
+			Options: &corev1.PodLogOptions{
 				Container: o.Attach.ContainerName,
 				Follow:    true,
 			},
-			IOStreams:     o.IOStreams,
-			LogsForObject: o.LogsForObject,
+			RESTClientGetter: o.RESTClientGetter,
+			ConsumeRequestFn: kcmd.DefaultConsumeRequestFn,
+			IOStreams:        o.IOStreams,
+			LogsForObject:    o.LogsForObject,
 		}.RunLogs()
 	case err != nil:
 		return err
diff --git a/pkg/oc/cli/newapp/newapp.go b/pkg/oc/cli/newapp/newapp.go
index 44736e6cda06..c81703c80892 100644
--- a/pkg/oc/cli/newapp/newapp.go
+++ b/pkg/oc/cli/newapp/newapp.go
@@ -161,8 +161,11 @@ type ObjectGeneratorOptions struct {
 }
 
 type AppOptions struct {
-	genericclioptions.IOStreams
 	*ObjectGeneratorOptions
+
+	RESTClientGetter genericclioptions.RESTClientGetter
+
+	genericclioptions.IOStreams
 }
 
 type versionedPrintObj struct {
@@ -367,6 +370,8 @@ func NewCmdNewApplication(name, baseName string, f kcmdutil.Factory, streams gen
 
 // Complete sets any default behavior for the command
 func (o *AppOptions) Complete(baseName, commandName string, f kcmdutil.Factory, c *cobra.Command, args []string) error {
+	o.RESTClientGetter = f
+
 	cmdutil.WarnAboutCommaSeparation(o.ErrOut, o.ObjectGeneratorOptions.Config.TemplateParameters, "--param")
 	err := o.ObjectGeneratorOptions.Complete(baseName, commandName, f, c, args)
 	if err != nil {
@@ -510,7 +515,7 @@ func (o *AppOptions) RunNewApp() error {
 
 	switch {
 	case len(installing) == 1:
-		return followInstallation(config, installing[0], o.LogsForObject)
+		return followInstallation(config, o.RESTClientGetter, installing[0], o.LogsForObject)
 	case len(installing) > 1:
 		for i := range installing {
 			fmt.Fprintf(out, "%sTrack installation of %s with '%s logs %s'.\n", indent, installing[i].Name, o.BaseName, installing[i].Name)
@@ -554,7 +559,7 @@ func getServices(items []runtime.Object) []*corev1.Service {
 	return svc
 }
 
-func followInstallation(config *newcmd.AppConfig, pod *corev1.Pod, logsForObjectFn polymorphichelpers.LogsForObjectFunc) error {
+func followInstallation(config *newcmd.AppConfig, clientGetter genericclioptions.RESTClientGetter, pod *corev1.Pod, logsForObjectFn polymorphichelpers.LogsForObjectFunc) error {
 	fmt.Fprintf(config.Out, "--> Installing ...\n")
 
 	// we cannot retrieve logs until the pod is out of pending
@@ -567,12 +572,14 @@
 	opts := &kcmd.LogsOptions{
 		Namespace:   pod.Namespace,
 		ResourceArg: pod.Name,
-		Options: &kapi.PodLogOptions{
+		Options: &corev1.PodLogOptions{
 			Follow:    true,
 			Container: pod.Spec.Containers[0].Name,
 		},
-		LogsForObject: logsForObjectFn,
-		IOStreams:     genericclioptions.IOStreams{Out: config.Out},
+		RESTClientGetter: clientGetter,
+		ConsumeRequestFn: kcmd.DefaultConsumeRequestFn,
+		LogsForObject:    logsForObjectFn,
+		IOStreams:        genericclioptions.IOStreams{Out: config.Out},
 	}
 
 	logErr := opts.RunLogs()
diff --git a/pkg/oc/lib/conditions/conditions.go b/pkg/oc/lib/conditions/conditions.go
new file mode 100644
index 000000000000..8e104c088a72
--- /dev/null
+++ b/pkg/oc/lib/conditions/conditions.go
@@ -0,0 +1,50 @@
+package conditions
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/kubernetes/pkg/kubectl"
+)
+
+// PodContainerRunning returns false until the named container has ContainerStatus running (at least once),
+// and will return an error if the pod is deleted, runs to completion, or the container pod is not available.
+func PodContainerRunning(containerName string) watch.ConditionFunc {
+	return func(event watch.Event) (bool, error) {
+		switch event.Type {
+		case watch.Deleted:
+			return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, "")
+		}
+		switch t := event.Object.(type) {
+		case *corev1.Pod:
+			switch t.Status.Phase {
+			case corev1.PodRunning, corev1.PodPending:
+			case corev1.PodFailed, corev1.PodSucceeded:
+				return false, kubectl.ErrPodCompleted
+			default:
+				return false, nil
+			}
+			for _, s := range t.Status.ContainerStatuses {
+				if s.Name != containerName {
+					continue
+				}
+				if s.State.Terminated != nil {
+					return false, kubectl.ErrContainerTerminated
+				}
+				return s.State.Running != nil, nil
+			}
+			for _, s := range t.Status.InitContainerStatuses {
+				if s.Name != containerName {
+					continue
+				}
+				if s.State.Terminated != nil {
+					return false, kubectl.ErrContainerTerminated
+				}
+				return s.State.Running != nil, nil
+			}
+			return false, nil
+		}
+		return false, nil
+	}
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/logs.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/logs.go
index d8b8191b0aec..d1733d21d1fa 100644
--- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/logs.go
+++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/logs.go
@@ -212,7 +212,7 @@ func (o *LogsOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []str
 		return err
 	}
 
-	o.ConsumeRequestFn = consumeRequest
+	o.ConsumeRequestFn = DefaultConsumeRequestFn
 
 	o.GetPodTimeout, err = cmdutil.GetPodRunningTimeoutFlag(cmd)
 	if err != nil {
@@ -303,7 +303,7 @@ func (o LogsOptions) RunLogs() error {
 	return nil
 }
 
-func consumeRequest(request *rest.Request, out io.Writer) error {
+func DefaultConsumeRequestFn(request *rest.Request, out io.Writer) error {
 	readCloser, err := request.Stream()
 	if err != nil {
 		return err
diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/run.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/run.go
index 5652ac9e88b0..bac826997945 100644
--- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/run.go
+++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/run.go
@@ -548,7 +548,7 @@ func logOpts(restClientGetter genericclioptions.RESTClientGetter, pod *corev1.Po
 		return err
 	}
 	for _, request := range requests {
-		if err := consumeRequest(request, opts.Out); err != nil {
+		if err := DefaultConsumeRequestFn(request, opts.Out); err != nil {
 			return err
 		}
 	}
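
Illustrative usage (not part of the diff above): a minimal sketch of how a caller might drive the new conditions.PodContainerRunning helper with watch.Until, mirroring the debug.go call site. The example package, the waitForContainer helper, its parameters, and the field-selector watch setup are assumptions made for illustration; only conditions.PodContainerRunning and the watch.Until pattern come from this change, and the Watch call assumes the pre-context client-go signature vendored by this repository.

// Package example is hypothetical; it exists only to show the call pattern.
package example

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"

	conditions "github.com/openshift/origin/pkg/oc/lib/conditions"
)

// waitForContainer (hypothetical) blocks until the named container in the named pod
// is running, or returns an error if the pod is deleted, completes, or the timeout expires.
func waitForContainer(client kubernetes.Interface, namespace, podName, containerName string, timeout time.Duration) error {
	// Watch only the pod of interest; each watch event is fed to the condition function.
	w, err := client.CoreV1().Pods(namespace).Watch(metav1.ListOptions{
		FieldSelector: fields.OneTermEqualSelector("metadata.name", podName).String(),
	})
	if err != nil {
		return err
	}
	defer w.Stop()

	// watch.Until invokes the condition for every event until it returns true or an error,
	// the same way the debug.go hunk above uses o.Timeout and the attach container name.
	if _, err := watch.Until(timeout, w, conditions.PodContainerRunning(containerName)); err != nil {
		return fmt.Errorf("container %q in pod %s/%s did not reach running state: %v", containerName, namespace, podName, err)
	}
	return nil
}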