diff --git a/pkg/kubectl/cmd/drain.go b/pkg/kubectl/cmd/drain.go
index 5a7c9db38ba9c..4e59f91ec8bc0 100644
--- a/pkg/kubectl/cmd/drain.go
+++ b/pkg/kubectl/cmd/drain.go
@@ -33,6 +33,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/strategicpatch"
@@ -59,6 +60,7 @@ type DrainOptions struct {
 	Timeout            time.Duration
 	backOff            clockwork.Clock
 	DeleteLocalData    bool
+	PodSelector        string
 	mapper             meta.RESTMapper
 	nodeInfo           *resource.Info
 	Out                io.Writer
@@ -190,6 +192,7 @@ func NewCmdDrain(f cmdutil.Factory, out, errOut io.Writer) *cobra.Command {
 	cmd.Flags().BoolVar(&options.DeleteLocalData, "delete-local-data", false, "Continue even if there are pods using emptyDir (local data that will be deleted when the node is drained).")
 	cmd.Flags().IntVar(&options.GracePeriodSeconds, "grace-period", -1, "Period of time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used.")
 	cmd.Flags().DurationVar(&options.Timeout, "timeout", 0, "The length of time to wait before giving up, zero means infinite")
+	cmd.Flags().StringVarP(&options.PodSelector, "pod-selector", "", options.PodSelector, "Label selector to filter pods on the node")
 	return cmd
 }
 
@@ -201,6 +204,12 @@ func (o *DrainOptions) SetupDrain(cmd *cobra.Command, args []string) error {
 		return cmdutil.UsageErrorf(cmd, "USAGE: %s [flags]", cmd.Use)
 	}
 
+	if len(o.PodSelector) > 0 {
+		if _, err := labels.Parse(o.PodSelector); err != nil {
+			return errors.New("--pod-selector=<pod_selector> must be a valid label selector")
+		}
+	}
+
 	if o.client, err = o.Factory.ClientSet(); err != nil {
 		return err
 	}
@@ -268,38 +277,8 @@ func (o *DrainOptions) deleteOrEvictPodsSimple() error {
 	return err
 }
 
-func (o *DrainOptions) getController(namespace string, controllerRef *metav1.OwnerReference) (interface{}, error) {
-	switch controllerRef.Kind {
-	case "ReplicationController":
-		return o.client.Core().ReplicationControllers(namespace).Get(controllerRef.Name, metav1.GetOptions{})
-	case "DaemonSet":
-		return o.client.Extensions().DaemonSets(namespace).Get(controllerRef.Name, metav1.GetOptions{})
-	case "Job":
-		return o.client.Batch().Jobs(namespace).Get(controllerRef.Name, metav1.GetOptions{})
-	case "ReplicaSet":
-		return o.client.Extensions().ReplicaSets(namespace).Get(controllerRef.Name, metav1.GetOptions{})
-	case "StatefulSet":
-		return o.client.Apps().StatefulSets(namespace).Get(controllerRef.Name, metav1.GetOptions{})
-	}
-	return nil, fmt.Errorf("Unknown controller kind %q", controllerRef.Kind)
-}
-
-func (o *DrainOptions) getPodController(pod api.Pod) (*metav1.OwnerReference, error) {
-	controllerRef := metav1.GetControllerOf(&pod)
-	if controllerRef == nil {
-		return nil, nil
-	}
-
-	// We assume the only reason for an error is because the controller is
-	// gone/missing, not for any other cause.
-	// TODO(mml): something more sophisticated than this
-	// TODO(juntee): determine if it's safe to remove getController(),
-	// so that drain can work for controller types that we don't know about
-	_, err := o.getController(pod.Namespace, controllerRef)
-	if err != nil {
-		return nil, err
-	}
-	return controllerRef, nil
+func (o *DrainOptions) getPodController(pod api.Pod) *metav1.OwnerReference {
+	return metav1.GetControllerOf(&pod)
 }
 
 func (o *DrainOptions) unreplicatedFilter(pod api.Pod) (bool, *warning, *fatal) {
@@ -308,21 +287,15 @@ func (o *DrainOptions) unreplicatedFilter(pod api.Pod) (bool, *warning, *fatal)
 		return true, nil, nil
 	}
 
-	controllerRef, err := o.getPodController(pod)
-	if err != nil {
-		// if we're forcing, remove orphaned pods with a warning
-		if apierrors.IsNotFound(err) && o.Force {
-			return true, &warning{err.Error()}, nil
-		}
-		return false, nil, &fatal{err.Error()}
-	}
+	controllerRef := o.getPodController(pod)
 	if controllerRef != nil {
 		return true, nil, nil
 	}
-	if !o.Force {
-		return false, nil, &fatal{kUnmanagedFatal}
+	if o.Force {
+		return true, &warning{kUnmanagedWarning}, nil
 	}
-	return true, &warning{kUnmanagedWarning}, nil
+
+	return false, nil, &fatal{kUnmanagedFatal}
 }
 
 func (o *DrainOptions) daemonsetFilter(pod api.Pod) (bool, *warning, *fatal) {
@@ -333,23 +306,23 @@ func (o *DrainOptions) daemonsetFilter(pod api.Pod) (bool, *warning, *fatal) {
 	// The exception is for pods that are orphaned (the referencing
 	// management resource - including DaemonSet - is not found).
 	// Such pods will be deleted if --force is used.
-	controllerRef, err := o.getPodController(pod)
-	if err != nil {
-		// if we're forcing, remove orphaned pods with a warning
-		if apierrors.IsNotFound(err) && o.Force {
-			return true, &warning{err.Error()}, nil
-		}
-		return false, nil, &fatal{err.Error()}
-	}
+	controllerRef := o.getPodController(pod)
 	if controllerRef == nil || controllerRef.Kind != "DaemonSet" {
 		return true, nil, nil
 	}
+
 	if _, err := o.client.Extensions().DaemonSets(pod.Namespace).Get(controllerRef.Name, metav1.GetOptions{}); err != nil {
+		// remove orphaned pods with a warning if --force is used
+		if apierrors.IsNotFound(err) && o.Force {
+			return true, &warning{err.Error()}, nil
+		}
+
 		return false, nil, &fatal{err.Error()}
 	}
+
 	if !o.IgnoreDaemonsets {
 		return false, nil, &fatal{kDaemonsetFatal}
 	}
+
 	return false, &warning{kDaemonsetWarning}, nil
 }
 
@@ -395,7 +368,13 @@ func (ps podStatuses) Message() string {
 // getPodsForDeletion returns all the pods we're going to delete. If there are
 // any pods preventing us from deleting, we return that list in an error.
 func (o *DrainOptions) getPodsForDeletion() (pods []api.Pod, err error) {
+	labelSelector, err := labels.Parse(o.PodSelector)
+	if err != nil {
+		return pods, err
+	}
+
 	podList, err := o.client.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{
+		LabelSelector: labelSelector.String(),
 		FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": o.nodeInfo.Name}).String()})
 	if err != nil {
 		return pods, err
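For reference, a minimal standalone sketch of what the patch wires together: the --pod-selector value is validated with labels.Parse up front (SetupDrain) and then re-serialized into the LabelSelector of the pod list call, alongside the existing spec.nodeName field selector (getPodsForDeletion). The selector string and node name below are illustrative placeholders, not values taken from the patch.

// pod_selector_sketch.go — illustrative only; mirrors the validation in
// SetupDrain and the ListOptions built in getPodsForDeletion above.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Placeholder inputs; in drain.go these come from o.PodSelector and o.nodeInfo.Name.
	podSelector := "app=nginx,tier!=backend"
	nodeName := "node-1"

	// Same up-front check SetupDrain performs on the --pod-selector flag value.
	labelSelector, err := labels.Parse(podSelector)
	if err != nil {
		fmt.Printf("--pod-selector must be a valid label selector: %v\n", err)
		return
	}

	// Same shape of ListOptions that getPodsForDeletion passes to the pod List call:
	// the parsed label selector plus a field selector pinning pods to the drained node.
	opts := metav1.ListOptions{
		LabelSelector: labelSelector.String(),
		FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName}).String(),
	}
	fmt.Printf("would list pods with %+v\n", opts)
}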