From 05a6158ec9b4879969ef8aed14e122aa4593e679 Mon Sep 17 00:00:00 2001 From: Sebastian Sch Date: Mon, 29 Jan 2024 14:45:02 +0200 Subject: [PATCH] Fix comments and rebase Signed-off-by: Sebastian Sch --- controllers/drain_controller.go | 135 +++++++----------- controllers/drain_controller_test.go | 110 +++++++++++--- controllers/helper.go | 16 +-- .../sriovnetworknodepolicy_controller.go | 3 - controllers/suite_test.go | 8 +- deploy/clusterrole.yaml | 2 +- .../templates/clusterrole.yaml | 2 +- main.go | 7 +- pkg/consts/constants.go | 5 +- pkg/daemon/daemon.go | 91 +++++++----- pkg/daemon/daemon_test.go | 44 ++---- pkg/drain/drainer.go | 16 +-- pkg/platforms/mock/mock_platforms.go | 24 ++-- .../openshift/mock/mock_openshift.go | 24 ++-- pkg/platforms/openshift/openshift.go | 8 +- pkg/plugins/mellanox/mellanox_plugin.go | 4 + pkg/vars/vars.go | 5 +- 17 files changed, 265 insertions(+), 239 deletions(-) diff --git a/controllers/drain_controller.go b/controllers/drain_controller.go index f9e98f7e5..00cbbb949 100644 --- a/controllers/drain_controller.go +++ b/controllers/drain_controller.go @@ -19,7 +19,6 @@ package controllers import ( "context" "fmt" - "os" "sync" "time" @@ -50,23 +49,15 @@ import ( type DrainReconcile struct { client.Client - Scheme *runtime.Scheme - recorder record.EventRecorder - drainer drain.DrainInterface - resourcePrefix string - - nodesInReconcile map[string]interface{} - nodesInReconcileMutex sync.Mutex - drainCheckMutex sync.Mutex + Scheme *runtime.Scheme + recorder record.EventRecorder + drainer drain.DrainInterface + + drainCheckMutex sync.Mutex } func NewDrainReconcileController(client client.Client, Scheme *runtime.Scheme, recorder record.EventRecorder, platformHelper platforms.Interface) (*DrainReconcile, error) { - resourcePrefix := os.Getenv("RESOURCE_PREFIX") - if resourcePrefix == "" { - return nil, fmt.Errorf("RESOURCE_PREFIX environment variable can't be empty") - } - - drainer, err := drain.NewDrainer(resourcePrefix, platformHelper) + drainer, err := drain.NewDrainer(platformHelper) if err != nil { return nil, err } @@ -76,30 +67,8 @@ func NewDrainReconcileController(client client.Client, Scheme *runtime.Scheme, r Scheme, recorder, drainer, - resourcePrefix, - map[string]interface{}{}, - sync.Mutex{}, sync.Mutex{}}, nil } -func (dr *DrainReconcile) TryLockNode(nodeName string) bool { - dr.nodesInReconcileMutex.Lock() - defer dr.nodesInReconcileMutex.Unlock() - - _, exist := dr.nodesInReconcile[nodeName] - if exist { - return false - } - - dr.nodesInReconcile[nodeName] = nil - return true -} - -func (dr *DrainReconcile) unlockNode(nodeName string) { - dr.nodesInReconcileMutex.Lock() - defer dr.nodesInReconcileMutex.Unlock() - - delete(dr.nodesInReconcile, nodeName) -} //+kubebuilder:rbac:groups="",resources=nodes,verbs=get;list;watch;update;patch //+kubebuilder:rbac:groups=sriovnetwork.openshift.io,resources=sriovnodestates,verbs=get;list;watch @@ -110,74 +79,41 @@ func (dr *DrainReconcile) unlockNode(nodeName string) { // For more details, check Reconcile and its Result here: // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.8.3/pkg/reconcile func (dr *DrainReconcile) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - // try to lock the node to this reconcile loop. 
- // if we are not able this means there is another loop already handling this node, so we just exist - if !dr.TryLockNode(req.Name) { - return ctrl.Result{}, nil - } - // configure logs reqLogger := log.FromContext(ctx) reqLogger.Info("Reconciling Drain") reqLogger.V(2).Info("node locked for drain controller", "nodeName", req.Name) - // we send to another function so the operator can have a defer function to release the lock - return dr.reconcile(ctx, req) -} - -func (dr *DrainReconcile) reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - // remove the lock when we exist the function - defer dr.unlockNode(req.Name) - req.Namespace = vars.Namespace - // configure logs - reqLogger := log.FromContext(ctx) - // get node object node := &corev1.Node{} - err := dr.Get(ctx, req.NamespacedName, node) + err := dr.getObject(ctx, req, node) if err != nil { - if errors.IsNotFound(err) { - reqLogger.Error(err, "node object doesn't exist", "nodeName", req.Name) - return ctrl.Result{}, nil - } - - reqLogger.Error(err, "failed to get node object from api re-queue the request", "nodeName", req.Name) + reqLogger.Error(err, "failed to get node object") return ctrl.Result{}, err } // get sriovNodeNodeState object nodeNetworkState := &sriovnetworkv1.SriovNetworkNodeState{} - err = dr.Get(ctx, req.NamespacedName, nodeNetworkState) + err = dr.getObject(ctx, req, nodeNetworkState) if err != nil { - if errors.IsNotFound(err) { - reqLogger.Error(err, "sriovNetworkNodeState object doesn't exist", "nodeName", req.Name) - return ctrl.Result{}, nil - } - - reqLogger.Error(err, "failed to get sriovNetworkNodeState object from api re-queue the request", "nodeName", req.Name) + reqLogger.Error(err, "failed to get sriovNetworkNodeState object") return ctrl.Result{}, err } // create the drain state annotation if it doesn't exist in the sriovNetworkNodeState object - nodeStateDrainAnnotationCurrent, NodeStateDrainAnnotationCurrentExist := nodeNetworkState.Annotations[constants.NodeStateDrainAnnotationCurrent] - if !NodeStateDrainAnnotationCurrentExist { - err = utils.AnnotateObject(nodeNetworkState, constants.NodeStateDrainAnnotationCurrent, constants.DrainIdle, dr.Client) - if err != nil { - return ctrl.Result{}, err - } - nodeStateDrainAnnotationCurrent = constants.DrainIdle + nodeStateDrainAnnotationCurrent, err := dr.ensureAnnotationExists(nodeNetworkState, constants.NodeStateDrainAnnotationCurrent) + if err != nil { + reqLogger.Error(err, "failed to ensure nodeStateDrainAnnotation") + return ctrl.Result{}, err } // create the drain state annotation if it doesn't exist in the node object - nodeDrainAnnotation, nodeDrainAnnotationExist := node.Annotations[constants.NodeDrainAnnotation] - if !nodeDrainAnnotationExist { - err = utils.AnnotateObject(node, constants.NodeDrainAnnotation, constants.DrainIdle, dr.Client) - if err != nil { - return ctrl.Result{}, err - } - nodeDrainAnnotation = constants.DrainIdle + nodeDrainAnnotation, err := dr.ensureAnnotationExists(node, constants.NodeDrainAnnotation) + if err != nil { + reqLogger.Error(err, "failed to ensure nodeStateDrainAnnotation") + return ctrl.Result{}, err } reqLogger.V(2).Info("Drain annotations", "nodeAnnotation", nodeDrainAnnotation, "nodeStateAnnotation", nodeStateDrainAnnotationCurrent) @@ -218,6 +154,7 @@ func (dr *DrainReconcile) reconcile(ctx context.Context, req ctrl.Request) (ctrl corev1.EventTypeWarning, "DrainController", "node complete drain was not completed") + // TODO: make this time configurable return 
reconcile.Result{RequeueAfter: 5 * time.Second}, nil } @@ -235,7 +172,7 @@ func (dr *DrainReconcile) reconcile(ctx context.Context, req ctrl.Request) (ctrl "node un drain completed") return ctrl.Result{}, nil } - } else { + } else if nodeDrainAnnotation == constants.DrainRequired || nodeDrainAnnotation == constants.RebootRequired { // this cover the case a node request to drain or reboot // nothing to do here we need to wait for the node to move back to idle @@ -298,6 +235,38 @@ func (dr *DrainReconcile) reconcile(ctx context.Context, req ctrl.Request) (ctrl return reconcile.Result{}, fmt.Errorf("unexpected node drain annotation") } +func (dr *DrainReconcile) getObject(ctx context.Context, req ctrl.Request, object client.Object) error { + // configure logs + reqLogger := log.FromContext(ctx) + reqLogger.Info("checkForNodeDrain():") + + err := dr.Get(ctx, req.NamespacedName, object) + if err != nil { + if errors.IsNotFound(err) { + reqLogger.Error(err, "object doesn't exist", "objectName", req.Name) + return nil + } + + reqLogger.Error(err, "failed to get object from api re-queue the request", "objectName", req.Name) + return err + } + + return nil +} + +func (dr *DrainReconcile) ensureAnnotationExists(object client.Object, key string) (string, error) { + value, exist := object.GetAnnotations()[key] + if !exist { + err := utils.AnnotateObject(object, constants.NodeStateDrainAnnotationCurrent, constants.DrainIdle, dr.Client) + if err != nil { + return "", err + } + return constants.DrainIdle, nil + } + + return value, nil +} + func (dr *DrainReconcile) checkForNodeDrain(ctx context.Context, node *corev1.Node) (*reconcile.Result, error) { // configure logs reqLogger := log.FromContext(ctx) diff --git a/controllers/drain_controller_test.go b/controllers/drain_controller_test.go index 05051a8e1..569629eb5 100644 --- a/controllers/drain_controller_test.go +++ b/controllers/drain_controller_test.go @@ -2,24 +2,101 @@ package controllers import ( "context" + "sync" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + "github.com/golang/mock/gomock" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client" + mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" + sriovnetworkv1 "github.com/k8snetworkplumbingwg/sriov-network-operator/api/v1" constants "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/consts" + mock_platforms "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/platforms/mock" + "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/platforms/openshift" "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/utils" "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/vars" ) -var _ = Describe("Drain Controller", func() { +var _ = Describe("Drain Controller", Ordered, func() { + + var cancel context.CancelFunc + var ctx context.Context + + BeforeAll(func() { + By("Create default SriovNetworkPoolConfig k8s objs") + maxun := intstr.Parse("1") + poolConfig := &sriovnetworkv1.SriovNetworkPoolConfig{} + poolConfig.SetNamespace(testNamespace) + poolConfig.SetName(constants.DefaultConfigName) + poolConfig.Spec = sriovnetworkv1.SriovNetworkPoolConfigSpec{MaxUnavailable: &maxun, NodeSelector: &metav1.LabelSelector{}} + Expect(k8sClient.Create(context.Background(), poolConfig)).Should(Succeed()) + DeferCleanup(func() { + err := k8sClient.Delete(context.Background(), poolConfig) + Expect(err).ToNot(HaveOccurred()) + }) + + By("Setup controller manager") + k8sManager, err := setupK8sManagerForTest() + Expect(err).ToNot(HaveOccurred()) + + t := GinkgoT() + mockCtrl := gomock.NewController(t) + platformHelper := mock_platforms.NewMockInterface(mockCtrl) + platformHelper.EXPECT().GetFlavor().Return(openshift.OpenshiftFlavorDefault).AnyTimes() + platformHelper.EXPECT().IsOpenshiftCluster().Return(false).AnyTimes() + platformHelper.EXPECT().IsHypershift().Return(false).AnyTimes() + platformHelper.EXPECT().OpenshiftBeforeDrainNode(gomock.Any(), gomock.Any()).Return(true, nil).AnyTimes() + platformHelper.EXPECT().OpenshiftAfterCompleteDrainNode(gomock.Any(), gomock.Any()).Return(true, nil).AnyTimes() + + // we need a client that doesn't use the local cache for the objects + drainKClient, err := client.New(cfg, client.Options{ + Scheme: scheme.Scheme, + Cache: &client.CacheOptions{ + DisableFor: []client.Object{ + &sriovnetworkv1.SriovNetworkNodeState{}, + &corev1.Node{}, + &mcfgv1.MachineConfigPool{}, + }, + }, + }) + Expect(err).ToNot(HaveOccurred()) + + drainController, err := NewDrainReconcileController(drainKClient, + k8sManager.GetScheme(), + k8sManager.GetEventRecorderFor("operator"), + platformHelper) + Expect(err).ToNot(HaveOccurred()) + err = drainController.SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + + ctx, cancel = context.WithCancel(context.Background()) + + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + defer GinkgoRecover() + By("Start controller manager") + err := k8sManager.Start(ctx) + Expect(err).ToNot(HaveOccurred()) + }() + + DeferCleanup(func() { + By("Shutdown controller manager") + cancel() + wg.Wait() + }) + }) + BeforeEach(func() { Expect(k8sClient.DeleteAllOf(context.Background(), &corev1.Node{})).ToNot(HaveOccurred()) 
Expect(k8sClient.DeleteAllOf(context.Background(), &sriovnetworkv1.SriovNetworkNodeState{}, client.InNamespace(vars.Namespace))).ToNot(HaveOccurred()) @@ -36,7 +113,7 @@ var _ = Describe("Drain Controller", func() { Context("when there is only one node", func() { It("should drain", func(ctx context.Context) { - node, nodeState := createNode("node1") + node, nodeState := createNode(ctx, "node1") simulateDaemonSetAnnotation(node, constants.DrainRequired) @@ -53,9 +130,9 @@ var _ = Describe("Drain Controller", func() { Context("when there are multiple nodes", func() { It("should drain nodes serially with default pool selector", func(ctx context.Context) { - node1, nodeState1 := createNode("node1") - node2, nodeState2 := createNode("node2") - node3, nodeState3 := createNode("node3") + node1, nodeState1 := createNode(ctx, "node1") + node2, nodeState2 := createNode(ctx, "node2") + node3, nodeState3 := createNode(ctx, "node3") // Two nodes require to drain at the same time simulateDaemonSetAnnotation(node1, constants.DrainRequired) @@ -93,9 +170,9 @@ var _ = Describe("Drain Controller", func() { }) It("should drain nodes in parallel with a custom pool selector", func(ctx context.Context) { - node1, nodeState1 := createNode("node1") - node2, nodeState2 := createNode("node2") - node3, nodeState3 := createNode("node3") + node1, nodeState1 := createNode(ctx, "node1") + node2, nodeState2 := createNode(ctx, "node2") + node3, nodeState3 := createNode(ctx, "node3") maxun := intstr.Parse("2") poolConfig := &sriovnetworkv1.SriovNetworkPoolConfig{} @@ -112,7 +189,7 @@ var _ = Describe("Drain Controller", func() { simulateDaemonSetAnnotation(node1, constants.DrainRequired) simulateDaemonSetAnnotation(node2, constants.DrainRequired) - // Only the first node drains + // Both nodes drain expectNodeStateAnnotation(nodeState1, constants.DrainComplete) expectNodeStateAnnotation(nodeState2, constants.DrainComplete) expectNodeStateAnnotation(nodeState3, constants.DrainIdle) @@ -144,9 +221,9 @@ var _ = Describe("Drain Controller", func() { }) It("should drain nodes in parallel with a custom pool selector and honor MaxUnavailable", func(ctx context.Context) { - node1, nodeState1 := createNode("node1") - node2, nodeState2 := createNode("node2") - node3, nodeState3 := createNode("node3") + node1, nodeState1 := createNode(ctx, "node1") + node2, nodeState2 := createNode(ctx, "node2") + node3, nodeState3 := createNode(ctx, "node3") maxun := intstr.Parse("2") poolConfig := &sriovnetworkv1.SriovNetworkPoolConfig{} @@ -169,9 +246,9 @@ var _ = Describe("Drain Controller", func() { }) It("should drain all nodes in parallel with a custom pool using nil in max unavailable", func(ctx context.Context) { - node1, nodeState1 := createNode("node1") - node2, nodeState2 := createNode("node2") - node3, nodeState3 := createNode("node3") + node1, nodeState1 := createNode(ctx, "node1") + node2, nodeState2 := createNode(ctx, "node2") + node3, nodeState3 := createNode(ctx, "node3") poolConfig := &sriovnetworkv1.SriovNetworkPoolConfig{} poolConfig.SetNamespace(testNamespace) @@ -188,7 +265,6 @@ var _ = Describe("Drain Controller", func() { simulateDaemonSetAnnotation(node2, constants.DrainRequired) simulateDaemonSetAnnotation(node3, constants.DrainRequired) - // Only the first node drains expectNodeStateAnnotation(nodeState1, constants.DrainComplete) expectNodeStateAnnotation(nodeState2, constants.DrainComplete) expectNodeStateAnnotation(nodeState3, constants.DrainComplete) @@ -261,7 +337,7 @@ func simulateDaemonSetAnnotation(node 
*corev1.Node, drainAnnotationValue string) ToNot(HaveOccurred()) } -func createNode(nodeName string) (*corev1.Node, *sriovnetworkv1.SriovNetworkNodeState) { +func createNode(ctx context.Context, nodeName string) (*corev1.Node, *sriovnetworkv1.SriovNetworkNodeState) { node := corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: nodeName, diff --git a/controllers/helper.go b/controllers/helper.go index a47ea9793..823e4a369 100644 --- a/controllers/helper.go +++ b/controllers/helper.go @@ -33,8 +33,8 @@ import ( kscheme "k8s.io/client-go/kubernetes/scheme" k8sclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" sriovnetworkv1 "github.com/k8snetworkplumbingwg/sriov-network-operator/api/v1" @@ -63,27 +63,21 @@ type DrainAnnotationPredicate struct { } func (DrainAnnotationPredicate) Create(e event.CreateEvent) bool { - logger := log.FromContext(context.TODO()) if e.Object == nil { - logger.Info("Create event: node has no drain annotation", "node", e.Object.GetName()) return false } if _, hasAnno := e.Object.GetAnnotations()[constants.NodeDrainAnnotation]; hasAnno { - logger.Info("Create event: node has no drain annotation", "node", e.Object.GetName()) return true } return false } func (DrainAnnotationPredicate) Update(e event.UpdateEvent) bool { - logger := log.FromContext(context.TODO()) if e.ObjectOld == nil { - logger.Info("Update event has no old object to update", "node", e.ObjectOld.GetName()) return false } if e.ObjectNew == nil { - logger.Info("Update event has no new object for update", "node", e.ObjectNew.GetName()) return false } @@ -106,27 +100,21 @@ type DrainStateAnnotationPredicate struct { } func (DrainStateAnnotationPredicate) Create(e event.CreateEvent) bool { - logger := log.FromContext(context.TODO()) if e.Object == nil { - logger.Info("Create event: node has no drain annotation", "node", e.Object.GetName()) return false } if _, hasAnno := e.Object.GetLabels()[constants.NodeStateDrainAnnotationCurrent]; hasAnno { - logger.Info("Create event: node has no drain annotation", "node", e.Object.GetName()) return true } return false } func (DrainStateAnnotationPredicate) Update(e event.UpdateEvent) bool { - logger := log.FromContext(context.TODO()) if e.ObjectOld == nil { - logger.Info("Update event has no old object to update", "node", e.ObjectOld.GetName()) return false } if e.ObjectNew == nil { - logger.Info("Update event has no new object for update", "node", e.ObjectNew.GetName()) return false } @@ -192,7 +180,7 @@ func syncPluginDaemonObjs(ctx context.Context, data.Data["Namespace"] = vars.Namespace data.Data["SRIOVDevicePluginImage"] = os.Getenv("SRIOV_DEVICE_PLUGIN_IMAGE") data.Data["ReleaseVersion"] = os.Getenv("RELEASEVERSION") - data.Data["ResourcePrefix"] = os.Getenv("RESOURCE_PREFIX") + data.Data["ResourcePrefix"] = vars.ResourcePrefix data.Data["ImagePullSecrets"] = GetImagePullSecrets() data.Data["NodeSelectorField"] = GetDefaultNodeSelector() data.Data["UseCDI"] = dc.Spec.UseCDI diff --git a/controllers/sriovnetworknodepolicy_controller.go b/controllers/sriovnetworknodepolicy_controller.go index 82174cc45..b949b7864 100644 --- a/controllers/sriovnetworknodepolicy_controller.go +++ b/controllers/sriovnetworknodepolicy_controller.go @@ -44,9 +44,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" 
"sigs.k8s.io/controller-runtime/pkg/source" - utils "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/utils" - "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/vars" - dptypes "github.com/k8snetworkplumbingwg/sriov-network-device-plugin/pkg/types" sriovnetworkv1 "github.com/k8snetworkplumbingwg/sriov-network-operator/api/v1" diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 936546a2c..bc5870f34 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -29,7 +29,6 @@ import ( "go.uber.org/zap/zapcore" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" ctrl "sigs.k8s.io/controller-runtime" @@ -45,8 +44,6 @@ import ( //+kubebuilder:scaffold:imports sriovnetworkv1 "github.com/k8snetworkplumbingwg/sriov-network-operator/api/v1" - constants "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/consts" - "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/platforms" "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/vars" "github.com/k8snetworkplumbingwg/sriov-network-operator/test/util" ) @@ -147,12 +144,15 @@ var _ = BeforeSuite(func() { err = openshiftconfigv1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) + vars.Config = cfg + vars.Scheme = scheme.Scheme + vars.Namespace = testNamespace + By("creating K8s client") k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) Expect(err).NotTo(HaveOccurred()) Expect(k8sClient).NotTo(BeNil()) - By("creating default/common k8s objects for tests") // Create test namespace ns := &corev1.Namespace{ diff --git a/deploy/clusterrole.yaml b/deploy/clusterrole.yaml index 870fa9eba..e7a596061 100644 --- a/deploy/clusterrole.yaml +++ b/deploy/clusterrole.yaml @@ -16,7 +16,7 @@ rules: resources: ["daemonsets"] verbs: ["get"] - apiGroups: [""] - resources: [namespaces, serviceaccounts] + resources: ["namespaces", "serviceaccounts"] verbs: ["*"] - apiGroups: ["k8s.cni.cncf.io"] resources: ["network-attachment-definitions"] diff --git a/deployment/sriov-network-operator/templates/clusterrole.yaml b/deployment/sriov-network-operator/templates/clusterrole.yaml index 3b324b32f..7cd8fd014 100644 --- a/deployment/sriov-network-operator/templates/clusterrole.yaml +++ b/deployment/sriov-network-operator/templates/clusterrole.yaml @@ -18,7 +18,7 @@ rules: resources: ["daemonsets"] verbs: ["get"] - apiGroups: [""] - resources: [namespaces, serviceaccounts] + resources: ["namespaces", "serviceaccounts"] verbs: ["*"] - apiGroups: ["k8s.cni.cncf.io"] resources: ["network-attachment-definitions"] diff --git a/main.go b/main.go index 745c39fb4..a656b070d 100644 --- a/main.go +++ b/main.go @@ -53,11 +53,11 @@ import ( "github.com/k8snetworkplumbingwg/sriov-network-operator/controllers" "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/leaderelection" + "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/consts" snolog "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/log" "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/platforms" "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/utils" "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/vars" - "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/consts" 
//+kubebuilder:scaffold:imports ) @@ -98,6 +98,11 @@ func main() { os.Exit(1) } + if vars.ResourcePrefix == "" { + setupLog.Error(nil, "RESOURCE_PREFIX environment variable can't be empty") + os.Exit(1) + } + le := leaderelection.GetLeaderElectionConfig(kubeClient, enableLeaderElection) mgr, err := ctrl.NewManager(restConfig, ctrl.Options{ diff --git a/pkg/consts/constants.go b/pkg/consts/constants.go index 210a3a5e8..86af30bef 100644 --- a/pkg/consts/constants.go +++ b/pkg/consts/constants.go @@ -65,9 +65,8 @@ const ( DrainIdle = "Idle" DrainRequired = "Drain_Required" RebootRequired = "Reboot_Required" - //DrainMcpPaused = "Draining_MCP_Paused" - Draining = "Draining" - DrainComplete = "DrainComplete" + Draining = "Draining" + DrainComplete = "DrainComplete" SyncStatusSucceeded = "Succeeded" SyncStatusFailed = "Failed" diff --git a/pkg/daemon/daemon.go b/pkg/daemon/daemon.go index 4575bcf6b..b7c3f81bb 100644 --- a/pkg/daemon/daemon.go +++ b/pkg/daemon/daemon.go @@ -493,44 +493,13 @@ func (dn *Daemon) nodeStateSyncHandler() error { } } - if reqDrain || - (utils.ObjectHasAnnotationKey(dn.desiredNodeState, consts.NodeStateDrainAnnotationCurrent) && - !utils.ObjectHasAnnotation(dn.desiredNodeState, consts.NodeStateDrainAnnotationCurrent, consts.DrainIdle)) { - if utils.ObjectHasAnnotation(dn.desiredNodeState, consts.NodeStateDrainAnnotationCurrent, consts.DrainComplete) { - log.Log.Info("nodeStateSyncHandler(): the node complete the draining") - } else if !dn.isNodeDraining() { - if !dn.disableDrain { - if reqReboot { - log.Log.Info("nodeStateSyncHandler(): apply 'Reboot_Required' label for node") - if err := dn.applyRequirement(consts.RebootRequired); err != nil { - return err - } - - log.Log.Info("nodeStateSyncHandler(): apply 'Reboot_Required' label for nodeState") - if err := utils.AnnotateObject(dn.desiredNodeState, - consts.NodeStateDrainAnnotation, - consts.RebootRequired, dn.client); err != nil { - return err - } - - return nil - } - log.Log.Info("nodeStateSyncHandler(): apply 'Drain_Required' label for node") - if err := dn.applyRequirement(consts.DrainRequired); err != nil { - return err - } - - log.Log.Info("nodeStateSyncHandler(): apply 'Drain_Required' label for nodeState") - if err := utils.AnnotateObject(dn.desiredNodeState, - consts.NodeStateDrainAnnotation, - consts.DrainRequired, dn.client); err != nil { - return err - } - return nil - } - } else { - log.Log.Info("nodeStateSyncHandler(): the node is still draining waiting") - return nil + // handle drain only if the plugin request drain, or we are already in a draining request state + if reqDrain || !utils.ObjectHasAnnotation(dn.desiredNodeState, + consts.NodeStateDrainAnnotationCurrent, + consts.DrainIdle) { + if err := dn.handleDrain(reqReboot); err != nil { + log.Log.Error(err, "failed to handle drain") + return err } } @@ -614,6 +583,52 @@ func (dn *Daemon) isNodeDraining() bool { return anno == consts.Draining } +func (dn *Daemon) handleDrain(reqReboot bool) error { + if utils.ObjectHasAnnotation(dn.desiredNodeState, consts.NodeStateDrainAnnotationCurrent, consts.DrainComplete) { + log.Log.Info("handleDrain(): the node complete the draining") + return nil + } + + if dn.isNodeDraining() { + log.Log.Info("handleDrain(): the node is still draining waiting") + return nil + } + + if dn.disableDrain { + log.Log.Info("handleDrain(): drain is disabled in sriovOperatorConfig") + return nil + } + + if reqReboot { + log.Log.Info("handleDrain(): apply 'Reboot_Required' label for node") + if err := 
dn.applyRequirement(consts.RebootRequired); err != nil { + return err + } + + log.Log.Info("handleDrain(): apply 'Reboot_Required' label for nodeState") + if err := utils.AnnotateObject(dn.desiredNodeState, + consts.NodeStateDrainAnnotation, + consts.RebootRequired, dn.client); err != nil { + return err + } + + return nil + } + log.Log.Info("handleDrain(): apply 'Drain_Required' label for node") + if err := dn.applyRequirement(consts.DrainRequired); err != nil { + return err + } + + log.Log.Info("handleDrain(): apply 'Drain_Required' label for nodeState") + if err := utils.AnnotateObject(dn.desiredNodeState, + consts.NodeStateDrainAnnotation, + consts.DrainRequired, dn.client); err != nil { + return err + } + + return nil +} + func (dn *Daemon) applyRequirement(label string) error { log.Log.Info("applyDrainRequired(): no other node is draining") err := utils.AnnotateNode(vars.NodeName, consts.NodeDrainAnnotation, label, dn.client) diff --git a/pkg/daemon/daemon_test.go b/pkg/daemon/daemon_test.go index f204a864e..253c29112 100644 --- a/pkg/daemon/daemon_test.go +++ b/pkg/daemon/daemon_test.go @@ -32,30 +32,6 @@ import ( "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/vars" ) -var FakeSupportedNicIDs corev1.ConfigMap = corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: sriovnetworkv1.SupportedNicIDConfigmap, - Namespace: vars.Namespace, - }, - Data: map[string]string{ - "Intel_i40e_XXV710": "8086 158a 154c", - "Nvidia_mlx5_ConnectX-4": "15b3 1013 1014", - }, -} - -var SriovDevicePluginPod corev1.Pod = corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "sriov-device-plugin-xxxx", - Namespace: vars.Namespace, - Labels: map[string]string{ - "app": "sriov-device-plugin", - }, - }, - Spec: corev1.PodSpec{ - NodeName: "test-node", - }, -} - func TestConfigDaemon(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Config Daemon Suite") @@ -120,16 +96,6 @@ var _ = Describe("Config Daemon", func() { vars.Namespace = "sriov-network-operator" vars.PlatformType = consts.Baremetal - err = sriovnetworkv1.AddToScheme(scheme.Scheme) - Expect(err).ToNot(HaveOccurred()) - kClient := kclient.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(&corev1.Node{ - ObjectMeta: metav1.ObjectMeta{Name: "test-node"}}, - &sriovnetworkv1.SriovNetworkNodeState{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-node", - Namespace: vars.Namespace, - }}).Build() - FakeSupportedNicIDs := corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: sriovnetworkv1.SupportedNicIDConfigmap, @@ -154,6 +120,16 @@ var _ = Describe("Config Daemon", func() { }, } + err = sriovnetworkv1.AddToScheme(scheme.Scheme) + Expect(err).ToNot(HaveOccurred()) + kClient := kclient.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(&corev1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: "test-node"}}, + &sriovnetworkv1.SriovNetworkNodeState{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-node", + Namespace: vars.Namespace, + }}).Build() + kubeClient := fakek8s.NewSimpleClientset(&FakeSupportedNicIDs, &SriovDevicePluginPod) snclient := snclientset.NewSimpleClientset() err = sriovnetworkv1.InitNicIDMapFromConfigMap(kubeClient, vars.Namespace) diff --git a/pkg/drain/drainer.go b/pkg/drain/drainer.go index 943b3f5ec..a3500dc47 100644 --- a/pkg/drain/drainer.go +++ b/pkg/drain/drainer.go @@ -34,19 +34,17 @@ type DrainInterface interface { } type Drainer struct { - resourcePrefix string kubeClient kubernetes.Interface platformHelpers platforms.Interface } -func NewDrainer(resourcePrefix string, 
platformHelpers platforms.Interface) (DrainInterface, error) { +func NewDrainer(platformHelpers platforms.Interface) (DrainInterface, error) { kclient, err := kubernetes.NewForConfig(vars.Config) if err != nil { return nil, err } return &Drainer{ - resourcePrefix: resourcePrefix, kubeClient: kclient, platformHelpers: platformHelpers, }, err @@ -59,7 +57,7 @@ func (d *Drainer) DrainNode(ctx context.Context, node *corev1.Node, fullNodeDrai reqLogger := log.FromContext(ctx).WithValues("drain node", node.Name) reqLogger.Info("drainNode(): Node drain requested", "node", node.Name) - completed, err := d.platformHelpers.OpenshiftDrainNode(ctx, node) + completed, err := d.platformHelpers.OpenshiftBeforeDrainNode(ctx, node) if err != nil { reqLogger.Error(err, "error running OpenshiftDrainNode") return false, err @@ -70,7 +68,7 @@ func (d *Drainer) DrainNode(ctx context.Context, node *corev1.Node, fullNodeDrai return false, nil } - drainHelper := createDrainHelper(d.kubeClient, ctx, d.resourcePrefix, fullNodeDrain) + drainHelper := createDrainHelper(d.kubeClient, ctx, fullNodeDrain) backoff := wait.Backoff{ Steps: 5, Duration: 10 * time.Second, @@ -113,7 +111,7 @@ func (d *Drainer) CompleteDrainNode(ctx context.Context, node *corev1.Node) (boo // Create drain helper object // full drain is not important here - drainHelper := createDrainHelper(d.kubeClient, ctx, d.resourcePrefix, false) + drainHelper := createDrainHelper(d.kubeClient, ctx, false) // run the un cordon function on the node if err := drain.RunCordonOrUncordon(drainHelper, node, false); err != nil { @@ -123,7 +121,7 @@ func (d *Drainer) CompleteDrainNode(ctx context.Context, node *corev1.Node) (boo // call the openshift complete drain to unpause the MCP // only if we are the last draining node in the pool - completed, err := d.platformHelpers.OpenshiftCompleteDrainNode(ctx, node) + completed, err := d.platformHelpers.OpenshiftAfterCompleteDrainNode(ctx, node) if err != nil { logger.Error(err, "failed to complete openshift draining") return false, err @@ -136,7 +134,7 @@ func (d *Drainer) CompleteDrainNode(ctx context.Context, node *corev1.Node) (boo // createDrainHelper function to create a drain helper // if fullDrain is false we only remove pods that have the resourcePrefix // if not we remove all the pods in the node -func createDrainHelper(kubeClient kubernetes.Interface, ctx context.Context, resourcePrefix string, fullDrain bool) *drain.Helper { +func createDrainHelper(kubeClient kubernetes.Interface, ctx context.Context, fullDrain bool) *drain.Helper { logger := log.FromContext(ctx) drainer := &drain.Helper{ Client: kubeClient, @@ -163,7 +161,7 @@ func createDrainHelper(kubeClient kubernetes.Interface, ctx context.Context, res for _, c := range p.Spec.Containers { if c.Resources.Requests != nil { for r := range c.Resources.Requests { - if strings.HasPrefix(r.String(), resourcePrefix) { + if strings.HasPrefix(r.String(), vars.ResourcePrefix) { return drain.PodDeleteStatus{ Delete: true, Reason: "pod contain SR-IOV device", diff --git a/pkg/platforms/mock/mock_platforms.go b/pkg/platforms/mock/mock_platforms.go index 91c9bc0a9..4218ad045 100644 --- a/pkg/platforms/mock/mock_platforms.go +++ b/pkg/platforms/mock/mock_platforms.go @@ -150,32 +150,32 @@ func (mr *MockInterfaceMockRecorder) IsOpenshiftCluster() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsOpenshiftCluster", reflect.TypeOf((*MockInterface)(nil).IsOpenshiftCluster)) } -// OpenshiftCompleteDrainNode mocks base method. 
-func (m *MockInterface) OpenshiftCompleteDrainNode(arg0 context.Context, arg1 *v11.Node) (bool, error) { +// OpenshiftAfterCompleteDrainNode mocks base method. +func (m *MockInterface) OpenshiftAfterCompleteDrainNode(arg0 context.Context, arg1 *v11.Node) (bool, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "OpenshiftCompleteDrainNode", arg0, arg1) + ret := m.ctrl.Call(m, "OpenshiftAfterCompleteDrainNode", arg0, arg1) ret0, _ := ret[0].(bool) ret1, _ := ret[1].(error) return ret0, ret1 } -// OpenshiftCompleteDrainNode indicates an expected call of OpenshiftCompleteDrainNode. -func (mr *MockInterfaceMockRecorder) OpenshiftCompleteDrainNode(arg0, arg1 interface{}) *gomock.Call { +// OpenshiftAfterCompleteDrainNode indicates an expected call of OpenshiftAfterCompleteDrainNode. +func (mr *MockInterfaceMockRecorder) OpenshiftAfterCompleteDrainNode(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OpenshiftCompleteDrainNode", reflect.TypeOf((*MockInterface)(nil).OpenshiftCompleteDrainNode), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OpenshiftAfterCompleteDrainNode", reflect.TypeOf((*MockInterface)(nil).OpenshiftAfterCompleteDrainNode), arg0, arg1) } -// OpenshiftDrainNode mocks base method. -func (m *MockInterface) OpenshiftDrainNode(arg0 context.Context, arg1 *v11.Node) (bool, error) { +// OpenshiftBeforeDrainNode mocks base method. +func (m *MockInterface) OpenshiftBeforeDrainNode(arg0 context.Context, arg1 *v11.Node) (bool, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "OpenshiftDrainNode", arg0, arg1) + ret := m.ctrl.Call(m, "OpenshiftBeforeDrainNode", arg0, arg1) ret0, _ := ret[0].(bool) ret1, _ := ret[1].(error) return ret0, ret1 } -// OpenshiftDrainNode indicates an expected call of OpenshiftDrainNode. -func (mr *MockInterfaceMockRecorder) OpenshiftDrainNode(arg0, arg1 interface{}) *gomock.Call { +// OpenshiftBeforeDrainNode indicates an expected call of OpenshiftBeforeDrainNode. +func (mr *MockInterfaceMockRecorder) OpenshiftBeforeDrainNode(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OpenshiftDrainNode", reflect.TypeOf((*MockInterface)(nil).OpenshiftDrainNode), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OpenshiftBeforeDrainNode", reflect.TypeOf((*MockInterface)(nil).OpenshiftBeforeDrainNode), arg0, arg1) } diff --git a/pkg/platforms/openshift/mock/mock_openshift.go b/pkg/platforms/openshift/mock/mock_openshift.go index 1f0d1f609..4bafa8ee5 100644 --- a/pkg/platforms/openshift/mock/mock_openshift.go +++ b/pkg/platforms/openshift/mock/mock_openshift.go @@ -108,32 +108,32 @@ func (mr *MockOpenshiftContextInterfaceMockRecorder) IsOpenshiftCluster() *gomoc return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsOpenshiftCluster", reflect.TypeOf((*MockOpenshiftContextInterface)(nil).IsOpenshiftCluster)) } -// OpenshiftCompleteDrainNode mocks base method. -func (m *MockOpenshiftContextInterface) OpenshiftCompleteDrainNode(arg0 context.Context, arg1 *v10.Node) (bool, error) { +// OpenshiftAfterCompleteDrainNode mocks base method. 
+func (m *MockOpenshiftContextInterface) OpenshiftAfterCompleteDrainNode(arg0 context.Context, arg1 *v10.Node) (bool, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "OpenshiftCompleteDrainNode", arg0, arg1) + ret := m.ctrl.Call(m, "OpenshiftAfterCompleteDrainNode", arg0, arg1) ret0, _ := ret[0].(bool) ret1, _ := ret[1].(error) return ret0, ret1 } -// OpenshiftCompleteDrainNode indicates an expected call of OpenshiftCompleteDrainNode. -func (mr *MockOpenshiftContextInterfaceMockRecorder) OpenshiftCompleteDrainNode(arg0, arg1 interface{}) *gomock.Call { +// OpenshiftAfterCompleteDrainNode indicates an expected call of OpenshiftAfterCompleteDrainNode. +func (mr *MockOpenshiftContextInterfaceMockRecorder) OpenshiftAfterCompleteDrainNode(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OpenshiftCompleteDrainNode", reflect.TypeOf((*MockOpenshiftContextInterface)(nil).OpenshiftCompleteDrainNode), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OpenshiftAfterCompleteDrainNode", reflect.TypeOf((*MockOpenshiftContextInterface)(nil).OpenshiftAfterCompleteDrainNode), arg0, arg1) } -// OpenshiftDrainNode mocks base method. -func (m *MockOpenshiftContextInterface) OpenshiftDrainNode(arg0 context.Context, arg1 *v10.Node) (bool, error) { +// OpenshiftBeforeDrainNode mocks base method. +func (m *MockOpenshiftContextInterface) OpenshiftBeforeDrainNode(arg0 context.Context, arg1 *v10.Node) (bool, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "OpenshiftDrainNode", arg0, arg1) + ret := m.ctrl.Call(m, "OpenshiftBeforeDrainNode", arg0, arg1) ret0, _ := ret[0].(bool) ret1, _ := ret[1].(error) return ret0, ret1 } -// OpenshiftDrainNode indicates an expected call of OpenshiftDrainNode. -func (mr *MockOpenshiftContextInterfaceMockRecorder) OpenshiftDrainNode(arg0, arg1 interface{}) *gomock.Call { +// OpenshiftBeforeDrainNode indicates an expected call of OpenshiftBeforeDrainNode. 
+func (mr *MockOpenshiftContextInterfaceMockRecorder) OpenshiftBeforeDrainNode(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OpenshiftDrainNode", reflect.TypeOf((*MockOpenshiftContextInterface)(nil).OpenshiftDrainNode), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OpenshiftBeforeDrainNode", reflect.TypeOf((*MockOpenshiftContextInterface)(nil).OpenshiftBeforeDrainNode), arg0, arg1) } diff --git a/pkg/platforms/openshift/openshift.go b/pkg/platforms/openshift/openshift.go index 4aecece60..849fbc234 100644 --- a/pkg/platforms/openshift/openshift.go +++ b/pkg/platforms/openshift/openshift.go @@ -35,8 +35,8 @@ type OpenshiftContextInterface interface { IsOpenshiftCluster() bool IsHypershift() bool - OpenshiftDrainNode(context.Context, *corev1.Node) (bool, error) - OpenshiftCompleteDrainNode(context.Context, *corev1.Node) (bool, error) + OpenshiftBeforeDrainNode(context.Context, *corev1.Node) (bool, error) + OpenshiftAfterCompleteDrainNode(context.Context, *corev1.Node) (bool, error) GetNodeMachinePoolName(context.Context, *corev1.Node) (string, error) ChangeMachineConfigPoolPause(context.Context, *mcv1.MachineConfigPool, bool) error @@ -98,7 +98,7 @@ func (c *openshiftContext) IsHypershift() bool { return c.openshiftFlavor == OpenshiftFlavorHypershift } -func (c *openshiftContext) OpenshiftDrainNode(ctx context.Context, node *corev1.Node) (bool, error) { +func (c *openshiftContext) OpenshiftBeforeDrainNode(ctx context.Context, node *corev1.Node) (bool, error) { // if it's not an openshift cluster we just return true that the operator manage to drain the node if !c.IsOpenshiftCluster() { return true, nil @@ -198,7 +198,7 @@ func (c *openshiftContext) OpenshiftDrainNode(ctx context.Context, node *corev1. 
return true, nil } -func (c *openshiftContext) OpenshiftCompleteDrainNode(ctx context.Context, node *corev1.Node) (bool, error) { +func (c *openshiftContext) OpenshiftAfterCompleteDrainNode(ctx context.Context, node *corev1.Node) (bool, error) { // if it's not an openshift cluster we just return true that the operator manage to drain the node if !c.IsOpenshiftCluster() { return true, nil diff --git a/pkg/plugins/mellanox/mellanox_plugin.go b/pkg/plugins/mellanox/mellanox_plugin.go index ed9341df3..6b1b943de 100644 --- a/pkg/plugins/mellanox/mellanox_plugin.go +++ b/pkg/plugins/mellanox/mellanox_plugin.go @@ -47,6 +47,10 @@ func (p *MellanoxPlugin) Spec() string { // OnNodeStateChange Invoked when SriovNetworkNodeState CR is created or updated, return if need dain and/or reboot node func (p *MellanoxPlugin) OnNodeStateChange(new *sriovnetworkv1.SriovNetworkNodeState) (needDrain bool, needReboot bool, err error) { log.Log.Info("mellanox plugin OnNodeStateChange()") + + needDrain = false + needReboot = false + err = nil attributesToChange = map[string]mlx.MlxNic{} mellanoxNicsStatus = map[string]map[string]sriovnetworkv1.InterfaceExt{} mellanoxNicsSpec = map[string]sriovnetworkv1.Interface{} diff --git a/pkg/vars/vars.go b/pkg/vars/vars.go index 0564755f1..ce7a6c028 100644 --- a/pkg/vars/vars.go +++ b/pkg/vars/vars.go @@ -60,8 +60,7 @@ var ( // PfPhysPortNameRe regex to find switchdev devices on the host PfPhysPortNameRe = regexp.MustCompile(`p\d+`) - // Namespace contains k8s namespace - Namespace = "" + ResourcePrefix = "" // DisableablePlugins contains which plugins can be disabled in sriov config daemon DisableablePlugins = map[string]struct{}{"mellanox": {}} @@ -84,5 +83,5 @@ func init() { Destdir = destdir } - Namespace = os.Getenv("NAMESPACE") + ResourcePrefix = os.Getenv("RESOURCE_PREFIX") }
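The sketch below is an editorial illustration and is not part of the patch: a minimal, hypothetical example of the drain handshake that the hunks above implement, assuming the helpers in pkg/utils, pkg/consts and pkg/vars behave as shown in the diff. The function names requestDrain and drainDone are invented for illustration only.

package example

import (
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/client"

	sriovnetworkv1 "github.com/k8snetworkplumbingwg/sriov-network-operator/api/v1"
	consts "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/consts"
	"github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/utils"
	"github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/vars"
)

// requestDrain is the daemon-side half of the handshake: it annotates the node
// with Drain_Required (or Reboot_Required) and mirrors the request on the
// SriovNetworkNodeState object, which is what the drain controller reconciles on.
func requestDrain(c client.Client, nodeState *sriovnetworkv1.SriovNetworkNodeState, reboot bool) error {
	request := consts.DrainRequired
	if reboot {
		request = consts.RebootRequired
	}
	if err := utils.AnnotateNode(vars.NodeName, consts.NodeDrainAnnotation, request, c); err != nil {
		return fmt.Errorf("failed to annotate node %s: %w", vars.NodeName, err)
	}
	return utils.AnnotateObject(nodeState, consts.NodeStateDrainAnnotation, request, c)
}

// drainDone reports whether the drain controller has flipped the "current"
// annotation on the nodeState to DrainComplete, i.e. the node is drained and
// the daemon may proceed with configuration.
func drainDone(nodeState *sriovnetworkv1.SriovNetworkNodeState) bool {
	return utils.ObjectHasAnnotation(nodeState, consts.NodeStateDrainAnnotationCurrent, consts.DrainComplete)
}

When the drain request is set back to Idle, the controller uncordons the node through CompleteDrainNode, as the drain_controller.go and drainer.go hunks above show.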