From ccd081e9906403ef16b7853b1bf0d8e89c6cacd7 Mon Sep 17 00:00:00 2001
From: Bowei Du
Date: Thu, 28 Dec 2017 15:38:28 -0800
Subject: [PATCH] Deprecate --use-real-cloud and --verbose flags

- Includes some minor logging cleanups
---
 cmd/glbc/app/clients.go  |  8 ++---
 cmd/glbc/app/flags.go    | 16 ++++-----
 cmd/glbc/app/handlers.go |  2 ++
 cmd/glbc/main.go         | 74 +++++++++++++++++++---------------
 pkg/utils/namer.go       | 12 +++++--
 5 files changed, 58 insertions(+), 54 deletions(-)

diff --git a/cmd/glbc/app/clients.go b/cmd/glbc/app/clients.go
index 817509a79d..429773a78c 100644
--- a/cmd/glbc/app/clients.go
+++ b/cmd/glbc/app/clients.go
@@ -69,7 +69,7 @@ func NewGCEClient(config io.Reader) *gce.GCECloud {
 		if err != nil {
 			glog.Fatalf("Error while reading entire config: %v", err)
 		}
-		glog.V(2).Infof("Using cloudprovider config file:\n%v ", string(allConfig))
+		glog.V(4).Infof("Using cloudprovider config file: %q", string(allConfig))
 
 		getConfigReader = func() io.Reader {
 			return bytes.NewReader(allConfig)
@@ -83,9 +83,9 @@ func NewGCEClient(config io.Reader) *gce.GCECloud {
 	// No errors are thrown. So we need to keep retrying till it works because
 	// we know we're on GCE.
 	for {
-		cloudInterface, err := cloudprovider.GetCloudProvider("gce", getConfigReader())
+		provider, err := cloudprovider.GetCloudProvider("gce", getConfigReader())
 		if err == nil {
-			cloud := cloudInterface.(*gce.GCECloud)
+			cloud := provider.(*gce.GCECloud)
 
 			// If this controller is scheduled on a node without compute/rw
 			// it won't be allowed to list backends. We can assume that the
@@ -98,7 +98,7 @@ func NewGCEClient(config io.Reader) *gce.GCECloud {
 			}
 			glog.Warningf("Failed to list backend services, retrying: %v", err)
 		} else {
-			glog.Warningf("Failed to retrieve cloud interface, retrying: %v", err)
+			glog.Warningf("Failed to get cloud provider, retrying: %v", err)
 		}
 		time.Sleep(cloudClientRetryInterval)
 	}
diff --git a/cmd/glbc/app/flags.go b/cmd/glbc/app/flags.go
index d28013ca3d..2d0d1a15b6 100644
--- a/cmd/glbc/app/flags.go
+++ b/cmd/glbc/app/flags.go
@@ -37,7 +37,6 @@ var (
 		InCluster      bool
 		KubeConfigFile string
 		ResyncPeriod   time.Duration
-		UseRealCloud   bool
 		Verbose        bool
 		WatchNamespace string
 	}{}
@@ -79,14 +78,13 @@ the pod secrets for creating a Kubernetes client.`)
 		`Path to kubeconfig file with authorization and master location information.`)
 	flag.DurationVar(&Flags.ResyncPeriod, "sync-period", 30*time.Second,
 		`Relist and confirm cloud resources this often.`)
-	// TODO: Consolidate this flag and running-in-cluster. People already use
-	// the first one to mean "running in dev", unfortunately.
-	flag.BoolVar(&Flags.UseRealCloud, "use-real-cloud", false,
-		`Optional, if set a real cloud client is created. Only matters with
---running-in-cluster=false, i.e a real cloud is always used when this controller
-is running on a Kubernetes node.`)
-	flag.BoolVar(&Flags.Verbose, "verbose", false,
-		`If true, logs are displayed at V(4), otherwise V(2).`)
 	flag.StringVar(&Flags.WatchNamespace, "watch-namespace", v1.NamespaceAll,
 		`Namespace to watch for Ingress/Services/Endpoints.`)
+
+	// Deprecated flags.
+	flag.BoolVar(&Flags.Verbose, "verbose", false,
+		`This flag is deprecated. Use -v to control verbosity.`)
+	flag.Bool("use-real-cloud", false,
+		`This flag has been deprecated and no longer has any effect.`)
+
 }
diff --git a/cmd/glbc/app/handlers.go b/cmd/glbc/app/handlers.go
index b6c203b580..157e79e34f 100644
--- a/cmd/glbc/app/handlers.go
+++ b/cmd/glbc/app/handlers.go
@@ -44,6 +44,7 @@ func RunHTTPServer(lbc *controller.LoadBalancerController) {
 		lbc.Stop(true)
 	})
 
+	glog.V(0).Infof("Running http server on :%v", Flags.HealthzPort)
 	glog.Fatal(http.ListenAndServe(fmt.Sprintf(":%v", Flags.HealthzPort), nil))
 }
 
@@ -51,6 +52,7 @@ func RunSIGTERMHandler(lbc *controller.LoadBalancerController, deleteAll bool) {
 	// Multiple SIGTERMs will get dropped
 	signalChan := make(chan os.Signal, 1)
 	signal.Notify(signalChan, syscall.SIGTERM)
+	glog.V(0).Infof("SIGTERM handler registered")
 	<-signalChan
 	glog.Infof("Received SIGTERM, shutting down")
diff --git a/cmd/glbc/main.go b/cmd/glbc/main.go
index 870c5962c9..5e8e4c8caf 100644
--- a/cmd/glbc/main.go
+++ b/cmd/glbc/main.go
@@ -28,7 +28,6 @@ import (
 	"k8s.io/ingress-gce/pkg/context"
 	"k8s.io/ingress-gce/pkg/controller"
 	neg "k8s.io/ingress-gce/pkg/networkendpointgroup"
-	"k8s.io/ingress-gce/pkg/utils"
 
 	"k8s.io/ingress-gce/cmd/glbc/app"
 )
@@ -54,52 +53,47 @@ func main() {
 	}
 	glog.V(0).Infof("Starting GLBC image: %q, cluster name %q", imageVersion, app.Flags.ClusterName)
+	for i, a := range os.Args {
+		glog.V(0).Infof("argv[%d]: %q", i, a)
+	}
 
 	kubeClient, err := app.NewKubeClient()
 	if err != nil {
-		glog.Fatalf("Failed to create client: %v", err)
+		glog.Fatalf("Failed to create kubernetes client: %v", err)
 	}
 
-	var namer *utils.Namer
-	var cloud *gce.GCECloud
-	var clusterManager *controller.ClusterManager
-
-	if app.Flags.InCluster || app.Flags.UseRealCloud {
-		namer, err = app.NewNamer(kubeClient, app.Flags.ClusterName, controller.DefaultFirewallName)
-		if err != nil {
-			glog.Fatalf("%v", err)
-		}
-
-		// TODO: Make this more resilient. Currently we create the cloud client
-		// and pass it through to all the pools. This makes unit testing easier.
-		// However if the cloud client suddenly fails, we should try to re-create it
-		// and continue.
-		if app.Flags.ConfigFilePath != "" {
-			glog.Infof("Reading config from path %v", app.Flags.ConfigFilePath)
-			config, err := os.Open(app.Flags.ConfigFilePath)
-			if err != nil {
-				glog.Fatalf("%v", err)
-			}
-			defer config.Close()
-			cloud = app.NewGCEClient(config)
-			glog.Infof("Successfully loaded cloudprovider using config %q", app.Flags.ConfigFilePath)
-		} else {
-			// TODO: refactor so this comment does not need to be here.
-			// While you might be tempted to refactor so we simply assing nil to the
-			// config and only invoke getGCEClient once, that will not do the right
-			// thing because a nil check against an interface isn't true in golang.
-			cloud = app.NewGCEClient(nil)
-			glog.Infof("Created GCE client without a config file")
-		}
+	namer, err := app.NewNamer(kubeClient, app.Flags.ClusterName, controller.DefaultFirewallName)
+	if err != nil {
+		glog.Fatalf("%v", err)
+	}
 
-		defaultBackendServicePort := app.DefaultBackendServicePort(kubeClient)
-		clusterManager, err = controller.NewClusterManager(cloud, namer, *defaultBackendServicePort, app.Flags.HealthCheckPath)
+	var cloud *gce.GCECloud
+	// TODO: Make this more resilient. Currently we create the cloud client
+	// and pass it through to all the pools. This makes unit testing easier.
+	// However if the cloud client suddenly fails, we should try to re-create it
+	// and continue.
+	if app.Flags.ConfigFilePath != "" {
+		glog.Infof("Reading config from path %q", app.Flags.ConfigFilePath)
+		config, err := os.Open(app.Flags.ConfigFilePath)
 		if err != nil {
 			glog.Fatalf("%v", err)
 		}
+		defer config.Close()
+		cloud = app.NewGCEClient(config)
+		glog.Infof("Successfully loaded cloudprovider using config %q", app.Flags.ConfigFilePath)
 	} else {
-		// Create fake cluster manager
-		clusterManager = controller.NewFakeClusterManager(app.Flags.ClusterName, controller.DefaultFirewallName).ClusterManager
+		// TODO: refactor so this comment does not need to be here.
+		// While you might be tempted to refactor so we simply assing nil to the
+		// config and only invoke getGCEClient once, that will not do the right
+		// thing because a nil check against an interface isn't true in golang.
+		cloud = app.NewGCEClient(nil)
+		glog.Infof("Created GCE client without a config file")
+	}
+
+	defaultBackendServicePort := app.DefaultBackendServicePort(kubeClient)
+	clusterManager, err := controller.NewClusterManager(cloud, namer, *defaultBackendServicePort, app.Flags.HealthCheckPath)
+	if err != nil {
+		glog.Fatalf("Error creating cluster manager: %v", err)
 	}
 
 	enableNEG := cloud.AlphaFeatureGate.Enabled(gce.AlphaFeatureNetworkEndpointGroup)
@@ -108,23 +102,27 @@ func main() {
 	// Start loadbalancer controller
 	lbc, err := controller.NewLoadBalancerController(kubeClient, stopCh, ctx, clusterManager, enableNEG)
 	if err != nil {
-		glog.Fatalf("%v", err)
+		glog.Fatalf("Error creating load balancer controller: %v", err)
 	}
 
 	if clusterManager.ClusterNamer.UID() != "" {
 		glog.V(0).Infof("Cluster name is %+v", clusterManager.ClusterNamer.UID())
 	}
 
 	clusterManager.Init(lbc.Translator, lbc.Translator)
+	glog.V(0).Infof("clusterManager initialized")
 
 	if enableNEG {
 		negController, _ := neg.NewController(kubeClient, cloud, ctx, lbc.Translator, namer, app.Flags.ResyncPeriod)
 		go negController.Run(stopCh)
+		glog.V(0).Infof("negController started")
 	}
 
 	go app.RunHTTPServer(lbc)
 	go app.RunSIGTERMHandler(lbc, app.Flags.DeleteAllOnQuit)
 
 	ctx.Start(stopCh)
+
+	glog.V(0).Infof("Starting load balancer controller")
 	lbc.Run()
 
 	for {
diff --git a/pkg/utils/namer.go b/pkg/utils/namer.go
index 0caa1e6cde..af6af7da4f 100644
--- a/pkg/utils/namer.go
+++ b/pkg/utils/namer.go
@@ -131,10 +131,16 @@ func (n *Namer) SetUID(name string) {
 
 	if strings.Contains(name, clusterNameDelimiter) {
 		tokens := strings.Split(name, clusterNameDelimiter)
-		glog.Warningf("Given name %v contains %v, taking last token in: %+v", name, clusterNameDelimiter, tokens)
+		glog.Warningf("Name %q contains %q, taking last token in: %+v", name, clusterNameDelimiter, tokens)
 		name = tokens[len(tokens)-1]
 	}
-	glog.Infof("Changing cluster name from %v to %v", n.clusterName, name)
+
+	if n.clusterName == name {
+		glog.V(4).Infof("Cluster name is unchanged (%q)", name)
+		return
+	}
+
+	glog.Infof("Changing cluster name from %q to %q", n.clusterName, name)
 	n.clusterName = name
 }
 
@@ -144,7 +150,7 @@ func (n *Namer) SetFirewall(name string) {
 	defer n.nameLock.Unlock()
 
 	if n.firewallName != name {
-		glog.Infof("Changing firewall name from %v to %v", n.firewallName, name)
+		glog.Infof("Changing firewall name from %q to %q", n.firewallName, name)
 		n.firewallName = name
 	}
 }
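Note (illustrative, not part of the patch): the flags.go hunk above keeps the deprecated flags registered so existing command lines still parse, while their values no longer change behavior; verbosity is instead controlled through glog's -v flag. The following is a minimal standalone Go sketch of that pattern using only the standard flag package; the binary and names here are hypothetical and not taken from the controller.

package main

import (
	"flag"
	"fmt"
)

func main() {
	// Deprecated flag kept only so old invocations such as --verbose still parse;
	// its value is read but deliberately never used to change behavior.
	verbose := flag.Bool("verbose", false,
		"This flag is deprecated. Use -v to control verbosity.")
	// Registered purely for backwards compatibility; the returned *bool is
	// discarded, so --use-real-cloud parses but has no effect at all.
	flag.Bool("use-real-cloud", false,
		"This flag has been deprecated and no longer has any effect.")
	flag.Parse()

	fmt.Printf("verbose (parsed but ignored): %v\n", *verbose)
}

Removing the flags outright would presumably break deployments that still pass them, which is why the patch keeps the registrations but ignores the values.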