Merge pull request #100 from bowei/cleanup
Deprecate --use-real-cloud and --verbose flags
nicksardo committed Jan 6, 2018
2 parents e6312d1 + ccd081e commit ca7eaa8
Showing 5 changed files with 58 additions and 54 deletions.
cmd/glbc/app/clients.go: 8 changes (4 additions, 4 deletions)
@@ -69,7 +69,7 @@ func NewGCEClient(config io.Reader) *gce.GCECloud {
if err != nil {
glog.Fatalf("Error while reading entire config: %v", err)
}
glog.V(2).Infof("Using cloudprovider config file:\n%v ", string(allConfig))
glog.V(4).Infof("Using cloudprovider config file: %q", string(allConfig))

getConfigReader = func() io.Reader {
return bytes.NewReader(allConfig)
@@ -83,9 +83,9 @@ func NewGCEClient(config io.Reader) *gce.GCECloud {
// No errors are thrown. So we need to keep retrying till it works because
// we know we're on GCE.
for {
cloudInterface, err := cloudprovider.GetCloudProvider("gce", getConfigReader())
provider, err := cloudprovider.GetCloudProvider("gce", getConfigReader())
if err == nil {
cloud := cloudInterface.(*gce.GCECloud)
cloud := provider.(*gce.GCECloud)

// If this controller is scheduled on a node without compute/rw
// it won't be allowed to list backends. We can assume that the
@@ -98,7 +98,7 @@ func NewGCEClient(config io.Reader) *gce.GCECloud {
}
glog.Warningf("Failed to list backend services, retrying: %v", err)
} else {
glog.Warningf("Failed to retrieve cloud interface, retrying: %v", err)
glog.Warningf("Failed to get cloud provider, retrying: %v", err)
}
time.Sleep(cloudClientRetryInterval)
}
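The hunks above only rename variables; the surrounding logic stays the same: the controller keeps retrying until a GCE cloud provider can be constructed, because it knows it is running on GCE. A minimal, self-contained sketch of that retry pattern, where getProvider, the interval value, and the simulated failures are stand-ins, not glbc code:

package main

import (
	"errors"
	"log"
	"time"
)

// cloudClientRetryInterval is an assumed stand-in for the glbc constant.
const cloudClientRetryInterval = 2 * time.Second

var attempts int

// getProvider stands in for cloudprovider.GetCloudProvider("gce", getConfigReader()).
// It fails twice to simulate the metadata server not being reachable yet.
func getProvider() (string, error) {
	attempts++
	if attempts < 3 {
		return "", errors.New("metadata server not reachable yet")
	}
	return "gce-cloud-provider", nil
}

func main() {
	// The real controller retries forever because it knows it is running on GCE
	// and the provider will eventually become available.
	for {
		provider, err := getProvider()
		if err == nil {
			log.Printf("got cloud provider: %v", provider)
			return
		}
		log.Printf("Failed to get cloud provider, retrying: %v", err)
		time.Sleep(cloudClientRetryInterval)
	}
}
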
cmd/glbc/app/flags.go: 16 changes (7 additions, 9 deletions)
@@ -37,7 +37,6 @@ var (
InCluster bool
KubeConfigFile string
ResyncPeriod time.Duration
UseRealCloud bool
Verbose bool
WatchNamespace string
}{}
@@ -79,14 +78,13 @@ the pod secrets for creating a Kubernetes client.`)
`Path to kubeconfig file with authorization and master location information.`)
flag.DurationVar(&Flags.ResyncPeriod, "sync-period", 30*time.Second,
`Relist and confirm cloud resources this often.`)
// TODO: Consolidate this flag and running-in-cluster. People already use
// the first one to mean "running in dev", unfortunately.
flag.BoolVar(&Flags.UseRealCloud, "use-real-cloud", false,
`Optional, if set a real cloud client is created. Only matters with
--running-in-cluster=false, i.e a real cloud is always used when this controller
is running on a Kubernetes node.`)
flag.BoolVar(&Flags.Verbose, "verbose", false,
`If true, logs are displayed at V(4), otherwise V(2).`)
flag.StringVar(&Flags.WatchNamespace, "watch-namespace", v1.NamespaceAll,
`Namespace to watch for Ingress/Services/Endpoints.`)

// Deprecated flags.
flag.BoolVar(&Flags.Verbose, "verbose", false,
`This flag is deprecated. Use -v to control verbosity.`)
flag.Bool("use-real-cloud", false,
`This flag has been deprecated and no longer has any effect.`)

}
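The net effect of this hunk is that both flags still parse but no longer drive behavior: --verbose stays bound to Flags.Verbose only so old invocations keep working, and --use-real-cloud is registered without keeping its value. A minimal sketch of that deprecation pattern, where the flag names match the diff but the surrounding program is a made-up harness:

package main

import (
	"flag"
	"fmt"
)

var verbose bool

func main() {
	// Still bound so existing invocations keep parsing, but the value is only
	// used to print a hint; verbosity is controlled by glog's -v flag instead.
	flag.BoolVar(&verbose, "verbose", false, "This flag is deprecated. Use -v to control verbosity.")
	// Registered without keeping the value: it parses cleanly and has no effect.
	flag.Bool("use-real-cloud", false, "This flag has been deprecated and no longer has any effect.")

	flag.Parse()
	if verbose {
		fmt.Println("--verbose is deprecated; pass -v=4 instead")
	}
}

Invoking the sketch with --use-real-cloud --verbose still parses; the old flags are accepted but effectively ignored, and verbosity is expected to come from glog's -v flag instead.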
cmd/glbc/app/handlers.go: 2 changes (2 additions, 0 deletions)
@@ -44,13 +44,15 @@ func RunHTTPServer(lbc *controller.LoadBalancerController) {
lbc.Stop(true)
})

glog.V(0).Infof("Running http server on :%v", Flags.HealthzPort)
glog.Fatal(http.ListenAndServe(fmt.Sprintf(":%v", Flags.HealthzPort), nil))
}

func RunSIGTERMHandler(lbc *controller.LoadBalancerController, deleteAll bool) {
// Multiple SIGTERMs will get dropped
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, syscall.SIGTERM)
glog.V(0).Infof("SIGTERM handler registered")
<-signalChan
glog.Infof("Received SIGTERM, shutting down")

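The two added lines only log at V(0); the signal handling itself is unchanged. For reference, a minimal sketch of the same SIGTERM pattern, with the controller shutdown replaced by a placeholder (lbc.Stop is not reproduced here):

package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// Buffered channel of size 1: additional SIGTERMs arriving while one is
	// already pending are dropped, matching the comment in the diff.
	signalChan := make(chan os.Signal, 1)
	signal.Notify(signalChan, syscall.SIGTERM)
	log.Print("SIGTERM handler registered")

	<-signalChan
	log.Print("Received SIGTERM, shutting down")
	// Placeholder for lbc.Stop(deleteAll) in the real controller.
	os.Exit(0)
}
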
cmd/glbc/main.go: 74 changes (36 additions, 38 deletions)
@@ -28,7 +28,6 @@ import (
"k8s.io/ingress-gce/pkg/context"
"k8s.io/ingress-gce/pkg/controller"
neg "k8s.io/ingress-gce/pkg/networkendpointgroup"
"k8s.io/ingress-gce/pkg/utils"

"k8s.io/ingress-gce/cmd/glbc/app"
)
@@ -54,52 +53,47 @@ func main() {
}

glog.V(0).Infof("Starting GLBC image: %q, cluster name %q", imageVersion, app.Flags.ClusterName)
for i, a := range os.Args {
glog.V(0).Infof("argv[%d]: %q", i, a)
}

kubeClient, err := app.NewKubeClient()
if err != nil {
glog.Fatalf("Failed to create client: %v", err)
glog.Fatalf("Failed to create kubernetes client: %v", err)
}

var namer *utils.Namer
var cloud *gce.GCECloud
var clusterManager *controller.ClusterManager

if app.Flags.InCluster || app.Flags.UseRealCloud {
namer, err = app.NewNamer(kubeClient, app.Flags.ClusterName, controller.DefaultFirewallName)
if err != nil {
glog.Fatalf("%v", err)
}

// TODO: Make this more resilient. Currently we create the cloud client
// and pass it through to all the pools. This makes unit testing easier.
// However if the cloud client suddenly fails, we should try to re-create it
// and continue.
if app.Flags.ConfigFilePath != "" {
glog.Infof("Reading config from path %v", app.Flags.ConfigFilePath)
config, err := os.Open(app.Flags.ConfigFilePath)
if err != nil {
glog.Fatalf("%v", err)
}
defer config.Close()
cloud = app.NewGCEClient(config)
glog.Infof("Successfully loaded cloudprovider using config %q", app.Flags.ConfigFilePath)
} else {
// TODO: refactor so this comment does not need to be here.
// While you might be tempted to refactor so we simply assing nil to the
// config and only invoke getGCEClient once, that will not do the right
// thing because a nil check against an interface isn't true in golang.
cloud = app.NewGCEClient(nil)
glog.Infof("Created GCE client without a config file")
}
namer, err := app.NewNamer(kubeClient, app.Flags.ClusterName, controller.DefaultFirewallName)
if err != nil {
glog.Fatalf("%v", err)
}

defaultBackendServicePort := app.DefaultBackendServicePort(kubeClient)
clusterManager, err = controller.NewClusterManager(cloud, namer, *defaultBackendServicePort, app.Flags.HealthCheckPath)
var cloud *gce.GCECloud
// TODO: Make this more resilient. Currently we create the cloud client
// and pass it through to all the pools. This makes unit testing easier.
// However if the cloud client suddenly fails, we should try to re-create it
// and continue.
if app.Flags.ConfigFilePath != "" {
glog.Infof("Reading config from path %q", app.Flags.ConfigFilePath)
config, err := os.Open(app.Flags.ConfigFilePath)
if err != nil {
glog.Fatalf("%v", err)
}
defer config.Close()
cloud = app.NewGCEClient(config)
glog.Infof("Successfully loaded cloudprovider using config %q", app.Flags.ConfigFilePath)
} else {
// Create fake cluster manager
clusterManager = controller.NewFakeClusterManager(app.Flags.ClusterName, controller.DefaultFirewallName).ClusterManager
// TODO: refactor so this comment does not need to be here.
// While you might be tempted to refactor so we simply assing nil to the
// config and only invoke getGCEClient once, that will not do the right
// thing because a nil check against an interface isn't true in golang.
cloud = app.NewGCEClient(nil)
glog.Infof("Created GCE client without a config file")
}

defaultBackendServicePort := app.DefaultBackendServicePort(kubeClient)
clusterManager, err := controller.NewClusterManager(cloud, namer, *defaultBackendServicePort, app.Flags.HealthCheckPath)
if err != nil {
glog.Fatalf("Error creating cluster manager: %v", err)
}

enableNEG := cloud.AlphaFeatureGate.Enabled(gce.AlphaFeatureNetworkEndpointGroup)
@@ -108,23 +102,27 @@ func main() {
// Start loadbalancer controller
lbc, err := controller.NewLoadBalancerController(kubeClient, stopCh, ctx, clusterManager, enableNEG)
if err != nil {
glog.Fatalf("%v", err)
glog.Fatalf("Error creating load balancer controller: %v", err)
}

if clusterManager.ClusterNamer.UID() != "" {
glog.V(0).Infof("Cluster name is %+v", clusterManager.ClusterNamer.UID())
}
clusterManager.Init(lbc.Translator, lbc.Translator)
glog.V(0).Infof("clusterManager initialized")

if enableNEG {
negController, _ := neg.NewController(kubeClient, cloud, ctx, lbc.Translator, namer, app.Flags.ResyncPeriod)
go negController.Run(stopCh)
glog.V(0).Infof("negController started")
}

go app.RunHTTPServer(lbc)
go app.RunSIGTERMHandler(lbc, app.Flags.DeleteAllOnQuit)

ctx.Start(stopCh)

glog.V(0).Infof("Starting load balancer controller")
lbc.Run()

for {
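One piece of reasoning in this file is worth spelling out: the TODO about not simply assigning nil to the config exists because a typed nil pointer stored in an interface does not compare equal to nil in Go, so a single call site that always passed a config reader could silently change behavior inside NewGCEClient. A minimal illustration of that gotcha, using names that are illustrative rather than taken from glbc:

package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	var buf *bytes.Reader      // typed nil pointer
	var config io.Reader = buf // the interface now holds (type=*bytes.Reader, value=nil)

	fmt.Println(buf == nil)    // true
	fmt.Println(config == nil) // false: the interface itself is not nil
}
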
pkg/utils/namer.go: 12 changes (9 additions, 3 deletions)
@@ -131,10 +131,16 @@ func (n *Namer) SetUID(name string) {

if strings.Contains(name, clusterNameDelimiter) {
tokens := strings.Split(name, clusterNameDelimiter)
glog.Warningf("Given name %v contains %v, taking last token in: %+v", name, clusterNameDelimiter, tokens)
glog.Warningf("Name %q contains %q, taking last token in: %+v", name, clusterNameDelimiter, tokens)
name = tokens[len(tokens)-1]
}
glog.Infof("Changing cluster name from %v to %v", n.clusterName, name)

if n.clusterName == name {
glog.V(4).Infof("Cluster name is unchanged (%q)", name)
return
}

glog.Infof("Changing cluster name from %q to %q", n.clusterName, name)
n.clusterName = name
}

@@ -144,7 +150,7 @@ func (n *Namer) SetFirewall(name string) {
defer n.nameLock.Unlock()

if n.firewallName != name {
glog.Infof("Changing firewall name from %v to %v", n.firewallName, name)
glog.Infof("Changing firewall name from %q to %q", n.firewallName, name)
n.firewallName = name
}
}
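The SetUID change above adds an early return when the computed name equals the current cluster name, which avoids a misleading "Changing cluster name" log on every resync. A small standalone example of the resulting behavior, assuming the cluster-name delimiter is "--" (the constant's value is not shown in this diff):

package main

import (
	"fmt"
	"strings"
	"sync"
)

// clusterNameDelimiter's real value is not shown in this diff; "--" is assumed.
const clusterNameDelimiter = "--"

type Namer struct {
	nameLock    sync.Mutex
	clusterName string
}

func (n *Namer) SetUID(name string) {
	n.nameLock.Lock()
	defer n.nameLock.Unlock()

	if strings.Contains(name, clusterNameDelimiter) {
		tokens := strings.Split(name, clusterNameDelimiter)
		name = tokens[len(tokens)-1]
	}
	if n.clusterName == name {
		// Unchanged: skip the misleading "Changing cluster name" log.
		return
	}
	n.clusterName = name
}

func main() {
	n := &Namer{}
	n.SetUID("k8s-fw--uid123") // keeps only the last token, "uid123"
	fmt.Println(n.clusterName)
	n.SetUID("uid123") // no-op: name is unchanged
	fmt.Println(n.clusterName)
}
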
