From 18092bd28967470bef43ba2c2c2ea61bab6e3d57 Mon Sep 17 00:00:00 2001 From: shalper2 <99686388+shalper2@users.noreply.github.com> Date: Mon, 4 Mar 2024 16:57:46 -0600 Subject: [PATCH] Splunkent update conf (#31183) **Description:** Make changes to configuration of the application to allow the user to specify endpoints corresponding to different Splunk node types. Specifically, this update will allow users to define three separate clients: indexer, cluster master, and search head. This change will allow for the addition of metrics corresponding to these different modes of operation within the Splunk enterprise deployment. **Link to tracking Issue:** [30254](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/30254) **Testing:** Unit tests were updated to run against new configuration options. **Documentation:** Updated README to reflect the new changes in configuration. --- .chloggen/splunkent-update-conf.yaml | 27 ++++ receiver/splunkenterprisereceiver/README.md | 34 +++- receiver/splunkenterprisereceiver/client.go | 146 +++++++++++++++--- .../splunkenterprisereceiver/client_test.go | 24 ++- receiver/splunkenterprisereceiver/config.go | 51 ++++-- .../splunkenterprisereceiver/config_test.go | 79 +++++++++- receiver/splunkenterprisereceiver/factory.go | 5 +- .../splunkenterprisereceiver/factory_test.go | 8 +- receiver/splunkenterprisereceiver/scraper.go | 135 ++++++++-------- .../splunkenterprisereceiver/scraper_test.go | 23 +-- .../testdata/config.yaml | 18 ++- 11 files changed, 407 insertions(+), 143 deletions(-) create mode 100755 .chloggen/splunkent-update-conf.yaml diff --git a/.chloggen/splunkent-update-conf.yaml b/.chloggen/splunkent-update-conf.yaml new file mode 100755 index 000000000000..8fb017005b6f --- /dev/null +++ b/.chloggen/splunkent-update-conf.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. 
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: 'enhancement'
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: splunkentreceiver
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: "Updated the config.go and propagated these changes to other receiver components. Change was necessary to differentiate different configurable endpoints."
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [30254]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: [user]
diff --git a/receiver/splunkenterprisereceiver/README.md b/receiver/splunkenterprisereceiver/README.md
index e45319366868..969b6eb0d694 100644
--- a/receiver/splunkenterprisereceiver/README.md
+++ b/receiver/splunkenterprisereceiver/README.md
@@ -8,7 +8,9 @@ jobs.
 
 ## Configuration
 
-The following settings are required, omitting them will either cause your receiver to fail to compile or result in 4/5xx return codes during scraping.
+The following settings are required, omitting them will either cause your receiver to fail to compile or result in 4/5xx return codes during scraping. 
+ +**NOTE:** These must be set for each Splunk instance type (indexer, search head, or cluster master) from which you wish to pull metrics. At present, only one of each type is accepted, per configured receiver instance. This means, for example, that if you have three different "indexer" type instances that you would like to pull metrics from you will need to configure three different `splunkenterprise` receivers for each indexer node you wish to monitor. * `basicauth` (from [basicauthextension](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/extension/basicauthextension)): A configured stanza for the basicauthextension. * `auth` (no default): String name referencing your auth extension. @@ -23,16 +25,38 @@ Example: ```yaml extensions: - basicauth/client: + basicauth/indexer: + client_auth: + username: admin + password: securityFirst + basicauth/cluster_master: client_auth: username: admin password: securityFirst receivers: splunkenterprise: - auth: basicauth/client - endpoint: "https://localhost:8089" - timeout: 45s + indexer: + auth: + authenticator: basicauth/indexer + endpoint: "https://localhost:8089" + timeout: 45s + cluster_master: + auth: + authenticator: basicauth/cluster_master + endpoint: "https://localhost:8089" + timeout: 45s + +exporters: + logging: + loglevel: info + +service: + extensions: [basicauth/indexer, basicauth/cluster_master] + pipelines: + metrics: + receivers: [splunkenterprise] + exporters: [logging] ``` For a full list of settings exposed by this receiver please look [here](./config.go) with a detailed configuration [here](./testdata/config.yaml). 
diff --git a/receiver/splunkenterprisereceiver/client.go b/receiver/splunkenterprisereceiver/client.go index dcd1ee89f48c..6c0bb419f766 100644 --- a/receiver/splunkenterprisereceiver/client.go +++ b/receiver/splunkenterprisereceiver/client.go @@ -5,6 +5,7 @@ package splunkenterprisereceiver // import "github.com/open-telemetry/openteleme import ( "context" + "errors" "fmt" "net/http" "net/url" @@ -13,38 +14,111 @@ import ( "go.opentelemetry.io/collector/component" ) +// Indexer type "enum". Included in context sent from scraper functions +const ( + typeIdx = "IDX" + typeSh = "SH" + typeCm = "CM" +) + +var ( + errCtxMissingEndpointType = errors.New("context was passed without the endpoint type included") + errEndpointTypeNotFound = errors.New("requested client is not configured and could not be found in splunkEntClient") + errNoClientFound = errors.New("no client corresponding to the endpoint type was found") +) + +// Type wrapper for accessing context value +type endpointType string + +// Wrapper around splunkClientMap to avoid awkward reference/dereference stuff that arises when using maps in golang type splunkEntClient struct { + clients splunkClientMap +} + +// The splunkEntClient is made up of a number of splunkClients defined for each configured endpoint +type splunkClientMap map[any]splunkClient + +// The client does not carry the endpoint that is configured with it and golang does not support mixed +// type arrays so this struct contains the pair: the client configured for the endpoint and the endpoint +// itself +type splunkClient struct { client *http.Client endpoint *url.URL } func newSplunkEntClient(cfg *Config, h component.Host, s component.TelemetrySettings) (*splunkEntClient, error) { - client, err := cfg.ClientConfig.ToClient(h, s) - if err != nil { - return nil, err + var err error + var e *url.URL + var c *http.Client + clientMap := make(splunkClientMap) + + // if the endpoint is defined, put it in the endpoints map for later use + // we 
already checked that url.Parse does not fail in cfg.Validate() + if cfg.IdxEndpoint.Endpoint != "" { + e, _ = url.Parse(cfg.IdxEndpoint.Endpoint) + c, err = cfg.IdxEndpoint.ToClient(h, s) + if err != nil { + return nil, err + } + clientMap[typeIdx] = splunkClient{ + client: c, + endpoint: e, + } + } + if cfg.SHEndpoint.Endpoint != "" { + e, _ = url.Parse(cfg.SHEndpoint.Endpoint) + c, err = cfg.SHEndpoint.ToClient(h, s) + if err != nil { + return nil, err + } + clientMap[typeSh] = splunkClient{ + client: c, + endpoint: e, + } + } + if cfg.CMEndpoint.Endpoint != "" { + e, _ = url.Parse(cfg.CMEndpoint.Endpoint) + c, err = cfg.CMEndpoint.ToClient(h, s) + if err != nil { + return nil, err + } + clientMap[typeCm] = splunkClient{ + client: c, + endpoint: e, + } } - endpoint, _ := url.Parse(cfg.Endpoint) - - return &splunkEntClient{ - client: client, - endpoint: endpoint, - }, nil + return &splunkEntClient{clients: clientMap}, nil } // For running ad hoc searches only -func (c *splunkEntClient) createRequest(ctx context.Context, sr *searchResponse) (*http.Request, error) { +func (c *splunkEntClient) createRequest(ctx context.Context, sr *searchResponse) (req *http.Request, err error) { + // get endpoint type from the context + eptType := ctx.Value(endpointType("type")) + if eptType == nil { + return nil, errCtxMissingEndpointType + } + // Running searches via Splunk's REST API is a two step process: First you submit the job to run // this returns a jobid which is then used in the second part to retrieve the search results if sr.Jobid == nil { + var u string path := "/services/search/jobs/" - url, _ := url.JoinPath(c.endpoint.String(), path) + + if e, ok := c.clients[eptType]; ok { + u, err = url.JoinPath(e.endpoint.String(), path) + if err != nil { + return nil, err + } + } else { + return nil, errNoClientFound + } // reader for the response data data := strings.NewReader(sr.search) // return the build request, ready to be run by makeRequest - req, err := 
http.NewRequestWithContext(ctx, http.MethodPost, url, data) + req, err = http.NewRequestWithContext(ctx, http.MethodPost, u, data) if err != nil { return nil, err } @@ -52,9 +126,9 @@ func (c *splunkEntClient) createRequest(ctx context.Context, sr *searchResponse) return req, nil } path := fmt.Sprintf("/services/search/jobs/%s/results", *sr.Jobid) - url, _ := url.JoinPath(c.endpoint.String(), path) + url, _ := url.JoinPath(c.clients[eptType].endpoint.String(), path) - req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + req, err = http.NewRequestWithContext(ctx, http.MethodGet, url, nil) if err != nil { return nil, err } @@ -62,10 +136,23 @@ func (c *splunkEntClient) createRequest(ctx context.Context, sr *searchResponse) return req, nil } -func (c *splunkEntClient) createAPIRequest(ctx context.Context, apiEndpoint string) (*http.Request, error) { - url := c.endpoint.String() + apiEndpoint +// forms an *http.Request for use with Splunk built-in API's (like introspection). +func (c *splunkEntClient) createAPIRequest(ctx context.Context, apiEndpoint string) (req *http.Request, err error) { + var u string + + // get endpoint type from the context + eptType := ctx.Value(endpointType("type")) + if eptType == nil { + return nil, errCtxMissingEndpointType + } + + if e, ok := c.clients[eptType]; ok { + u = e.endpoint.String() + apiEndpoint + } else { + return nil, errNoClientFound + } - req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + req, err = http.NewRequestWithContext(ctx, http.MethodGet, u, nil) if err != nil { return nil, err } @@ -73,13 +160,26 @@ func (c *splunkEntClient) createAPIRequest(ctx context.Context, apiEndpoint stri return req, nil } -// Construct and perform a request to the API. Returns the searchResponse passed into the -// function as state +// Perform a request. 
func (c *splunkEntClient) makeRequest(req *http.Request) (*http.Response, error) { - res, err := c.client.Do(req) - if err != nil { - return nil, err + // get endpoint type from the context + eptType := req.Context().Value(endpointType("type")) + if eptType == nil { + return nil, errCtxMissingEndpointType } + if sc, ok := c.clients[eptType]; ok { + res, err := sc.client.Do(req) + if err != nil { + return nil, err + } + return res, nil + } + return nil, errEndpointTypeNotFound +} - return res, nil +// Check if the splunkEntClient contains a configured endpoint for the type of scraper +// Returns true if an entry exists, false if not. +func (c *splunkEntClient) isConfigured(v string) bool { + _, ok := c.clients[v] + return ok } diff --git a/receiver/splunkenterprisereceiver/client_test.go b/receiver/splunkenterprisereceiver/client_test.go index 83ea4caf1bb4..8dd873da9f02 100644 --- a/receiver/splunkenterprisereceiver/client_test.go +++ b/receiver/splunkenterprisereceiver/client_test.go @@ -34,11 +34,9 @@ func (m *mockHost) GetExtensions() map[component.ID]component.Component { func TestClientCreation(t *testing.T) { cfg := &Config{ - ClientConfig: confighttp.ClientConfig{ + IdxEndpoint: confighttp.ClientConfig{ Endpoint: "https://localhost:8089", - Auth: &configauth.Authentication{ - AuthenticatorID: component.MustNewIDWithName("basicauth", "client"), - }, + Auth: &configauth.Authentication{AuthenticatorID: component.MustNewIDWithName("basicauth", "client")}, }, ScraperControllerSettings: scraperhelper.ScraperControllerSettings{ CollectionInterval: 10 * time.Second, @@ -58,18 +56,16 @@ func TestClientCreation(t *testing.T) { testEndpoint, _ := url.Parse("https://localhost:8089") - require.Equal(t, client.endpoint, testEndpoint) + require.Equal(t, testEndpoint, client.clients[typeIdx].endpoint) } // test functionality of createRequest which is used for building metrics out of // ad-hoc searches func TestClientCreateRequest(t *testing.T) { cfg := &Config{ - 
ClientConfig: confighttp.ClientConfig{ + IdxEndpoint: confighttp.ClientConfig{ Endpoint: "https://localhost:8089", - Auth: &configauth.Authentication{ - AuthenticatorID: component.MustNewIDWithName("basicauth", "client"), - }, + Auth: &configauth.Authentication{AuthenticatorID: component.MustNewIDWithName("basicauth", "client")}, }, ScraperControllerSettings: scraperhelper.ScraperControllerSettings{ CollectionInterval: 10 * time.Second, @@ -131,6 +127,7 @@ func TestClientCreateRequest(t *testing.T) { } ctx := context.Background() + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) for _, test := range tests { t.Run(test.desc, func(t *testing.T) { req, err := test.client.createRequest(ctx, test.sr) @@ -147,11 +144,9 @@ func TestClientCreateRequest(t *testing.T) { // createAPIRequest creates a request for api calls i.e. to introspection endpoint func TestAPIRequestCreate(t *testing.T) { cfg := &Config{ - ClientConfig: confighttp.ClientConfig{ + IdxEndpoint: confighttp.ClientConfig{ Endpoint: "https://localhost:8089", - Auth: &configauth.Authentication{ - AuthenticatorID: component.MustNewIDWithName("basicauth", "client"), - }, + Auth: &configauth.Authentication{AuthenticatorID: component.MustNewIDWithName("basicauth", "client")}, }, ScraperControllerSettings: scraperhelper.ScraperControllerSettings{ CollectionInterval: 10 * time.Second, @@ -171,11 +166,12 @@ func TestAPIRequestCreate(t *testing.T) { require.NoError(t, err) ctx := context.Background() + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) req, err := client.createAPIRequest(ctx, "/test/endpoint") require.NoError(t, err) // build the expected request - expectedURL := client.endpoint.String() + "/test/endpoint" + expectedURL := client.clients[typeIdx].endpoint.String() + "/test/endpoint" expected, _ := http.NewRequest(http.MethodGet, expectedURL, nil) require.Equal(t, expected.URL, req.URL) diff --git a/receiver/splunkenterprisereceiver/config.go 
b/receiver/splunkenterprisereceiver/config.go index 9720851aa21d..f9ac3488aa65 100644 --- a/receiver/splunkenterprisereceiver/config.go +++ b/receiver/splunkenterprisereceiver/config.go @@ -22,32 +22,55 @@ var ( ) type Config struct { - confighttp.ClientConfig `mapstructure:",squash"` scraperhelper.ScraperControllerSettings `mapstructure:",squash"` metadata.MetricsBuilderConfig `mapstructure:",squash"` + IdxEndpoint confighttp.ClientConfig `mapstructure:"indexer"` + SHEndpoint confighttp.ClientConfig `mapstructure:"search_head"` + CMEndpoint confighttp.ClientConfig `mapstructure:"cluster_master"` } func (cfg *Config) Validate() (errors error) { var targetURL *url.URL + var err error + endpoints := []string{} - if cfg.Endpoint == "" { + // if no endpoint is set we do not start the receiver. For each set endpoint we go through and Validate + // that it contains an auth setting and a valid endpoint, if its missing either of these the receiver will + // fail to start. + if cfg.IdxEndpoint.Endpoint == "" && cfg.SHEndpoint.Endpoint == "" && cfg.CMEndpoint.Endpoint == "" { errors = multierr.Append(errors, errBadOrMissingEndpoint) } else { - // we want to validate that the endpoint url supplied by user is at least - // a little bit valid - var err error - targetURL, err = url.Parse(cfg.Endpoint) - if err != nil { - errors = multierr.Append(errors, errBadOrMissingEndpoint) + if cfg.IdxEndpoint.Endpoint != "" { + if cfg.IdxEndpoint.Auth == nil { + errors = multierr.Append(errors, errMissingAuthExtension) + } + endpoints = append(endpoints, cfg.IdxEndpoint.Endpoint) } - - if !strings.HasPrefix(targetURL.Scheme, "http") { - errors = multierr.Append(errors, errBadScheme) + if cfg.SHEndpoint.Endpoint != "" { + if cfg.SHEndpoint.Auth == nil { + errors = multierr.Append(errors, errMissingAuthExtension) + } + endpoints = append(endpoints, cfg.SHEndpoint.Endpoint) + } + if cfg.CMEndpoint.Endpoint != "" { + if cfg.CMEndpoint.Auth == nil { + errors = multierr.Append(errors, 
errMissingAuthExtension) + } + endpoints = append(endpoints, cfg.CMEndpoint.Endpoint) } - } - if cfg.ClientConfig.Auth.AuthenticatorID.Name() == "" { - errors = multierr.Append(errors, errMissingAuthExtension) + for _, e := range endpoints { + targetURL, err = url.Parse(e) + if err != nil { + errors = multierr.Append(errors, errBadOrMissingEndpoint) + continue + } + + // note passes for both http and https + if !strings.HasPrefix(targetURL.Scheme, "http") { + errors = multierr.Append(errors, errBadScheme) + } + } } return errors diff --git a/receiver/splunkenterprisereceiver/config_test.go b/receiver/splunkenterprisereceiver/config_test.go index 8079fc04a3e0..1693c940d6eb 100644 --- a/receiver/splunkenterprisereceiver/config_test.go +++ b/receiver/splunkenterprisereceiver/config_test.go @@ -9,13 +9,14 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configauth" + "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/confmap/confmaptest" + "go.uber.org/multierr" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" ) -// Since there are no custom fields in config the existing tests for the components should -// cover the testing requirement. func TestLoadConfig(t *testing.T) { t.Parallel() @@ -25,3 +26,77 @@ func TestLoadConfig(t *testing.T) { _, err = cm.Sub(id.String()) require.NoError(t, err) } + +func TestEndpointCorrectness(t *testing.T) { + // Declare errors for tests that should fail + var errBad, errScheme error + // Error for bad or missing endpoint + errBad = multierr.Append(errBad, errBadOrMissingEndpoint) + // There is no way with the current SDK design to create a test config that + // satisfies the auth extension so we will just expect this error to appear. 
+ errBad = multierr.Append(errBad, errMissingAuthExtension) + + // Error related to bad scheme (not http/s) + errScheme = multierr.Append(errScheme, errBadScheme) + errScheme = multierr.Append(errScheme, errMissingAuthExtension) + + tests := []struct { + desc string + expected error + config *Config + }{ + { + desc: "missing any endpoint setting", + expected: errBad, + config: &Config{ + IdxEndpoint: confighttp.ClientConfig{ + Auth: &configauth.Authentication{AuthenticatorID: component.NewID("dummy")}, + }, + SHEndpoint: confighttp.ClientConfig{ + Auth: &configauth.Authentication{AuthenticatorID: component.NewID("dummy")}, + }, + CMEndpoint: confighttp.ClientConfig{ + Auth: &configauth.Authentication{AuthenticatorID: component.NewID("dummy")}, + }, + }, + }, + { + desc: "properly configured invalid endpoint", + expected: errBad, + config: &Config{ + IdxEndpoint: confighttp.ClientConfig{ + Auth: &configauth.Authentication{AuthenticatorID: component.NewID("dummy")}, + Endpoint: "123.321.12.1:1", + }, + }, + }, + { + desc: "properly configured endpoint has bad scheme", + expected: errScheme, + config: &Config{ + IdxEndpoint: confighttp.ClientConfig{ + Auth: &configauth.Authentication{AuthenticatorID: component.NewID("dummy")}, + Endpoint: "gss://123.124.32.12:90", + }, + }, + }, + { + desc: "properly configured endpoint missing auth", + expected: errMissingAuthExtension, + config: &Config{ + IdxEndpoint: confighttp.ClientConfig{ + Endpoint: "https://123.123.32.2:2093", + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + err := test.config.Validate() + t.Logf("%v\n", err) + require.Error(t, err) + require.Contains(t, test.expected.Error(), err.Error()) + }) + } +} diff --git a/receiver/splunkenterprisereceiver/factory.go b/receiver/splunkenterprisereceiver/factory.go index 7c9a021730e3..d0b9343c63a1 100644 --- a/receiver/splunkenterprisereceiver/factory.go +++ b/receiver/splunkenterprisereceiver/factory.go @@ -28,6 +28,7 @@ 
func createDefaultConfig() component.Config { httpCfg.Headers = map[string]configopaque.String{ "Content-Type": "application/x-www-form-urlencoded", } + httpCfg.Timeout = defaultMaxSearchWaitTime // Default ScraperController settings scfg := scraperhelper.NewDefaultScraperControllerSettings(metadata.Type) @@ -35,7 +36,9 @@ func createDefaultConfig() component.Config { scfg.Timeout = defaultMaxSearchWaitTime return &Config{ - ClientConfig: httpCfg, + IdxEndpoint: httpCfg, + SHEndpoint: httpCfg, + CMEndpoint: httpCfg, ScraperControllerSettings: scfg, MetricsBuilderConfig: metadata.DefaultMetricsBuilderConfig(), } diff --git a/receiver/splunkenterprisereceiver/factory_test.go b/receiver/splunkenterprisereceiver/factory_test.go index 244bcc5980f4..be717b5c73f8 100644 --- a/receiver/splunkenterprisereceiver/factory_test.go +++ b/receiver/splunkenterprisereceiver/factory_test.go @@ -28,9 +28,12 @@ func TestDefaultConfig(t *testing.T) { cfg.Headers = map[string]configopaque.String{ "Content-Type": "application/x-www-form-urlencoded", } + cfg.Timeout = 60 * time.Second expectedConf := &Config{ - ClientConfig: cfg, + IdxEndpoint: cfg, + SHEndpoint: cfg, + CMEndpoint: cfg, ScraperControllerSettings: scraperhelper.ScraperControllerSettings{ CollectionInterval: 10 * time.Minute, InitialDelay: 1 * time.Second, @@ -55,6 +58,9 @@ func TestCreateMetricsReceiver(t *testing.T) { t.Parallel() cfg := createDefaultConfig().(*Config) + cfg.CMEndpoint.Endpoint = "https://123.12.12.12:80" + cfg.IdxEndpoint.Endpoint = "https://123.12.12.12:80" + cfg.SHEndpoint.Endpoint = "https://123.12.12.12:80" _, err := createMetricsReceiver( context.Background(), diff --git a/receiver/splunkenterprisereceiver/scraper.go b/receiver/splunkenterprisereceiver/scraper.go index 59a2b5bdb6fe..f132ed8c7b9e 100644 --- a/receiver/splunkenterprisereceiver/scraper.go +++ b/receiver/splunkenterprisereceiver/scraper.go @@ -82,14 +82,14 @@ func (s *splunkScraper) scrape(ctx context.Context) (pmetric.Metrics, error) { 
// Each metric has its own scrape function associated with it func (s *splunkScraper) scrapeLicenseUsageByIndex(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - var sr searchResponse // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding - if !s.conf.MetricsBuilderConfig.Metrics.SplunkLicenseIndexUsage.Enabled { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkLicenseIndexUsage.Enabled || !s.splunkClient.isConfigured(typeCm) { return } + ctx = context.WithValue(ctx, endpointType("type"), typeCm) - sr = searchResponse{ + sr := searchResponse{ search: searchDict[`SplunkLicenseIndexUsageSearch`], } @@ -156,16 +156,16 @@ func (s *splunkScraper) scrapeLicenseUsageByIndex(ctx context.Context, now pcomm } func (s *splunkScraper) scrapeAvgExecLatencyByHost(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - var sr searchResponse // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkSchedulerAvgExecutionLatency.Enabled { return } - sr = searchResponse{ + sr := searchResponse{ search: searchDict[`SplunkSchedulerAvgExecLatencySearch`], } + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var ( req *http.Request @@ -234,16 +234,16 @@ func (s *splunkScraper) scrapeAvgExecLatencyByHost(ctx context.Context, now pcom } func (s *splunkScraper) scrapeIndexerAvgRate(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - var sr searchResponse // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkIndexerAvgRate.Enabled { return } - sr = searchResponse{ + sr := searchResponse{ search: searchDict[`SplunkIndexerAvgRate`], } + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var ( 
req *http.Request @@ -315,16 +315,16 @@ func (s *splunkScraper) scrapeIndexerAvgRate(ctx context.Context, now pcommon.Ti } func (s *splunkScraper) scrapeIndexerPipelineQueues(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - var sr searchResponse // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkAggregationQueueRatio.Enabled { return } - sr = searchResponse{ + sr := searchResponse{ search: searchDict[`SplunkPipelineQueues`], } + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var ( req *http.Request @@ -428,16 +428,16 @@ func (s *splunkScraper) scrapeIndexerPipelineQueues(ctx context.Context, now pco } func (s *splunkScraper) scrapeBucketsSearchableStatus(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - var sr searchResponse // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkBucketsSearchableStatus.Enabled { return } - sr = searchResponse{ + sr := searchResponse{ search: searchDict[`SplunkBucketsSearchableStatus`], } + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var ( req *http.Request @@ -516,16 +516,16 @@ func (s *splunkScraper) scrapeBucketsSearchableStatus(ctx context.Context, now p } func (s *splunkScraper) scrapeIndexesBucketCountAdHoc(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - var sr searchResponse // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkIndexesSize.Enabled { return } - sr = searchResponse{ + sr := searchResponse{ search: searchDict[`SplunkIndexesData`], } + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var ( req *http.Request @@ -629,16 +629,16 @@ 
func (s *splunkScraper) scrapeIndexesBucketCountAdHoc(ctx context.Context, now p } func (s *splunkScraper) scrapeSchedulerCompletionRatioByHost(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - var sr searchResponse // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkSchedulerCompletionRatio.Enabled { return } - sr = searchResponse{ + sr := searchResponse{ search: searchDict[`SplunkSchedulerCompletionRatio`], } + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var ( req *http.Request @@ -707,16 +707,16 @@ func (s *splunkScraper) scrapeSchedulerCompletionRatioByHost(ctx context.Context } func (s *splunkScraper) scrapeIndexerRawWriteSecondsByHost(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - var sr searchResponse // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkIndexerRawWriteTime.Enabled { return } - sr = searchResponse{ + sr := searchResponse{ search: searchDict[`SplunkIndexerRawWriteSeconds`], } + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var ( req *http.Request @@ -785,16 +785,16 @@ func (s *splunkScraper) scrapeIndexerRawWriteSecondsByHost(ctx context.Context, } func (s *splunkScraper) scrapeIndexerCPUSecondsByHost(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - var sr searchResponse // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkIndexerCPUTime.Enabled { return } - sr = searchResponse{ + sr := searchResponse{ search: searchDict[`SplunkIndexerCpuSeconds`], } + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var ( req *http.Request @@ -863,16 +863,16 @@ func (s 
*splunkScraper) scrapeIndexerCPUSecondsByHost(ctx context.Context, now p } func (s *splunkScraper) scrapeAvgIopsByHost(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - var sr searchResponse // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkIoAvgIops.Enabled { return } - sr = searchResponse{ + sr := searchResponse{ search: searchDict[`SplunkIoAvgIops`], } + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var ( req *http.Request @@ -941,16 +941,16 @@ func (s *splunkScraper) scrapeAvgIopsByHost(ctx context.Context, now pcommon.Tim } func (s *splunkScraper) scrapeSchedulerRunTimeByHost(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - var sr searchResponse // Because we have to utilize network resources for each KPI we should check that each metrics // is enabled before proceeding if !s.conf.MetricsBuilderConfig.Metrics.SplunkSchedulerAvgRunTime.Enabled { return } - sr = searchResponse{ + sr := searchResponse{ search: searchDict[`SplunkSchedulerAvgRunTime`], } + ctx = context.WithValue(ctx, endpointType("type"), typeCm) var ( req *http.Request @@ -1041,14 +1041,14 @@ func unmarshallSearchReq(res *http.Response, sr *searchResponse) error { // Scrape index throughput introspection endpoint func (s *splunkScraper) scrapeIndexThroughput(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - var it indexThroughput - var ept string - - if !s.conf.MetricsBuilderConfig.Metrics.SplunkIndexerThroughput.Enabled { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkIndexerThroughput.Enabled || !s.splunkClient.isConfigured(typeIdx) { return } - ept = apiDict[`SplunkIndexerThroughput`] + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) + var it indexThroughput + + ept := apiDict[`SplunkIndexerThroughput`] req, err := s.splunkClient.createAPIRequest(ctx, 
ept) if err != nil { @@ -1082,14 +1082,13 @@ func (s *splunkScraper) scrapeIndexThroughput(ctx context.Context, now pcommon.T // Scrape indexes extended total size func (s *splunkScraper) scrapeIndexesTotalSize(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - var it IndexesExtended - var ept string - - if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedTotalSize.Enabled { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedTotalSize.Enabled || !s.splunkClient.isConfigured(typeIdx) { return } - ept = apiDict[`SplunkDataIndexesExtended`] + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) + var it IndexesExtended + ept := apiDict[`SplunkDataIndexesExtended`] req, err := s.splunkClient.createAPIRequest(ctx, ept) if err != nil { @@ -1136,14 +1135,14 @@ func (s *splunkScraper) scrapeIndexesTotalSize(ctx context.Context, now pcommon. // Scrape indexes extended total event count func (s *splunkScraper) scrapeIndexesEventCount(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - var it IndexesExtended - var ept string - - if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedEventCount.Enabled { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedEventCount.Enabled || !s.splunkClient.isConfigured(typeIdx) { return } - ept = apiDict[`SplunkDataIndexesExtended`] + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) + var it IndexesExtended + + ept := apiDict[`SplunkDataIndexesExtended`] req, err := s.splunkClient.createAPIRequest(ctx, ept) if err != nil { @@ -1183,14 +1182,14 @@ func (s *splunkScraper) scrapeIndexesEventCount(ctx context.Context, now pcommon // Scrape indexes extended total bucket count func (s *splunkScraper) scrapeIndexesBucketCount(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - var it IndexesExtended - var ept string - - if 
!s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedBucketCount.Enabled { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedBucketCount.Enabled || !s.splunkClient.isConfigured(typeIdx) { return } - ept = apiDict[`SplunkDataIndexesExtended`] + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) + var it IndexesExtended + + ept := apiDict[`SplunkDataIndexesExtended`] req, err := s.splunkClient.createAPIRequest(ctx, ept) if err != nil { @@ -1236,14 +1235,14 @@ func (s *splunkScraper) scrapeIndexesBucketCount(ctx context.Context, now pcommo // Scrape indexes extended raw size func (s *splunkScraper) scrapeIndexesRawSize(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - var it IndexesExtended - var ept string - - if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedRawSize.Enabled { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedRawSize.Enabled || !s.splunkClient.isConfigured(typeIdx) { return } - ept = apiDict[`SplunkDataIndexesExtended`] + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) + var it IndexesExtended + + ept := apiDict[`SplunkDataIndexesExtended`] req, err := s.splunkClient.createAPIRequest(ctx, ept) if err != nil { @@ -1289,14 +1288,14 @@ func (s *splunkScraper) scrapeIndexesRawSize(ctx context.Context, now pcommon.Ti // Scrape indexes extended bucket event count func (s *splunkScraper) scrapeIndexesBucketEventCount(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - var it IndexesExtended - var ept string - - if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedBucketEventCount.Enabled { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedBucketEventCount.Enabled || !s.splunkClient.isConfigured(typeIdx) { return } - ept = apiDict[`SplunkDataIndexesExtended`] + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) + var it IndexesExtended + + ept := apiDict[`SplunkDataIndexesExtended`] 
req, err := s.splunkClient.createAPIRequest(ctx, ept) if err != nil { @@ -1359,14 +1358,14 @@ func (s *splunkScraper) scrapeIndexesBucketEventCount(ctx context.Context, now p // Scrape indexes extended bucket hot/warm count func (s *splunkScraper) scrapeIndexesBucketHotWarmCount(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - var it IndexesExtended - var ept string - - if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedBucketHotCount.Enabled { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkDataIndexesExtendedBucketHotCount.Enabled || !s.splunkClient.isConfigured(typeIdx) { return } - ept = apiDict[`SplunkDataIndexesExtended`] + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) + var it IndexesExtended + + ept := apiDict[`SplunkDataIndexesExtended`] req, err := s.splunkClient.createAPIRequest(ctx, ept) if err != nil { @@ -1422,14 +1421,14 @@ func (s *splunkScraper) scrapeIndexesBucketHotWarmCount(ctx context.Context, now // Scrape introspection queues func (s *splunkScraper) scrapeIntrospectionQueues(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - var it IntrospectionQueues - var ept string - - if !s.conf.MetricsBuilderConfig.Metrics.SplunkServerIntrospectionQueuesCurrent.Enabled { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkServerIntrospectionQueuesCurrent.Enabled || !s.splunkClient.isConfigured(typeIdx) { return } - ept = apiDict[`SplunkIntrospectionQueues`] + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) + var it IntrospectionQueues + + ept := apiDict[`SplunkIntrospectionQueues`] req, err := s.splunkClient.createAPIRequest(ctx, ept) if err != nil { @@ -1470,14 +1469,14 @@ func (s *splunkScraper) scrapeIntrospectionQueues(ctx context.Context, now pcomm // Scrape introspection queues bytes func (s *splunkScraper) scrapeIntrospectionQueuesBytes(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { - var it IntrospectionQueues - var 
ept string - - if !s.conf.MetricsBuilderConfig.Metrics.SplunkServerIntrospectionQueuesCurrentBytes.Enabled { + if !s.conf.MetricsBuilderConfig.Metrics.SplunkServerIntrospectionQueuesCurrentBytes.Enabled || !s.splunkClient.isConfigured(typeIdx) { return } - ept = apiDict[`SplunkIntrospectionQueues`] + ctx = context.WithValue(ctx, endpointType("type"), typeIdx) + var it IntrospectionQueues + + ept := apiDict[`SplunkIntrospectionQueues`] req, err := s.splunkClient.createAPIRequest(ctx, ept) if err != nil { diff --git a/receiver/splunkenterprisereceiver/scraper_test.go b/receiver/splunkenterprisereceiver/scraper_test.go index c0ae5d7c624a..f6e6c547d1a2 100644 --- a/receiver/splunkenterprisereceiver/scraper_test.go +++ b/receiver/splunkenterprisereceiver/scraper_test.go @@ -8,7 +8,6 @@ import ( "net/http" "net/http/httptest" "path/filepath" - "strings" "testing" "time" @@ -51,12 +50,12 @@ func mockIntrospectionQueues(w http.ResponseWriter, _ *http.Request) { // mock server create func createMockServer() *httptest.Server { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch strings.TrimSpace(r.URL.Path) { - case "/services/server/introspection/indexer": + switch r.URL.String() { + case "/services/server/introspection/indexer?output_mode=json": mockIndexerThroughput(w, r) - case "/services/data/indexes-extended": + case "/services/data/indexes-extended?output_mode=json&count=-1": mockIndexesExtended(w, r) - case "/services/server/introspection/queues": + case "/services/server/introspection/queues?output_mode=json&count=-1": mockIntrospectionQueues(w, r) default: http.NotFoundHandler().ServeHTTP(w, r) @@ -84,11 +83,17 @@ func TestScraper(t *testing.T) { metricsettings.Metrics.SplunkServerIntrospectionQueuesCurrentBytes.Enabled = true cfg := &Config{ - ClientConfig: confighttp.ClientConfig{ + IdxEndpoint: confighttp.ClientConfig{ Endpoint: ts.URL, - Auth: &configauth.Authentication{ - AuthenticatorID: 
component.MustNewIDWithName("basicauth", "client"), - }, + Auth: &configauth.Authentication{AuthenticatorID: component.MustNewIDWithName("basicauth", "client")}, + }, + SHEndpoint: confighttp.ClientConfig{ + Endpoint: ts.URL, + Auth: &configauth.Authentication{AuthenticatorID: component.MustNewIDWithName("basicauth", "client")}, + }, + CMEndpoint: confighttp.ClientConfig{ + Endpoint: ts.URL, + Auth: &configauth.Authentication{AuthenticatorID: component.MustNewIDWithName("basicauth", "client")}, }, ScraperControllerSettings: scraperhelper.ScraperControllerSettings{ CollectionInterval: 10 * time.Second, diff --git a/receiver/splunkenterprisereceiver/testdata/config.yaml b/receiver/splunkenterprisereceiver/testdata/config.yaml index 48bf9742b416..1c3bb2455d1b 100644 --- a/receiver/splunkenterprisereceiver/testdata/config.yaml +++ b/receiver/splunkenterprisereceiver/testdata/config.yaml @@ -1,15 +1,21 @@ # Example config for the Splunk Enterprise Receiver. -basicauth/client: +basicauth/search_head: client_auth: username: admin password: securityFirst +basicauth/indexer: + client_auth: + username: admin + password: securityFirst1! splunkenterprise: - # required settings - auth: basicauth/client # must use basicauthextension - endpoint: "https://localhost:8089" # Optional settings - collection_interval: 10s - timeout: 11s + indexer: + auth: + authenticator: basicauth/indexer + timeout: 10 + search_head: + auth: + authenticator: basicauth/search_head # Also optional: metric settings metrics: splunk.license.index.usage: