Metrics cleanups in other packages
Remove/deprecate statsd metrics, and rename an httpbp prometheus metric similarly to the thriftbp one in reddit#581.
fishy committed Nov 1, 2022
1 parent 0ee5876 commit 9161f12
Showing 10 changed files with 43 additions and 77 deletions.
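
The recurring pattern in this commit replaces counters emitted through the deprecated statsd metricsbp.M client with Prometheus counters registered via promauto, as the retrybp and signing examples below now show. As orientation, here is a minimal sketch of that replacement; the metric and label names are purely illustrative and not part of this commit (inside baseplate.go itself the collectors are registered against the internal prometheusbpint.GlobalRegistry rather than the default registerer used here):

package example

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// Before (deprecated statsd):
//
//	metricsbp.M.Counter("operation.failure").Add(1)
//
// After (Prometheus): register the counter once at package scope...
var operationFailures = promauto.NewCounterVec(prometheus.CounterOpts{
	Name: "example_operation_failures_total", // hypothetical name
	Help: "Total number of failed operations",
}, []string{"operation"})

// ...then increment it at the call site.
func recordFailure(op string) {
	operationFailures.With(prometheus.Labels{"operation": op}).Inc()
}
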
baseplate.go (5 changes: 3 additions & 2 deletions)
@@ -61,11 +61,13 @@ type Config struct {
StopDelay time.Duration `yaml:"stopDelay"`

Log log.Config `yaml:"log"`
Metrics metricsbp.Config `yaml:"metrics"`
Runtime runtimebp.Config `yaml:"runtime"`
Secrets secrets.Config `yaml:"secrets"`
Sentry log.SentryConfig `yaml:"sentry"`
Tracing tracing.Config `yaml:"tracing"`

// Deprecated: statsd metrics are deprecated.
Metrics metricsbp.Config `yaml:"metrics"`
}

// GetConfig implements Configer.
@@ -321,7 +323,6 @@ func New(ctx context.Context, args NewArgs) (context.Context, Baseplate, error)
bp.closers.Add(batchcloser.WrapCancel(cancel))

log.InitFromConfig(cfg.Log)
bp.closers.Add(metricsbp.InitFromConfig(ctx, cfg.Metrics))

closer, err := log.InitSentry(cfg.Sentry)
if err != nil {
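
With the metricsbp.InitFromConfig call removed from baseplate.New above, statsd metrics are no longer started automatically. A service that still depends on them during the deprecation window could, as a purely hypothetical stopgap, initialize them itself; this sketch assumes InitFromConfig keeps the signature implied by the removed bp.closers.Add(metricsbp.InitFromConfig(ctx, cfg.Metrics)) line, i.e. that it takes a context and a metricsbp.Config and returns a closer:

package example

import (
	"context"
	"io"

	baseplate "github.com/reddit/baseplate.go"
	"github.com/reddit/baseplate.go/metricsbp"
)

// initLegacyStatsd is a hypothetical stopgap only; statsd metrics are
// deprecated and this initialization is no longer done by baseplate.New.
func initLegacyStatsd(ctx context.Context, cfg baseplate.Config) io.Closer {
	// Signature assumed from the line removed from New above.
	return metricsbp.InitFromConfig(ctx, cfg.Metrics)
}
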
baseplate_test.go (16 changes: 0 additions & 16 deletions)
@@ -17,7 +17,6 @@ import (
"github.com/reddit/baseplate.go/configbp"
"github.com/reddit/baseplate.go/ecinterface"
"github.com/reddit/baseplate.go/log"
"github.com/reddit/baseplate.go/metricsbp"
"github.com/reddit/baseplate.go/runtimebp"
"github.com/reddit/baseplate.go/secrets"
"github.com/reddit/baseplate.go/tracing"
@@ -285,10 +284,6 @@ func TestServeClosers(t *testing.T) {
}
}

func float64Ptr(v float64) *float64 {
return &v
}

type serviceConfig struct {
baseplate.Config `yaml:",inline"`

@@ -315,11 +310,6 @@ stopTimeout: 30s
log:
level: info
metrics:
namespace: baseplate-test
endpoint: metrics:8125
histogramSampleRate: 0.01
runtime:
numProcesses:
max: 100
@@ -347,12 +337,6 @@ redis:
Level: "info",
},

Metrics: metricsbp.Config{
Namespace: "baseplate-test",
Endpoint: "metrics:8125",
HistogramSampleRate: float64Ptr(0.01),
},

Runtime: runtimebp.Config{
NumProcesses: struct {
Max int `yaml:"max"`
breakerbp/failure_ratio.go (34 changes: 5 additions & 29 deletions)
@@ -12,7 +12,6 @@ import (

"github.com/reddit/baseplate.go/internal/prometheusbpint"
"github.com/reddit/baseplate.go/log"
"github.com/reddit/baseplate.go/metricsbp"
)

const (
@@ -58,7 +57,10 @@ type Config struct {
// EmitStatusMetrics sets whether the failure breaker will regularly update a gauge on the breakers state (closed or open/halfopen).
// When enabled, it emits metrics using the interval of EmitStatusMetricsInterval.
// If EmitStatusMetricsInterval <=0, metricsbp.SysStatsTickerInterval will be used as the fallback.
EmitStatusMetrics bool `yaml:"emitStatusMetrics"`
//
// Deprecated: Statsd metrics are deprecated.
EmitStatusMetrics bool `yaml:"emitStatusMetrics"`
// Deprecated: Statsd metrics are deprecated.
EmitStatusMetricsInterval time.Duration `yaml:"emitStatusMetricsInterval"`

// Logger is the logger to be called when the breaker changes states.
@@ -76,8 +78,7 @@ type Config struct {
Timeout time.Duration `yaml:"timeout"`
}

// NewFailureRatioBreaker creates a new FailureRatioBreaker with the provided configuration. Creates a new goroutine to emit
// breaker state metrics if EmitStatusMetrics is set to true. This goroutine is stopped when metricsbp.M.Ctx() is done().
// NewFailureRatioBreaker creates a new FailureRatioBreaker with the provided configuration.
func NewFailureRatioBreaker(config Config) FailureRatioBreaker {

failureBreaker := FailureRatioBreaker{
@@ -96,9 +97,6 @@ func NewFailureRatioBreaker(config Config) FailureRatioBreaker {
}

failureBreaker.goBreaker = gobreaker.NewCircuitBreaker(settings)
if config.EmitStatusMetrics {
go failureBreaker.runStatsProducer(config.EmitStatusMetricsInterval)
}

breakerClosed.With(prometheus.Labels{
nameLabel: config.Name,
@@ -107,28 +105,6 @@ func NewFailureRatioBreaker(config Config) FailureRatioBreaker {
return failureBreaker
}

func (cb FailureRatioBreaker) runStatsProducer(interval time.Duration) {
if interval <= 0 {
interval = metricsbp.SysStatsTickerInterval
}
circuitBreakerGauge := metricsbp.M.RuntimeGauge(cb.name + "-circuit-breaker-closed")

tick := time.NewTicker(interval)
defer tick.Stop()
for {
select {
case <-metricsbp.M.Ctx().Done():
return
case <-tick.C:
if cb.State() == gobreaker.StateOpen {
circuitBreakerGauge.Set(0)
} else {
circuitBreakerGauge.Set(1)
}
}
}
}

// Execute wraps the given function call in circuit breaker logic and returns
// the result.
func (cb FailureRatioBreaker) Execute(fn func() (interface{}, error)) (interface{}, error) {
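
The removed runStatsProducer goroutine published breaker state as a statsd runtime gauge; breaker state is now reported through the breakerClosed Prometheus metric touched in NewFailureRatioBreaker. If a caller still wants its own periodically polled gauge, the old behavior can be approximated outside breakerbp with something like the sketch below (the gauge name and polling approach are illustrative, not part of this package):

package example

import (
	"context"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/sony/gobreaker"

	"github.com/reddit/baseplate.go/breakerbp"
)

// Illustrative, caller-owned replacement for the removed stats producer.
var breakerClosedGauge = promauto.NewGauge(prometheus.GaugeOpts{
	Name: "example_circuit_breaker_closed", // hypothetical name
	Help: "1 when the breaker is closed or half-open, 0 when open",
})

func pollBreakerState(ctx context.Context, cb breakerbp.FailureRatioBreaker, interval time.Duration) {
	tick := time.NewTicker(interval)
	defer tick.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-tick.C:
			if cb.State() == gobreaker.StateOpen {
				breakerClosedGauge.Set(0)
			} else {
				breakerClosedGauge.Set(1)
			}
		}
	}
}
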
httpbp/middlewares.go (13 changes: 7 additions & 6 deletions)
@@ -70,16 +70,14 @@ type DefaultMiddlewareArgs struct {
//
// 1. InjectServerSpan
// 2. InjectEdgeRequestContext
// 3. RecordStatusCode
// 4. PrometheusServerMetrics
// 3. PrometheusServerMetrics
func DefaultMiddleware(args DefaultMiddlewareArgs) []Middleware {
if args.TrustHandler == nil {
args.TrustHandler = NeverTrustHeaders{}
}
return []Middleware{
InjectServerSpan(args.TrustHandler),
InjectEdgeRequestContext(InjectEdgeRequestContextArgs(args)),
RecordStatusCode(),
PrometheusServerMetrics(""),
}
}
@@ -290,6 +288,9 @@ func recoverPanik(name string, next HandlerFunc) HandlerFunc {
counter := panicRecoverCounter.With(prometheus.Labels{
methodLabel: name,
})
legacyCounter := legacyPanicRecoverCounter.With(prometheus.Labels{
methodLabel: name,
})
return func(ctx context.Context, w http.ResponseWriter, r *http.Request) (err error) {
defer func() {
if r := recover(); r != nil {
@@ -304,10 +305,8 @@ func recoverPanik(name string, next HandlerFunc) HandlerFunc {
"err", rErr,
"endpoint", name,
)
metricsbp.M.Counter("panic.recover").With(
"name", name,
).Add(1)
counter.Inc()
legacyCounter.Inc()

// change named return value to a generic 500 error
err = RawError(InternalServerError(), rErr, PlainTextContentType)
@@ -422,6 +421,8 @@ func recordStatusCode(counters counterGenerator) Middleware {
// RecordStatusCode should generally not be used directly, instead use the
// NewBaseplateServer function which will automatically include RecordStatusCode
// as one of the Middlewares to wrap your handlers in.
//
// Deprecated: This is deprecated with statsd metrics.
func RecordStatusCode() Middleware {
return recordStatusCode(metricsbp.M)
}
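
Because RecordStatusCode is no longer part of DefaultMiddleware, a service that still wants the deprecated statsd status-code counters during the transition has to append that middleware itself. Both functions remain exported, so a minimal sketch (the helper name is hypothetical) would be:

package example

import "github.com/reddit/baseplate.go/httpbp"

// middlewaresWithStatusCode is a hypothetical transition helper only;
// RecordStatusCode is deprecated along with the rest of the statsd metrics.
func middlewaresWithStatusCode(args httpbp.DefaultMiddlewareArgs) []httpbp.Middleware {
	return append(httpbp.DefaultMiddleware(args), httpbp.RecordStatusCode())
}
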
httpbp/prometheus.go (10 changes: 8 additions & 2 deletions)
@@ -123,11 +123,17 @@ var (
methodLabel,
}

panicRecoverCounter = promauto.With(prometheusbpint.GlobalRegistry).NewCounterVec(prometheus.CounterOpts{
// TODO: Remove after next release (v0.9.12)
legacyPanicRecoverCounter = promauto.With(prometheusbpint.GlobalRegistry).NewCounterVec(prometheus.CounterOpts{
Namespace: promNamespace,
Subsystem: subsystemServer,
Name: "panic_recover_total",
Help: "The number of panics recovered from http server handlers",
Help: "Deprecated: use httpbp_server_recovered_panics_total instead",
}, panicRecoverLabels)

panicRecoverCounter = promauto.With(prometheusbpint.GlobalRegistry).NewCounterVec(prometheus.CounterOpts{
Name: "httpbp_server_recovered_panics_total",
Help: "The number of panics recovered from http server handlers",
}, panicRecoverLabels)
)

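
Both the legacy counter and the renamed httpbp_server_recovered_panics_total are registered during the deprecation window, so dashboards and alerts should move to the new name before the legacy counter is removed. As a hedged illustration only, a test inside package httpbp could confirm the new series is collectable via prometheus/client_golang's testutil; the test name and the panic-triggering handler here are hypothetical and elided:

package httpbp

import (
	"testing"

	"github.com/prometheus/client_golang/prometheus/testutil"
)

func TestRecoveredPanicsCounter(t *testing.T) {
	// ...exercise a handler wrapped by recoverPanik so that it panics and
	// the recovery path increments the counter (elided)...
	if got := testutil.CollectAndCount(panicRecoverCounter, "httpbp_server_recovered_panics_total"); got == 0 {
		t.Error("expected at least one httpbp_server_recovered_panics_total series")
	}
}
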
internal/limitopen/limitopen.go (9 changes: 2 additions & 7 deletions)
@@ -13,7 +13,6 @@ import (

"github.com/reddit/baseplate.go/internal/prometheusbpint"
"github.com/reddit/baseplate.go/log"
"github.com/reddit/baseplate.go/metricsbp"
)

const (
@@ -89,9 +88,8 @@ type readCloser struct {

// OpenWithLimit calls Open with limit checks.
//
// It always reports the size of the path as a runtime gauge
// (with "limitopen.size" as the metrics path and path label for statsd,
// "limitopen_file_size_bytes" for prometheus).
// It always reports the size of the path as a prometheus gauge of
// "limitopen_file_size_bytes".
// When softLimit > 0 and the size of the path as reported by the os is larger,
// it will also use log.DefaultWrapper to report it and increase prometheus
// counter of limitopen_softlimit_violation_total.
@@ -104,9 +102,6 @@ func OpenWithLimit(path string, softLimit, hardLimit int64) (io.ReadCloser, erro
}

pathValue := filepath.Base(path)
metricsbp.M.RuntimeGauge("limitopen.size").With(
"path", pathValue,
).Set(float64(size))
labels := prometheus.Labels{
pathLabel: pathValue,
}
redis/db/redisbp/example_monitored_client_test.go (9 changes: 0 additions & 9 deletions)
@@ -6,7 +6,6 @@ import (
"github.com/go-redis/redis/v8"
"github.com/opentracing/opentracing-go"

"github.com/reddit/baseplate.go/metricsbp"
"github.com/reddit/baseplate.go/redis/db/redisbp"
"github.com/reddit/baseplate.go/tracing"
)
@@ -38,14 +37,6 @@ func ExampleNewMonitoredClient() {

defer client.Close()

// Spawn "MonitorPoolStats" in a separate goroutine.
go redisbp.MonitorPoolStats(
metricsbp.M.Ctx(),
client,
name,
metricsbp.Tags{"env": "test"},
)

// Create a "service" with a monitored client.
svc := Service{Redis: client}

redis/db/redisbp/monitored_client.go (3 changes: 3 additions & 0 deletions)
@@ -195,6 +195,9 @@ func NewMonitoredClusterClient(name string, opt *redis.ClusterOptions) *ClusterC
// Ex:
//
// go factory.MonitorPoolStats(metricsbp.M.Ctx(), tags)
//
// Deprecated: Statsd metrics are deprecated. Prometheus pool stats metrics are
// always reported with a monitored client.
func MonitorPoolStats(ctx context.Context, client PoolStatser, name string, tags metricsbp.Tags) {
t := tags.AsStatsdTags()
prefix := name + ".pool"
retrybp/doc_test.go (12 changes: 9 additions & 3 deletions)
@@ -5,17 +5,23 @@ import (
"time"

"github.com/avast/retry-go"
"github.com/prometheus/client_golang/prometheus"

"github.com/reddit/baseplate.go/breakerbp"
"github.com/reddit/baseplate.go/log"
"github.com/reddit/baseplate.go/metricsbp"
"github.com/reddit/baseplate.go/retrybp"
)

// This example demonstrates how to use retrybp to retry an operation.
func Example() {
const timeout = time.Millisecond * 200

// prometheus counters
var (
errorCounter prometheus.Counter
successCounter prometheus.Counter
)

// TODO: use the actual type of your operation.
type resultType = int

@@ -69,14 +75,14 @@ func Example() {
),
retry.OnRetry(func(attempts uint, err error) {
if err != nil {
metricsbp.M.Counter("operation.failure").Add(1)
errorCounter.Inc()
log.Errorw(
"operation failed",
"err", err,
"attempt", attempts,
)
} else {
metricsbp.M.Counter("operation.succeed").Add(1)
successCounter.Inc()
}
}),
)
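
In the updated example the two counters are declared as nil placeholders, which is fine for documentation purposes (Example functions without an output comment are compiled but not executed), but a real service would construct them up front, for instance via promauto. A hypothetical initialization with illustrative metric names:

package example

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var (
	errorCounter = promauto.NewCounter(prometheus.CounterOpts{
		Name: "retry_example_failures_total", // hypothetical name
		Help: "Total number of failed attempts of the operation",
	})
	successCounter = promauto.NewCounter(prometheus.CounterOpts{
		Name: "retry_example_successes_total", // hypothetical name
		Help: "Total number of successful attempts of the operation",
	})
)
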
signing/doc_test.go (9 changes: 6 additions & 3 deletions)
@@ -4,7 +4,7 @@ import (
"errors"
"time"

"github.com/reddit/baseplate.go/metricsbp"
"github.com/prometheus/client_golang/prometheus"
"github.com/reddit/baseplate.go/secrets"
"github.com/reddit/baseplate.go/signing"
)
@@ -14,6 +14,9 @@ func Example() {
var (
store *secrets.Store
secretPath string

invalidSignatureCounter prometheus.Counter
expiredSignatureCounter prometheus.Counter
)

secret, _ := store.GetVersionedSecret(secretPath)
@@ -30,12 +33,12 @@
// Verify
err := signing.Verify([]byte(msg), signature, secret)
if err != nil {
metricsbp.M.Counter("invalid-signature").Add(1)
invalidSignatureCounter.Inc()
var e signing.VerifyError
if errors.As(err, &e) {
switch e.Reason {
case signing.VerifyErrorReasonExpired:
metricsbp.M.Counter("signature-expired").Add(1)
expiredSignatureCounter.Inc()
}
}
}
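
The signing example now counts invalid and expired signatures with two separate counters. An alternative, equally hypothetical design is a single CounterVec keyed by a reason label, mirroring the signing.VerifyError reasons; a sketch:

package example

import (
	"errors"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"

	"github.com/reddit/baseplate.go/signing"
)

var verifyFailures = promauto.NewCounterVec(prometheus.CounterOpts{
	Name: "example_signature_verify_failures_total", // illustrative name
	Help: "Signature verification failures, by reason",
}, []string{"reason"})

func recordVerifyError(err error) {
	reason := "invalid"
	var e signing.VerifyError
	if errors.As(err, &e) && e.Reason == signing.VerifyErrorReasonExpired {
		reason = "expired"
	}
	verifyFailures.With(prometheus.Labels{"reason": reason}).Inc()
}
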
