TUN-528: Move cloudflared into a separate repo

Areg Harutyunyan
2018-05-01 18:45:06 -05:00
parent e8c621a648
commit d06fc520c7
4726 changed files with 1763680 additions and 0 deletions


@@ -0,0 +1 @@
command-line-arguments.test


@@ -0,0 +1 @@
See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus).


@@ -0,0 +1,185 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"sync"
"testing"
)
func BenchmarkCounterWithLabelValues(b *testing.B) {
m := NewCounterVec(
CounterOpts{
Name: "benchmark_counter",
Help: "A counter to benchmark it.",
},
[]string{"one", "two", "three"},
)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.WithLabelValues("eins", "zwei", "drei").Inc()
}
}
func BenchmarkCounterWithLabelValuesConcurrent(b *testing.B) {
m := NewCounterVec(
CounterOpts{
Name: "benchmark_counter",
Help: "A counter to benchmark it.",
},
[]string{"one", "two", "three"},
)
b.ReportAllocs()
b.ResetTimer()
wg := sync.WaitGroup{}
for i := 0; i < 10; i++ {
wg.Add(1)
go func() {
for j := 0; j < b.N/10; j++ {
m.WithLabelValues("eins", "zwei", "drei").Inc()
}
wg.Done()
}()
}
wg.Wait()
}
func BenchmarkCounterWithMappedLabels(b *testing.B) {
m := NewCounterVec(
CounterOpts{
Name: "benchmark_counter",
Help: "A counter to benchmark it.",
},
[]string{"one", "two", "three"},
)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.With(Labels{"two": "zwei", "one": "eins", "three": "drei"}).Inc()
}
}
func BenchmarkCounterWithPreparedMappedLabels(b *testing.B) {
m := NewCounterVec(
CounterOpts{
Name: "benchmark_counter",
Help: "A counter to benchmark it.",
},
[]string{"one", "two", "three"},
)
b.ReportAllocs()
b.ResetTimer()
labels := Labels{"two": "zwei", "one": "eins", "three": "drei"}
for i := 0; i < b.N; i++ {
m.With(labels).Inc()
}
}
func BenchmarkCounterNoLabels(b *testing.B) {
m := NewCounter(CounterOpts{
Name: "benchmark_counter",
Help: "A counter to benchmark it.",
})
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.Inc()
}
}
func BenchmarkGaugeWithLabelValues(b *testing.B) {
m := NewGaugeVec(
GaugeOpts{
Name: "benchmark_gauge",
Help: "A gauge to benchmark it.",
},
[]string{"one", "two", "three"},
)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.WithLabelValues("eins", "zwei", "drei").Set(3.1415)
}
}
func BenchmarkGaugeNoLabels(b *testing.B) {
m := NewGauge(GaugeOpts{
Name: "benchmark_gauge",
Help: "A gauge to benchmark it.",
})
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.Set(3.1415)
}
}
func BenchmarkSummaryWithLabelValues(b *testing.B) {
m := NewSummaryVec(
SummaryOpts{
Name: "benchmark_summary",
Help: "A summary to benchmark it.",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
},
[]string{"one", "two", "three"},
)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.WithLabelValues("eins", "zwei", "drei").Observe(3.1415)
}
}
func BenchmarkSummaryNoLabels(b *testing.B) {
m := NewSummary(SummaryOpts{
Name: "benchmark_summary",
Help: "A summary to benchmark it.",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
},
)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.Observe(3.1415)
}
}
func BenchmarkHistogramWithLabelValues(b *testing.B) {
m := NewHistogramVec(
HistogramOpts{
Name: "benchmark_histogram",
Help: "A histogram to benchmark it.",
},
[]string{"one", "two", "three"},
)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.WithLabelValues("eins", "zwei", "drei").Observe(3.1415)
}
}
func BenchmarkHistogramNoLabels(b *testing.B) {
m := NewHistogram(HistogramOpts{
Name: "benchmark_histogram",
Help: "A histogram to benchmark it.",
},
)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.Observe(3.1415)
}
}
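
These benchmarks contrast looking up a child metric on every update (WithLabelValues, With) with reusing prepared labels. A minimal sketch of the same trade-off in application code, assuming the usual client_golang import path and made-up metric names:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reqs := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "example_requests_total",
			Help: "Requests processed.",
		},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(reqs)

	// Per-call lookup: convenient, but every call re-hashes the label values
	// (the pattern measured by BenchmarkCounterWithLabelValues above).
	for i := 0; i < 1000; i++ {
		reqs.WithLabelValues("200", "GET").Inc()
	}

	// Hot path: resolve the child counter once and keep the handle, so the
	// loop only pays for the increment itself.
	okGet := reqs.WithLabelValues("200", "GET")
	for i := 0; i < 1000; i++ {
		okGet.Inc()
	}
}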


@@ -0,0 +1,75 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
// Collector is the interface implemented by anything that can be used by
// Prometheus to collect metrics. A Collector has to be registered for
// collection. See Registerer.Register.
//
// The stock metrics provided by this package (Gauge, Counter, Summary,
// Histogram, Untyped) are also Collectors (which only ever collect one metric,
// namely itself). An implementer of Collector may, however, collect multiple
// metrics in a coordinated fashion and/or create metrics on the fly. Examples
// for collectors already implemented in this library are the metric vectors
// (i.e. collection of multiple instances of the same Metric but with different
// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
type Collector interface {
// Describe sends the super-set of all possible descriptors of metrics
// collected by this Collector to the provided channel and returns once
// the last descriptor has been sent. The sent descriptors fulfill the
// consistency and uniqueness requirements described in the Desc
// documentation. (It is valid if one and the same Collector sends
// duplicate descriptors. Those duplicates are simply ignored. However,
// two different Collectors must not send duplicate descriptors.) This
// method idempotently sends the same descriptors throughout the
// lifetime of the Collector. If a Collector encounters an error while
// executing this method, it must send an invalid descriptor (created
// with NewInvalidDesc) to signal the error to the registry.
Describe(chan<- *Desc)
// Collect is called by the Prometheus registry when collecting
// metrics. The implementation sends each collected metric via the
// provided channel and returns once the last metric has been sent. The
// descriptor of each sent metric is one of those returned by
// Describe. Returned metrics that share the same descriptor must differ
// in their variable label values. This method may be called
// concurrently and must therefore be implemented in a concurrency safe
// way. Blocking occurs at the expense of total performance of rendering
// all registered metrics. Ideally, Collector implementations support
// concurrent readers.
Collect(chan<- Metric)
}
// selfCollector implements Collector for a single Metric so that the Metric
// collects itself. Add it as an anonymous field to a struct that implements
// Metric, and call init with the Metric itself as an argument.
type selfCollector struct {
self Metric
}
// init provides the selfCollector with a reference to the metric it is supposed
// to collect. It is usually called within the factory function to create a
// metric. See example.
func (c *selfCollector) init(self Metric) {
c.self = self
}
// Describe implements Collector.
func (c *selfCollector) Describe(ch chan<- *Desc) {
ch <- c.self.Desc()
}
// Collect implements Collector.
func (c *selfCollector) Collect(ch chan<- Metric) {
ch <- c.self
}
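
A minimal custom Collector makes the Describe/Collect contract above concrete. The following sketch is illustrative only (the type, metric name, and length callback are made up); it emits a single constant gauge whose value is read at collect time:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// queueLengthCollector is a hypothetical Collector that reports the current
// length of some in-memory queue as a gauge at collect time.
type queueLengthCollector struct {
	desc   *prometheus.Desc
	length func() int // assumption: any function returning the current length
}

// Describe sends the one and only descriptor; every metric later sent by
// Collect uses this same Desc, as the interface contract requires.
func (c queueLengthCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}

// Collect creates a throw-away constant metric carrying the current value.
func (c queueLengthCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(c.length()))
}

func main() {
	c := queueLengthCollector{
		desc:   prometheus.NewDesc("example_queue_length", "Current queue length.", nil, nil),
		length: func() int { return 7 },
	}
	reg := prometheus.NewRegistry()
	reg.MustRegister(c)
	mfs, err := reg.Gather()
	fmt.Println(len(mfs), err) // 1 <nil>
}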


@@ -0,0 +1,189 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"errors"
)
// Counter is a Metric that represents a single numerical value that only ever
// goes up. That implies that it cannot be used to count items whose number can
// also go down, e.g. the number of currently running goroutines. Those
// "counters" are represented by Gauges.
//
// A Counter is typically used to count requests served, tasks completed, errors
// occurred, etc.
//
// To create Counter instances, use NewCounter.
type Counter interface {
Metric
Collector
// Inc increments the counter by 1. Use Add to increment it by arbitrary
// non-negative values.
Inc()
// Add adds the given value to the counter. It panics if the value is <
// 0.
Add(float64)
}
// CounterOpts is an alias for Opts. See there for doc comments.
type CounterOpts Opts
// NewCounter creates a new Counter based on the provided CounterOpts.
func NewCounter(opts CounterOpts) Counter {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
nil,
opts.ConstLabels,
)
result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
result.init(result) // Init self-collection.
return result
}
type counter struct {
value
}
func (c *counter) Add(v float64) {
if v < 0 {
panic(errors.New("counter cannot decrease in value"))
}
c.value.Add(v)
}
// CounterVec is a Collector that bundles a set of Counters that all share the
// same Desc, but have different values for their variable labels. This is used
// if you want to count the same thing partitioned by various dimensions
// (e.g. number of HTTP requests, partitioned by response code and
// method). Create instances with NewCounterVec.
type CounterVec struct {
*metricVec
}
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
// partitioned by the given label names.
func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
labelNames,
opts.ConstLabels,
)
return &CounterVec{
metricVec: newMetricVec(desc, func(lvs ...string) Metric {
result := &counter{value: value{
desc: desc,
valType: CounterValue,
labelPairs: makeLabelPairs(desc, lvs),
}}
result.init(result) // Init self-collection.
return result
}),
}
}
// GetMetricWithLabelValues returns the Counter for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of
// label values is accessed for the first time, a new Counter is created.
//
// It is possible to call this method without using the returned Counter to only
// create the new Counter but leave it at its starting value 0. See also the
// SummaryVec example.
//
// Keeping the Counter for later use is possible (and should be considered if
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
// Delete can be used to delete the Counter from the CounterVec. In that case,
// the Counter will still exist, but it will not be exported anymore, even if a
// Counter with the same label values is created later.
//
// An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc.
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
// an alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example.
func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
metric, err := m.metricVec.getMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Counter), err
}
return nil, err
}
// GetMetricWith returns the Counter for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is
// accessed for the first time, a new Counter is created. Implications of
// creating a Counter without using it and keeping the Counter for later use are
// the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc.
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
metric, err := m.metricVec.getMetricWith(labels)
if metric != nil {
return metric.(Counter), err
}
return nil, err
}
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. By not returning an
// error, WithLabelValues allows shortcuts like
// myVec.WithLabelValues("404", "GET").Add(42)
func (m *CounterVec) WithLabelValues(lvs ...string) Counter {
return m.metricVec.withLabelValues(lvs...).(Counter)
}
// With works as GetMetricWith, but panics where GetMetricWith would have
// returned an error. By not returning an error, With allows shortcuts like
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
func (m *CounterVec) With(labels Labels) Counter {
return m.metricVec.with(labels).(Counter)
}
// CounterFunc is a Counter whose value is determined at collect time by calling a
// provided function.
//
// To create CounterFunc instances, use NewCounterFunc.
type CounterFunc interface {
Metric
Collector
}
// NewCounterFunc creates a new CounterFunc based on the provided
// CounterOpts. The value reported is determined by calling the given function
// from within the Write method. Take into account that metric collection may
// happen concurrently. If that results in concurrent calls to Write, like in
// the case where a CounterFunc is directly registered with Prometheus, the
// provided function must be concurrency-safe. The function should also honor
// the contract for a Counter (values only go up, not down), but compliance will
// not be checked.
func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
return newValueFunc(NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
nil,
opts.ConstLabels,
), CounterValue, function)
}
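
Taken together, the constructors in this file cover three common cases: a plain Counter, a CounterVec partitioned by labels, and a CounterFunc backed by an existing value. A short usage sketch, assuming the standard client_golang import path and hypothetical metric names:

package main

import (
	"runtime"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// A plain Counter: only Inc and Add (with non-negative values) are allowed.
	jobsDone := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "example_jobs_done_total",
		Help: "Total number of finished jobs.",
	})
	jobsDone.Inc()
	jobsDone.Add(5)

	// A CounterVec partitions the same counter by label values.
	httpReqs := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "example_http_requests_total",
			Help: "HTTP requests, partitioned by status code and method.",
		},
		[]string{"code", "method"},
	)
	// Order-sensitive shortcut vs. the explicit (order-free) label map.
	httpReqs.WithLabelValues("200", "GET").Inc()
	httpReqs.With(prometheus.Labels{"method": "POST", "code": "404"}).Add(3)

	// A CounterFunc reads its value from a function at collect time; the
	// function must be concurrency-safe and should never decrease.
	cgoCalls := prometheus.NewCounterFunc(prometheus.CounterOpts{
		Name: "example_cgo_calls_total",
		Help: "Number of cgo calls made by the current process.",
	}, func() float64 { return float64(runtime.NumCgoCall()) })

	prometheus.MustRegister(jobsDone, httpReqs, cgoCalls)
}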


@@ -0,0 +1,114 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"fmt"
"math"
"testing"
dto "github.com/prometheus/client_model/go"
)
func TestCounterAdd(t *testing.T) {
counter := NewCounter(CounterOpts{
Name: "test",
Help: "test help",
ConstLabels: Labels{"a": "1", "b": "2"},
}).(*counter)
counter.Inc()
if expected, got := 1., math.Float64frombits(counter.valBits); expected != got {
t.Errorf("Expected %f, got %f.", expected, got)
}
counter.Add(42)
if expected, got := 43., math.Float64frombits(counter.valBits); expected != got {
t.Errorf("Expected %f, got %f.", expected, got)
}
if expected, got := "counter cannot decrease in value", decreaseCounter(counter).Error(); expected != got {
t.Errorf("Expected error %q, got %q.", expected, got)
}
m := &dto.Metric{}
counter.Write(m)
if expected, got := `label:<name:"a" value:"1" > label:<name:"b" value:"2" > counter:<value:43 > `, m.String(); expected != got {
t.Errorf("expected %q, got %q", expected, got)
}
}
func decreaseCounter(c *counter) (err error) {
defer func() {
if e := recover(); e != nil {
err = e.(error)
}
}()
c.Add(-1)
return nil
}
func TestCounterVecGetMetricWithInvalidLabelValues(t *testing.T) {
testCases := []struct {
desc string
labels Labels
}{
{
desc: "non utf8 label value",
labels: Labels{"a": "\xFF"},
},
{
desc: "not enough label values",
labels: Labels{},
},
{
desc: "too many label values",
labels: Labels{"a": "1", "b": "2"},
},
}
for _, test := range testCases {
counterVec := NewCounterVec(CounterOpts{
Name: "test",
}, []string{"a"})
labelValues := make([]string, 0, len(test.labels))
for _, val := range test.labels {
labelValues = append(labelValues, val)
}
expectPanic(t, func() {
counterVec.WithLabelValues(labelValues...)
}, fmt.Sprintf("WithLabelValues: expected panic because: %s", test.desc))
expectPanic(t, func() {
counterVec.With(test.labels)
}, fmt.Sprintf("WithLabelValues: expected panic because: %s", test.desc))
if _, err := counterVec.GetMetricWithLabelValues(labelValues...); err == nil {
t.Errorf("GetMetricWithLabelValues: expected error because: %s", test.desc)
}
if _, err := counterVec.GetMetricWith(test.labels); err == nil {
t.Errorf("GetMetricWith: expected error because: %s", test.desc)
}
}
}
func expectPanic(t *testing.T, op func(), errorMsg string) {
defer func() {
if err := recover(); err == nil {
t.Error(errorMsg)
}
}()
op()
}


@@ -0,0 +1,189 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"errors"
"fmt"
"sort"
"strings"
"github.com/golang/protobuf/proto"
"github.com/prometheus/common/model"
dto "github.com/prometheus/client_model/go"
)
// Desc is the descriptor used by every Prometheus Metric. It is essentially
// the immutable meta-data of a Metric. The normal Metric implementations
// included in this package manage their Desc under the hood. Users only have to
// deal with Desc if they use advanced features like the ExpvarCollector or
// custom Collectors and Metrics.
//
// Descriptors registered with the same registry have to fulfill certain
// consistency and uniqueness criteria if they share the same fully-qualified
// name: They must have the same help string and the same label names (aka label
// dimensions) in both constLabels and variableLabels, but they must differ in
// the values of the constLabels.
//
// Descriptors that share the same fully-qualified names and the same label
// values of their constLabels are considered equal.
//
// Use NewDesc to create new Desc instances.
type Desc struct {
// fqName has been built from Namespace, Subsystem, and Name.
fqName string
// help provides some helpful information about this metric.
help string
// constLabelPairs contains precalculated DTO label pairs based on
// the constant labels.
constLabelPairs []*dto.LabelPair
// VariableLabels contains names of labels for which the metric
// maintains variable values.
variableLabels []string
// id is a hash of the values of the ConstLabels and fqName. This
// must be unique among all registered descriptors and can therefore be
// used as an identifier of the descriptor.
id uint64
// dimHash is a hash of the label names (preset and variable) and the
// Help string. Each Desc with the same fqName must have the same
// dimHash.
dimHash uint64
// err is an error that occurred during construction. It is reported at
// registration time.
err error
}
// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
// and will be reported at registration time. variableLabels and constLabels can
// be nil if no such labels should be set. fqName and help must not be empty.
//
// variableLabels only contain the label names. Their label values are variable
// and therefore not part of the Desc. (They are managed within the Metric.)
//
// For constLabels, the label values are constant. Therefore, they are fully
// specified in the Desc. See the Opts documentation for the implications of
// constant labels.
func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
d := &Desc{
fqName: fqName,
help: help,
variableLabels: variableLabels,
}
if help == "" {
d.err = errors.New("empty help string")
return d
}
if !model.IsValidMetricName(model.LabelValue(fqName)) {
d.err = fmt.Errorf("%q is not a valid metric name", fqName)
return d
}
// labelValues contains the label values of const labels (in order of
// their sorted label names) plus the fqName (at position 0).
labelValues := make([]string, 1, len(constLabels)+1)
labelValues[0] = fqName
labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
labelNameSet := map[string]struct{}{}
// First add only the const label names and sort them...
for labelName := range constLabels {
if !checkLabelName(labelName) {
d.err = fmt.Errorf("%q is not a valid label name", labelName)
return d
}
labelNames = append(labelNames, labelName)
labelNameSet[labelName] = struct{}{}
}
sort.Strings(labelNames)
// ... so that we can now add const label values in the order of their names.
for _, labelName := range labelNames {
labelValues = append(labelValues, constLabels[labelName])
}
// Validate the const label values. They can't have a wrong cardinality, so
// use len(labelValues) as expectedNumberOfValues.
if err := validateLabelValues(labelValues, len(labelValues)); err != nil {
d.err = err
return d
}
// Now add the variable label names, but prefix them with something that
// cannot be in a regular label name. That prevents matching the label
// dimension with a different mix between preset and variable labels.
for _, labelName := range variableLabels {
if !checkLabelName(labelName) {
d.err = fmt.Errorf("%q is not a valid label name", labelName)
return d
}
labelNames = append(labelNames, "$"+labelName)
labelNameSet[labelName] = struct{}{}
}
if len(labelNames) != len(labelNameSet) {
d.err = errors.New("duplicate label names")
return d
}
vh := hashNew()
for _, val := range labelValues {
vh = hashAdd(vh, val)
vh = hashAddByte(vh, separatorByte)
}
d.id = vh
// Sort labelNames so that order doesn't matter for the hash.
sort.Strings(labelNames)
// Now hash together (in this order) the help string and the sorted
// label names.
lh := hashNew()
lh = hashAdd(lh, help)
lh = hashAddByte(lh, separatorByte)
for _, labelName := range labelNames {
lh = hashAdd(lh, labelName)
lh = hashAddByte(lh, separatorByte)
}
d.dimHash = lh
d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
for n, v := range constLabels {
d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
Name: proto.String(n),
Value: proto.String(v),
})
}
sort.Sort(LabelPairSorter(d.constLabelPairs))
return d
}
// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
// provided error set. If a collector returning such a descriptor is registered,
// registration will fail with the provided error. NewInvalidDesc can be used by
// a Collector to signal inability to describe itself.
func NewInvalidDesc(err error) *Desc {
return &Desc{
err: err,
}
}
func (d *Desc) String() string {
lpStrings := make([]string, 0, len(d.constLabelPairs))
for _, lp := range d.constLabelPairs {
lpStrings = append(
lpStrings,
fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
)
}
return fmt.Sprintf(
"Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
d.fqName,
d.help,
strings.Join(lpStrings, ","),
d.variableLabels,
)
}
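
NewDesc never returns an error directly; as noted above, problems are recorded in the Desc and only surface at registration time. A small sketch of how a Desc with constant and variable labels is typically used (hypothetical names, standard client_golang import path assumed):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// One variable label ("handler") and one constant label ("zone"): the
	// variable value is supplied per metric, the constant one lives in the
	// Desc itself.
	desc := prometheus.NewDesc(
		"example_requests_total",
		"Requests served, partitioned by handler.",
		[]string{"handler"},
		prometheus.Labels{"zone": "eu-west"},
	)
	fmt.Println(desc) // rendered by the String method defined in this file

	// Descs are typically consumed by custom Collectors via constant
	// metrics; the single value here fills the "handler" variable label.
	m := prometheus.MustNewConstMetric(desc, prometheus.CounterValue, 42, "index")
	fmt.Println(m.Desc() == desc) // true

	// A construction error (here: empty help string) is recorded in the
	// metric's Desc and reported when registration is attempted.
	gauge := prometheus.NewGauge(prometheus.GaugeOpts{Name: "example_gauge"})
	fmt.Println(prometheus.NewRegistry().Register(gauge))
}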


@@ -0,0 +1,17 @@
package prometheus
import (
"testing"
)
func TestNewDescInvalidLabelValues(t *testing.T) {
desc := NewDesc(
"sample_label",
"sample label",
nil,
Labels{"a": "\xFF"},
)
if desc.err == nil {
t.Errorf("NewDesc: expected error because: %s", desc.err)
}
}


@@ -0,0 +1,186 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package prometheus provides metrics primitives to instrument code for
// monitoring. It also offers a registry for metrics. Sub-packages allow
// exposing the registered metrics via HTTP (package promhttp) or pushing them to a
// Pushgateway (package push).
//
// All exported functions and methods are safe to be used concurrently unless
// specified otherwise.
//
// A Basic Example
//
// As a starting point, a very basic usage example:
//
// package main
//
// import (
// "log"
// "net/http"
//
// "github.com/prometheus/client_golang/prometheus"
// "github.com/prometheus/client_golang/prometheus/promhttp"
// )
//
// var (
// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
// Name: "cpu_temperature_celsius",
// Help: "Current temperature of the CPU.",
// })
// hdFailures = prometheus.NewCounterVec(
// prometheus.CounterOpts{
// Name: "hd_errors_total",
// Help: "Number of hard-disk errors.",
// },
// []string{"device"},
// )
// )
//
// func init() {
// // Metrics have to be registered to be exposed:
// prometheus.MustRegister(cpuTemp)
// prometheus.MustRegister(hdFailures)
// }
//
// func main() {
// cpuTemp.Set(65.3)
// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
//
// // The Handler function provides a default handler to expose metrics
// // via an HTTP server. "/metrics" is the usual endpoint for that.
// http.Handle("/metrics", promhttp.Handler())
// log.Fatal(http.ListenAndServe(":8080", nil))
// }
//
//
// This is a complete program that exports two metrics, a Gauge and a Counter,
// the latter with a label attached to turn it into a (one-dimensional) vector.
//
// Metrics
//
// The number of exported identifiers in this package might appear a bit
// overwhelming. However, in addition to the basic plumbing shown in the example
// above, you only need to understand the different metric types and their
// vector versions for basic usage.
//
// Above, you have already touched the Counter and the Gauge. There are two more
// advanced metric types: the Summary and Histogram. A more thorough description
// of those four metric types can be found in the Prometheus docs:
// https://prometheus.io/docs/concepts/metric_types/
//
// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
// Prometheus server not to assume anything about its type.
//
// In addition to the fundamental metric types Gauge, Counter, Summary,
// Histogram, and Untyped, a very important part of the Prometheus data model is
// the partitioning of samples along dimensions called labels, which results in
// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
// HistogramVec, and UntypedVec.
//
// While only the fundamental metric types implement the Metric interface, both
// the metrics and their vector versions implement the Collector interface. A
// Collector manages the collection of a number of Metrics, but for convenience,
// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
// SummaryVec, HistogramVec, and UntypedVec are not.
//
// To create instances of Metrics and their vector versions, you need a suitable
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or
// UntypedOpts.
//
// Custom Collectors and constant Metrics
//
// While you could create your own implementations of Metric, most likely you
// will only ever implement the Collector interface on your own. At a first
// glance, a custom Collector seems handy to bundle Metrics for common
// registration (with the prime example of the different metric vectors above,
// which bundle all the metrics of the same name but with different labels).
//
// There is a more involved use case, too: If you already have metrics
// available, created outside of the Prometheus context, you don't need the
// interface of the various Metric types. You essentially want to mirror the
// existing numbers into Prometheus Metrics during collection. Your own
// implementation of the Collector interface is perfect for that. You can create
// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
// NewConstSummary (and their respective Must… versions). That will happen in
// the Collect method. The Describe method has to return separate Desc
// instances, representative of the “throw-away” metrics to be created later.
// NewDesc comes in handy to create those Desc instances.
//
// The Collector example illustrates the use case. You can also look at the
// source code of the processCollector (mirroring process metrics), the
// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
// metrics) as examples that are used in this package itself.
//
// If you just need to call a function to get a single float value to collect as
// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
// shortcuts.
//
// Advanced Uses of the Registry
//
// While MustRegister is by far the most common way of registering a Collector,
// sometimes you might want to handle the errors the registration might cause.
// As suggested by the name, MustRegister panics if an error occurs. With the
// Register function, the error is returned and can be handled.
//
// An error is returned if the registered Collector is incompatible or
// inconsistent with already registered metrics. The registry aims for
// consistency of the collected metrics according to the Prometheus data model.
// Inconsistencies are ideally detected at registration time, not at collect
// time. The former will usually be detected at start-up time of a program,
// while the latter will only happen at scrape time, possibly not even on the
// first scrape if the inconsistency only becomes relevant later. That is the
// main reason why a Collector and a Metric have to describe themselves to the
// registry.
//
// So far, everything we did operated on the so-called default registry, as it
// can be found in the global DefaultRegistry variable. With NewRegistry, you
// can create a custom registry, or you can even implement the Registerer or
// Gatherer interfaces yourself. The methods Register and Unregister work in the
// same way on a custom registry as the global functions Register and Unregister
// on the default registry.
//
// There are a number of uses for custom registries: You can use registries with
// special properties, see NewPedanticRegistry. You can avoid global state, as
// it is imposed by the DefaultRegistry. You can use multiple registries at the
// same time to expose different metrics in different ways. You can use separate
// registries for testing purposes.
//
// Also note that the DefaultRegistry comes registered with a Collector for Go
// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
// NewProcessCollector). With a custom registry, you are in control and decide
// yourself about the Collectors to register.
//
// HTTP Exposition
//
// The Registry implements the Gatherer interface. The caller of the Gather
// method can then expose the gathered metrics in some way. Usually, the metrics
// are served via HTTP on the /metrics endpoint. That's happening in the example
// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
// (The top-level functions in the prometheus package are deprecated.)
//
// Pushing to the Pushgateway
//
// Functions for pushing to the Pushgateway can be found in the push sub-package.
//
// Graphite Bridge
//
// Functions and examples to push metrics from a Gatherer to Graphite can be
// found in the graphite sub-package.
//
// Other Means of Exposition
//
// More ways of exposing metrics can easily be added by following the approaches
// of the existing implementations.
package prometheus
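
The "Advanced Uses of the Registry" and "HTTP Exposition" sections above describe custom registries and the promhttp sub-package without a combined example. A minimal sketch, assuming the promhttp package from this repository and a made-up gauge:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A custom registry avoids the global default registry and its
	// pre-registered Go and process collectors.
	reg := prometheus.NewRegistry()

	queueDepth := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "example_queue_depth",
		Help: "Items currently waiting in the queue.",
	})
	// Register returns an error instead of panicking like MustRegister.
	if err := reg.Register(queueDepth); err != nil {
		log.Fatal(err)
	}
	queueDepth.Set(12)

	// The registry is a Gatherer; promhttp turns it into an HTTP handler.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}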


@@ -0,0 +1,118 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus_test
import "github.com/prometheus/client_golang/prometheus"
// ClusterManager is an example for a system that might have been built without
// Prometheus in mind. It models a central manager of jobs running in a
// cluster. To turn it into something that collects Prometheus metrics, we
// simply add the two methods required for the Collector interface.
//
// An additional challenge is that multiple instances of the ClusterManager are
// run within the same binary, each in charge of a different zone. We need to
// make use of ConstLabels to be able to register each ClusterManager instance
// with Prometheus.
type ClusterManager struct {
Zone string
OOMCountDesc *prometheus.Desc
RAMUsageDesc *prometheus.Desc
// ... many more fields
}
// ReallyExpensiveAssessmentOfTheSystemState is a mock for the data gathering a
// real cluster manager would have to do. Since it may actually be really
// expensive, it must only be called once per collection. This implementation,
// obviously, only returns some made-up data.
func (c *ClusterManager) ReallyExpensiveAssessmentOfTheSystemState() (
oomCountByHost map[string]int, ramUsageByHost map[string]float64,
) {
// Just example fake data.
oomCountByHost = map[string]int{
"foo.example.org": 42,
"bar.example.org": 2001,
}
ramUsageByHost = map[string]float64{
"foo.example.org": 6.023e23,
"bar.example.org": 3.14,
}
return
}
// Describe simply sends the two Descs in the struct to the channel.
func (c *ClusterManager) Describe(ch chan<- *prometheus.Desc) {
ch <- c.OOMCountDesc
ch <- c.RAMUsageDesc
}
// Collect first triggers the ReallyExpensiveAssessmentOfTheSystemState. Then it
// creates constant metrics for each host on the fly based on the returned data.
//
// Note that Collect could be called concurrently, so we depend on
// ReallyExpensiveAssessmentOfTheSystemState to be concurrency-safe.
func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) {
oomCountByHost, ramUsageByHost := c.ReallyExpensiveAssessmentOfTheSystemState()
for host, oomCount := range oomCountByHost {
ch <- prometheus.MustNewConstMetric(
c.OOMCountDesc,
prometheus.CounterValue,
float64(oomCount),
host,
)
}
for host, ramUsage := range ramUsageByHost {
ch <- prometheus.MustNewConstMetric(
c.RAMUsageDesc,
prometheus.GaugeValue,
ramUsage,
host,
)
}
}
// NewClusterManager creates the two Descs OOMCountDesc and RAMUsageDesc. Note
// that the zone is set as a ConstLabel. (It's different in each instance of the
// ClusterManager, but constant over the lifetime of an instance.) Then there is
// a variable label "host", since we want to partition the collected metrics by
// host. Since all Descs created in this way are consistent across instances,
// with a guaranteed distinction by the "zone" label, we can register different
// ClusterManager instances with the same registry.
func NewClusterManager(zone string) *ClusterManager {
return &ClusterManager{
Zone: zone,
OOMCountDesc: prometheus.NewDesc(
"clustermanager_oom_crashes_total",
"Number of OOM crashes.",
[]string{"host"},
prometheus.Labels{"zone": zone},
),
RAMUsageDesc: prometheus.NewDesc(
"clustermanager_ram_usage_bytes",
"RAM usage as reported to the cluster manager.",
[]string{"host"},
prometheus.Labels{"zone": zone},
),
}
}
func ExampleCollector() {
workerDB := NewClusterManager("db")
workerCA := NewClusterManager("ca")
// Since we are dealing with custom Collector implementations, it might
// be a good idea to try it out with a pedantic registry.
reg := prometheus.NewPedanticRegistry()
reg.MustRegister(workerDB)
reg.MustRegister(workerCA)
}


@@ -0,0 +1,71 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus_test
import (
"net/http"
"github.com/prometheus/client_golang/prometheus"
)
var (
// apiRequestDuration tracks the duration separate for each HTTP status
// class (1xx, 2xx, ...). This creates a fair amount of time series on
// the Prometheus server. Usually, you would track the duration of
// serving HTTP requests without partitioning by outcome. Do something
// like this only if needed. Also note how only status classes are
// tracked, not every single status code. The latter would create an
// even larger amount of time series. Request counters partitioned by
// status code are usually OK as each counter only creates one time
// series. Histograms are way more expensive, so partition with care and
// only where you really need separate latency tracking. Partitioning by
// status class is only an example. In concrete cases, other partitions
// might make more sense.
apiRequestDuration = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "api_request_duration_seconds",
Help: "Histogram for the request duration of the public API, partitioned by status class.",
Buckets: prometheus.ExponentialBuckets(0.1, 1.5, 5),
},
[]string{"status_class"},
)
)
func handler(w http.ResponseWriter, r *http.Request) {
status := http.StatusOK
// The ObserverFunc gets called by the deferred ObserveDuration and
// decides which Histogram's Observe method is called.
timer := prometheus.NewTimer(prometheus.ObserverFunc(func(v float64) {
switch {
case status >= 500: // Server error.
apiRequestDuration.WithLabelValues("5xx").Observe(v)
case status >= 400: // Client error.
apiRequestDuration.WithLabelValues("4xx").Observe(v)
case status >= 300: // Redirection.
apiRequestDuration.WithLabelValues("3xx").Observe(v)
case status >= 200: // Success.
apiRequestDuration.WithLabelValues("2xx").Observe(v)
default: // Informational.
apiRequestDuration.WithLabelValues("1xx").Observe(v)
}
}))
defer timer.ObserveDuration()
// Handle the request. Set status accordingly.
// ...
}
func ExampleTimer_complex() {
http.HandleFunc("/api", handler)
}


@@ -0,0 +1,48 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus_test
import (
"os"
"github.com/prometheus/client_golang/prometheus"
)
var (
// If a function is called rarely (i.e. not more often than scrapes
// happen) or ideally only once (like in a batch job), it can make sense
// to use a Gauge for timing the function call. For timing a batch job
// and pushing the result to a Pushgateway, see also the comprehensive
// example in the push package.
funcDuration = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "example_function_duration_seconds",
Help: "Duration of the last call of an example function.",
})
)
func run() error {
// The Set method of the Gauge is used to observe the duration.
timer := prometheus.NewTimer(prometheus.ObserverFunc(funcDuration.Set))
defer timer.ObserveDuration()
// Do something. Return errors as encountered. The use of 'defer' above
// makes sure the function is still timed properly.
return nil
}
func ExampleTimer_gauge() {
if err := run(); err != nil {
os.Exit(1)
}
}


@@ -0,0 +1,40 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus_test
import (
"math/rand"
"time"
"github.com/prometheus/client_golang/prometheus"
)
var (
requestDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "example_request_duration_seconds",
Help: "Histogram for the runtime of a simple example function.",
Buckets: prometheus.LinearBuckets(0.01, 0.01, 10),
})
)
func ExampleTimer() {
// timer times this example function. It uses a Histogram, but a Summary
// would also work, as both implement Observer. Check out
// https://prometheus.io/docs/practices/histograms/ for differences.
timer := prometheus.NewTimer(requestDuration)
defer timer.ObserveDuration()
// Do something here that takes time.
time.Sleep(time.Duration(rand.NormFloat64()*10000+50000) * time.Microsecond)
}


@@ -0,0 +1,754 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus_test
import (
"bytes"
"fmt"
"math"
"net/http"
"runtime"
"sort"
"strings"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/expfmt"
"github.com/golang/protobuf/proto"
"github.com/prometheus/client_golang/prometheus"
)
func ExampleGauge() {
opsQueued := prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: "our_company",
Subsystem: "blob_storage",
Name: "ops_queued",
Help: "Number of blob storage operations waiting to be processed.",
})
prometheus.MustRegister(opsQueued)
// 10 operations queued by the goroutine managing incoming requests.
opsQueued.Add(10)
// A worker goroutine has picked up a waiting operation.
opsQueued.Dec()
// And once more...
opsQueued.Dec()
}
func ExampleGaugeVec() {
opsQueued := prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: "our_company",
Subsystem: "blob_storage",
Name: "ops_queued",
Help: "Number of blob storage operations waiting to be processed, partitioned by user and type.",
},
[]string{
// Which user has requested the operation?
"user",
// Of what type is the operation?
"type",
},
)
prometheus.MustRegister(opsQueued)
// Increase a value using compact (but order-sensitive!) WithLabelValues().
opsQueued.WithLabelValues("bob", "put").Add(4)
// Increase a value with a label map using With. More verbose, but order
// doesn't matter anymore.
opsQueued.With(prometheus.Labels{"type": "delete", "user": "alice"}).Inc()
}
func ExampleGaugeFunc() {
if err := prometheus.Register(prometheus.NewGaugeFunc(
prometheus.GaugeOpts{
Subsystem: "runtime",
Name: "goroutines_count",
Help: "Number of goroutines that currently exist.",
},
func() float64 { return float64(runtime.NumGoroutine()) },
)); err == nil {
fmt.Println("GaugeFunc 'goroutines_count' registered.")
}
// Note that the count of goroutines is a gauge (and not a counter) as
// it can go up and down.
// Output:
// GaugeFunc 'goroutines_count' registered.
}
func ExampleCounter() {
pushCounter := prometheus.NewCounter(prometheus.CounterOpts{
Name: "repository_pushes", // Note: No help string...
})
err := prometheus.Register(pushCounter) // ... so this will return an error.
if err != nil {
fmt.Println("Push counter couldn't be registered, no counting will happen:", err)
return
}
// Try it once more, this time with a help string.
pushCounter = prometheus.NewCounter(prometheus.CounterOpts{
Name: "repository_pushes",
Help: "Number of pushes to external repository.",
})
err = prometheus.Register(pushCounter)
if err != nil {
fmt.Println("Push counter couldn't be registered AGAIN, no counting will happen:", err)
return
}
pushComplete := make(chan struct{})
// TODO: Start a goroutine that performs repository pushes and reports
// each completion via the channel.
for range pushComplete {
pushCounter.Inc()
}
// Output:
// Push counter couldn't be registered, no counting will happen: descriptor Desc{fqName: "repository_pushes", help: "", constLabels: {}, variableLabels: []} is invalid: empty help string
}
func ExampleCounterVec() {
httpReqs := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "http_requests_total",
Help: "How many HTTP requests processed, partitioned by status code and HTTP method.",
},
[]string{"code", "method"},
)
prometheus.MustRegister(httpReqs)
httpReqs.WithLabelValues("404", "POST").Add(42)
// If you have to access the same set of labels very frequently, it
// might be good to retrieve the metric only once and keep a handle to
// it. But beware of deletion of that metric, see below!
m := httpReqs.WithLabelValues("200", "GET")
for i := 0; i < 1000000; i++ {
m.Inc()
}
// Delete a metric from the vector. If you have previously kept a handle
// to that metric (as above), future updates via that handle will go
// unseen (even if you re-create a metric with the same label set
// later).
httpReqs.DeleteLabelValues("200", "GET")
// Same thing with the more verbose Labels syntax.
httpReqs.Delete(prometheus.Labels{"method": "GET", "code": "200"})
}
func ExampleInstrumentHandler() {
// Handle the "/doc" endpoint with the standard http.FileServer handler.
// By wrapping the handler with InstrumentHandler, request count,
// request and response sizes, and request latency are automatically
// exported to Prometheus, partitioned by HTTP status code and method
// and by the handler name (here "fileserver").
http.Handle("/doc", prometheus.InstrumentHandler(
"fileserver", http.FileServer(http.Dir("/usr/share/doc")),
))
// The Prometheus handler still has to be registered to handle the
// "/metrics" endpoint. The handler returned by prometheus.Handler() is
// already instrumented - with "prometheus" as the handler name. In this
// example, we want the handler name to be "metrics", so we instrument
// the uninstrumented Prometheus handler ourselves.
http.Handle("/metrics", prometheus.InstrumentHandler(
"metrics", prometheus.UninstrumentedHandler(),
))
}
func ExampleLabelPairSorter() {
labelPairs := []*dto.LabelPair{
{Name: proto.String("status"), Value: proto.String("404")},
{Name: proto.String("method"), Value: proto.String("get")},
}
sort.Sort(prometheus.LabelPairSorter(labelPairs))
fmt.Println(labelPairs)
// Output:
// [name:"method" value:"get" name:"status" value:"404" ]
}
func ExampleRegister() {
// Imagine you have a worker pool and want to count the tasks completed.
taskCounter := prometheus.NewCounter(prometheus.CounterOpts{
Subsystem: "worker_pool",
Name: "completed_tasks_total",
Help: "Total number of tasks completed.",
})
// This will register fine.
if err := prometheus.Register(taskCounter); err != nil {
fmt.Println(err)
} else {
fmt.Println("taskCounter registered.")
}
// Don't forget to tell the HTTP server about the Prometheus handler.
// (In a real program, you still need to start the HTTP server...)
http.Handle("/metrics", prometheus.Handler())
// Now you can start workers and give every one of them a pointer to
// taskCounter and let it increment it whenever it completes a task.
taskCounter.Inc() // This has to happen somewhere in the worker code.
// But wait, you want to see how individual workers perform. So you need
// a vector of counters, with one element for each worker.
taskCounterVec := prometheus.NewCounterVec(
prometheus.CounterOpts{
Subsystem: "worker_pool",
Name: "completed_tasks_total",
Help: "Total number of tasks completed.",
},
[]string{"worker_id"},
)
// Registering will fail because we already have a metric of that name.
if err := prometheus.Register(taskCounterVec); err != nil {
fmt.Println("taskCounterVec not registered:", err)
} else {
fmt.Println("taskCounterVec registered.")
}
// To fix, first unregister the old taskCounter.
if prometheus.Unregister(taskCounter) {
fmt.Println("taskCounter unregistered.")
}
// Try registering taskCounterVec again.
if err := prometheus.Register(taskCounterVec); err != nil {
fmt.Println("taskCounterVec not registered:", err)
} else {
fmt.Println("taskCounterVec registered.")
}
// Bummer! Still doesn't work.
// Prometheus will not allow you to ever export metrics with
// inconsistent help strings or label names. After unregistering, the
// unregistered metrics will cease to show up in the /metrics HTTP
// response, but the registry still remembers that those metrics had
// been exported before. For this example, we will now choose a
// different name. (In a real program, you would obviously not export
// the obsolete metric in the first place.)
taskCounterVec = prometheus.NewCounterVec(
prometheus.CounterOpts{
Subsystem: "worker_pool",
Name: "completed_tasks_by_id",
Help: "Total number of tasks completed.",
},
[]string{"worker_id"},
)
if err := prometheus.Register(taskCounterVec); err != nil {
fmt.Println("taskCounterVec not registered:", err)
} else {
fmt.Println("taskCounterVec registered.")
}
// Finally it worked!
// The workers have to tell taskCounterVec their id to increment the
// right element in the metric vector.
taskCounterVec.WithLabelValues("42").Inc() // Code from worker 42.
// Each worker could also keep a reference to their own counter element
// around. Pick the counter at initialization time of the worker.
myCounter := taskCounterVec.WithLabelValues("42") // From worker 42 initialization code.
myCounter.Inc() // Somewhere in the code of that worker.
// Note that something like WithLabelValues("42", "spurious arg") would
// panic (because you have provided too many label values). If you want
// to get an error instead, use GetMetricWithLabelValues(...).
notMyCounter, err := taskCounterVec.GetMetricWithLabelValues("42", "spurious arg")
if err != nil {
fmt.Println("Worker initialization failed:", err)
}
if notMyCounter == nil {
fmt.Println("notMyCounter is nil.")
}
// A different (and somewhat tricky) approach is to use
// ConstLabels. ConstLabels are pairs of label names and label values
// that never change. You might ask what those labels are good for (and
// rightfully so - if they never change, they could as well be part of
// the metric name). There are essentially two use-cases: The first is
// if labels are constant throughout the lifetime of a binary execution,
// but they vary over time or between different instances of a running
// binary. The second is what we have here: Each worker creates and
// registers its own Counter instance where the only difference is in the
// value of the ConstLabels. Those Counters can all be registered
// because the different ConstLabel values guarantee that each worker
// will increment a different Counter metric.
counterOpts := prometheus.CounterOpts{
Subsystem: "worker_pool",
Name: "completed_tasks",
Help: "Total number of tasks completed.",
ConstLabels: prometheus.Labels{"worker_id": "42"},
}
taskCounterForWorker42 := prometheus.NewCounter(counterOpts)
if err := prometheus.Register(taskCounterForWorker42); err != nil {
fmt.Println("taskCounterVForWorker42 not registered:", err)
} else {
fmt.Println("taskCounterForWorker42 registered.")
}
// Obviously, in real code, taskCounterForWorker42 would be a member
// variable of a worker struct, and the "42" would be retrieved with a
// GetId() method or something. The Counter would be created and
// registered in the initialization code of the worker.
// For the creation of the next Counter, we can recycle
// counterOpts. Just change the ConstLabels.
counterOpts.ConstLabels = prometheus.Labels{"worker_id": "2001"}
taskCounterForWorker2001 := prometheus.NewCounter(counterOpts)
if err := prometheus.Register(taskCounterForWorker2001); err != nil {
fmt.Println("taskCounterVForWorker2001 not registered:", err)
} else {
fmt.Println("taskCounterForWorker2001 registered.")
}
taskCounterForWorker2001.Inc()
taskCounterForWorker42.Inc()
taskCounterForWorker2001.Inc()
// Yet another approach would be to turn the workers themselves into
// Collectors and register them. See the Collector example for details.
// Output:
// taskCounter registered.
// taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "worker_pool_completed_tasks_total", help: "Total number of tasks completed.", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string
// taskCounter unregistered.
// taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "worker_pool_completed_tasks_total", help: "Total number of tasks completed.", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string
// taskCounterVec registered.
// Worker initialization failed: inconsistent label cardinality
// notMyCounter is nil.
// taskCounterForWorker42 registered.
// taskCounterForWorker2001 registered.
}
func ExampleSummary() {
temps := prometheus.NewSummary(prometheus.SummaryOpts{
Name: "pond_temperature_celsius",
Help: "The temperature of the frog pond.",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
})
// Simulate some observations.
for i := 0; i < 1000; i++ {
temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10)
}
// Just for demonstration, let's check the state of the summary by
// (ab)using its Write method (which is usually only used by Prometheus
// internally).
metric := &dto.Metric{}
temps.Write(metric)
fmt.Println(proto.MarshalTextString(metric))
// Output:
// summary: <
// sample_count: 1000
// sample_sum: 29969.50000000001
// quantile: <
// quantile: 0.5
// value: 31.1
// >
// quantile: <
// quantile: 0.9
// value: 41.3
// >
// quantile: <
// quantile: 0.99
// value: 41.9
// >
// >
}
func ExampleSummaryVec() {
temps := prometheus.NewSummaryVec(
prometheus.SummaryOpts{
Name: "pond_temperature_celsius",
Help: "The temperature of the frog pond.",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
},
[]string{"species"},
)
// Simulate some observations.
for i := 0; i < 1000; i++ {
temps.WithLabelValues("litoria-caerulea").Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10)
temps.WithLabelValues("lithobates-catesbeianus").Observe(32 + math.Floor(100*math.Cos(float64(i)*0.11))/10)
}
// Create a Summary without any observations.
temps.WithLabelValues("leiopelma-hochstetteri")
// Just for demonstration, let's check the state of the summary vector
// by registering it with a custom registry and then let it collect the
// metrics.
reg := prometheus.NewRegistry()
reg.MustRegister(temps)
metricFamilies, err := reg.Gather()
if err != nil || len(metricFamilies) != 1 {
panic("unexpected behavior of custom test registry")
}
fmt.Println(proto.MarshalTextString(metricFamilies[0]))
// Output:
// name: "pond_temperature_celsius"
// help: "The temperature of the frog pond."
// type: SUMMARY
// metric: <
// label: <
// name: "species"
// value: "leiopelma-hochstetteri"
// >
// summary: <
// sample_count: 0
// sample_sum: 0
// quantile: <
// quantile: 0.5
// value: nan
// >
// quantile: <
// quantile: 0.9
// value: nan
// >
// quantile: <
// quantile: 0.99
// value: nan
// >
// >
// >
// metric: <
// label: <
// name: "species"
// value: "lithobates-catesbeianus"
// >
// summary: <
// sample_count: 1000
// sample_sum: 31956.100000000017
// quantile: <
// quantile: 0.5
// value: 32.4
// >
// quantile: <
// quantile: 0.9
// value: 41.4
// >
// quantile: <
// quantile: 0.99
// value: 41.9
// >
// >
// >
// metric: <
// label: <
// name: "species"
// value: "litoria-caerulea"
// >
// summary: <
// sample_count: 1000
// sample_sum: 29969.50000000001
// quantile: <
// quantile: 0.5
// value: 31.1
// >
// quantile: <
// quantile: 0.9
// value: 41.3
// >
// quantile: <
// quantile: 0.99
// value: 41.9
// >
// >
// >
}
func ExampleNewConstSummary() {
desc := prometheus.NewDesc(
"http_request_duration_seconds",
"A summary of the HTTP request durations.",
[]string{"code", "method"},
prometheus.Labels{"owner": "example"},
)
// Create a constant summary from values we got from a 3rd party telemetry system.
s := prometheus.MustNewConstSummary(
desc,
4711, 403.34,
map[float64]float64{0.5: 42.3, 0.9: 323.3},
"200", "get",
)
// Just for demonstration, let's check the state of the summary by
// (ab)using its Write method (which is usually only used by Prometheus
// internally).
metric := &dto.Metric{}
s.Write(metric)
fmt.Println(proto.MarshalTextString(metric))
// Output:
// label: <
// name: "code"
// value: "200"
// >
// label: <
// name: "method"
// value: "get"
// >
// label: <
// name: "owner"
// value: "example"
// >
// summary: <
// sample_count: 4711
// sample_sum: 403.34
// quantile: <
// quantile: 0.5
// value: 42.3
// >
// quantile: <
// quantile: 0.9
// value: 323.3
// >
// >
}
func ExampleHistogram() {
temps := prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "pond_temperature_celsius",
Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells.
Buckets: prometheus.LinearBuckets(20, 5, 5), // 5 buckets, each 5 centigrade wide.
})
// Simulate some observations.
for i := 0; i < 1000; i++ {
temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10)
}
// Just for demonstration, let's check the state of the histogram by
// (ab)using its Write method (which is usually only used by Prometheus
// internally).
metric := &dto.Metric{}
temps.Write(metric)
fmt.Println(proto.MarshalTextString(metric))
// Output:
// histogram: <
// sample_count: 1000
// sample_sum: 29969.50000000001
// bucket: <
// cumulative_count: 192
// upper_bound: 20
// >
// bucket: <
// cumulative_count: 366
// upper_bound: 25
// >
// bucket: <
// cumulative_count: 501
// upper_bound: 30
// >
// bucket: <
// cumulative_count: 638
// upper_bound: 35
// >
// bucket: <
// cumulative_count: 816
// upper_bound: 40
// >
// >
}
func ExampleNewConstHistogram() {
desc := prometheus.NewDesc(
"http_request_duration_seconds",
"A histogram of the HTTP request durations.",
[]string{"code", "method"},
prometheus.Labels{"owner": "example"},
)
// Create a constant histogram from values we got from a 3rd party telemetry system.
h := prometheus.MustNewConstHistogram(
desc,
4711, 403.34,
map[float64]uint64{25: 121, 50: 2403, 100: 3221, 200: 4233},
"200", "get",
)
// Just for demonstration, let's check the state of the histogram by
// (ab)using its Write method (which is usually only used by Prometheus
// internally).
metric := &dto.Metric{}
h.Write(metric)
fmt.Println(proto.MarshalTextString(metric))
// Output:
// label: <
// name: "code"
// value: "200"
// >
// label: <
// name: "method"
// value: "get"
// >
// label: <
// name: "owner"
// value: "example"
// >
// histogram: <
// sample_count: 4711
// sample_sum: 403.34
// bucket: <
// cumulative_count: 121
// upper_bound: 25
// >
// bucket: <
// cumulative_count: 2403
// upper_bound: 50
// >
// bucket: <
// cumulative_count: 3221
// upper_bound: 100
// >
// bucket: <
// cumulative_count: 4233
// upper_bound: 200
// >
// >
}
func ExampleAlreadyRegisteredError() {
reqCounter := prometheus.NewCounter(prometheus.CounterOpts{
Name: "requests_total",
Help: "The total number of requests served.",
})
if err := prometheus.Register(reqCounter); err != nil {
if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
// A counter for that metric has been registered before.
// Use the old counter from now on.
reqCounter = are.ExistingCollector.(prometheus.Counter)
} else {
// Something else went wrong!
panic(err)
}
}
reqCounter.Inc()
}
func ExampleGatherers() {
reg := prometheus.NewRegistry()
temp := prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Name: "temperature_kelvin",
Help: "Temperature in Kelvin.",
},
[]string{"location"},
)
reg.MustRegister(temp)
temp.WithLabelValues("outside").Set(273.14)
temp.WithLabelValues("inside").Set(298.44)
var parser expfmt.TextParser
text := `
# TYPE humidity_percent gauge
# HELP humidity_percent Humidity in %.
humidity_percent{location="outside"} 45.4
humidity_percent{location="inside"} 33.2
# TYPE temperature_kelvin gauge
# HELP temperature_kelvin Temperature in Kelvin.
temperature_kelvin{location="somewhere else"} 4.5
`
parseText := func() ([]*dto.MetricFamily, error) {
parsed, err := parser.TextToMetricFamilies(strings.NewReader(text))
if err != nil {
return nil, err
}
var result []*dto.MetricFamily
for _, mf := range parsed {
result = append(result, mf)
}
return result, nil
}
gatherers := prometheus.Gatherers{
reg,
prometheus.GathererFunc(parseText),
}
gathering, err := gatherers.Gather()
if err != nil {
fmt.Println(err)
}
out := &bytes.Buffer{}
for _, mf := range gathering {
if _, err := expfmt.MetricFamilyToText(out, mf); err != nil {
panic(err)
}
}
fmt.Print(out.String())
fmt.Println("----------")
// Note how the temperature_kelvin metric family has been merged from
// different sources. Now try
text = `
# TYPE humidity_percent gauge
# HELP humidity_percent Humidity in %.
humidity_percent{location="outside"} 45.4
humidity_percent{location="inside"} 33.2
# TYPE temperature_kelvin gauge
# HELP temperature_kelvin Temperature in Kelvin.
# Duplicate metric:
temperature_kelvin{location="outside"} 265.3
# Wrong labels:
temperature_kelvin 4.5
`
gathering, err = gatherers.Gather()
if err != nil {
fmt.Println(err)
}
// Note that still as many metrics as possible are returned:
out.Reset()
for _, mf := range gathering {
if _, err := expfmt.MetricFamilyToText(out, mf); err != nil {
panic(err)
}
}
fmt.Print(out.String())
// Output:
// # HELP humidity_percent Humidity in %.
// # TYPE humidity_percent gauge
// humidity_percent{location="inside"} 33.2
// humidity_percent{location="outside"} 45.4
// # HELP temperature_kelvin Temperature in Kelvin.
// # TYPE temperature_kelvin gauge
// temperature_kelvin{location="inside"} 298.44
// temperature_kelvin{location="outside"} 273.14
// temperature_kelvin{location="somewhere else"} 4.5
// ----------
// 2 error(s) occurred:
// * collected metric temperature_kelvin label:<name:"location" value:"outside" > gauge:<value:265.3 > was collected before with the same name and label values
// * collected metric temperature_kelvin gauge:<value:4.5 > has label dimensions inconsistent with previously collected metrics in the same metric family
// # HELP humidity_percent Humidity in %.
// # TYPE humidity_percent gauge
// humidity_percent{location="inside"} 33.2
// humidity_percent{location="outside"} 45.4
// # HELP temperature_kelvin Temperature in Kelvin.
// # TYPE temperature_kelvin gauge
// temperature_kelvin{location="inside"} 298.44
// temperature_kelvin{location="outside"} 273.14
}

View File

@@ -0,0 +1,119 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"encoding/json"
"expvar"
)
type expvarCollector struct {
exports map[string]*Desc
}
// NewExpvarCollector returns a newly allocated expvar Collector that still has
// to be registered with a Prometheus registry.
//
// An expvar Collector collects metrics from the expvar interface. It provides a
// quick way to expose numeric values that are already exported via expvar as
// Prometheus metrics. Note that the data models of expvar and Prometheus are
// fundamentally different, and that the expvar Collector is inherently slower
// than native Prometheus metrics. Thus, the expvar Collector is probably great
// for experiments and prototyping, but you should seriously consider a more
// direct implementation of Prometheus metrics for monitoring production
// systems.
//
// The exports map has the following meaning:
//
// The keys in the map correspond to expvar keys, i.e. for every expvar key you
// want to export as Prometheus metric, you need an entry in the exports
// map. The descriptor mapped to each key describes how to export the expvar
// value. It defines the name and the help string of the Prometheus metric
// proxying the expvar value. The type will always be Untyped.
//
// For descriptors without variable labels, the expvar value must be a number or
// a bool. The number is then directly exported as the Prometheus sample
// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
// that are not numbers or bools are silently ignored.
//
// If the descriptor has one variable label, the expvar value must be an expvar
// map. The keys in the expvar map become the various values of the one
// Prometheus label. The values in the expvar map must be numbers or bools again
// as above.
//
// For descriptors with more than one variable label, the expvar must be a
// nested expvar map, i.e. where the values of the topmost map are maps again
// etc. until a depth is reached that corresponds to the number of labels. The
// leaves of that structure must be numbers or bools as above to serve as the
// sample values.
//
// Anything that does not fit into the scheme above is silently ignored.
func NewExpvarCollector(exports map[string]*Desc) Collector {
return &expvarCollector{
exports: exports,
}
}
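// A minimal sketch of an exports map for NewExpvarCollector, assuming an
// expvar int "open_files" and an expvar map "requests" keyed by HTTP method.
// The expvar keys and metric names here are illustrative only.
func exampleExpvarExports() map[string]*Desc {
	return map[string]*Desc{
		"open_files": NewDesc(
			"expvar_open_files",
			"Number of open files, proxied from expvar.",
			nil, nil,
		),
		"requests": NewDesc(
			"expvar_requests_total",
			"Requests partitioned by HTTP method, proxied from an expvar map.",
			[]string{"method"}, nil,
		),
	}
}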
// Describe implements Collector.
func (e *expvarCollector) Describe(ch chan<- *Desc) {
for _, desc := range e.exports {
ch <- desc
}
}
// Collect implements Collector.
func (e *expvarCollector) Collect(ch chan<- Metric) {
for name, desc := range e.exports {
var m Metric
expVar := expvar.Get(name)
if expVar == nil {
continue
}
var v interface{}
labels := make([]string, len(desc.variableLabels))
if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
ch <- NewInvalidMetric(desc, err)
continue
}
var processValue func(v interface{}, i int)
processValue = func(v interface{}, i int) {
if i >= len(labels) {
copiedLabels := append(make([]string, 0, len(labels)), labels...)
switch v := v.(type) {
case float64:
m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
case bool:
if v {
m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
} else {
m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
}
default:
return
}
ch <- m
return
}
vm, ok := v.(map[string]interface{})
if !ok {
return
}
for lv, val := range vm {
labels[i] = lv
processValue(val, i+1)
}
}
processValue(v, 0)
}
}

View File

@@ -0,0 +1,97 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus_test
import (
"expvar"
"fmt"
"sort"
"strings"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/client_golang/prometheus"
)
func ExampleNewExpvarCollector() {
expvarCollector := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{
"memstats": prometheus.NewDesc(
"expvar_memstats",
"All numeric memstats as one metric family. Not a good role-model, actually... ;-)",
[]string{"type"}, nil,
),
"lone-int": prometheus.NewDesc(
"expvar_lone_int",
"Just an expvar int as an example.",
nil, nil,
),
"http-request-map": prometheus.NewDesc(
"expvar_http_request_total",
"How many http requests processed, partitioned by status code and http method.",
[]string{"code", "method"}, nil,
),
})
prometheus.MustRegister(expvarCollector)
// The Prometheus part is done here. But to show that this example is
// doing anything, we have to manually export something via expvar. In
// real-life use-cases, some library would already have exported via
// expvar what we want to re-export as Prometheus metrics.
expvar.NewInt("lone-int").Set(42)
expvarMap := expvar.NewMap("http-request-map")
var (
expvarMap1, expvarMap2 expvar.Map
expvarInt11, expvarInt12, expvarInt21, expvarInt22 expvar.Int
)
expvarMap1.Init()
expvarMap2.Init()
expvarInt11.Set(3)
expvarInt12.Set(13)
expvarInt21.Set(11)
expvarInt22.Set(212)
expvarMap1.Set("POST", &expvarInt11)
expvarMap1.Set("GET", &expvarInt12)
expvarMap2.Set("POST", &expvarInt21)
expvarMap2.Set("GET", &expvarInt22)
expvarMap.Set("404", &expvarMap1)
expvarMap.Set("200", &expvarMap2)
// Results in the following expvar map:
// "http-request-count": {"200": {"POST": 11, "GET": 212}, "404": {"POST": 3, "GET": 13}}
// Let's see what the scrape would yield, but exclude the memstats metrics.
metricStrings := []string{}
metric := dto.Metric{}
metricChan := make(chan prometheus.Metric)
go func() {
expvarCollector.Collect(metricChan)
close(metricChan)
}()
for m := range metricChan {
if strings.Index(m.Desc().String(), "expvar_memstats") == -1 {
metric.Reset()
m.Write(&metric)
metricStrings = append(metricStrings, metric.String())
}
}
sort.Strings(metricStrings)
for _, s := range metricStrings {
fmt.Println(strings.TrimRight(s, " "))
}
// Output:
// label:<name:"code" value:"200" > label:<name:"method" value:"GET" > untyped:<value:212 >
// label:<name:"code" value:"200" > label:<name:"method" value:"POST" > untyped:<value:11 >
// label:<name:"code" value:"404" > label:<name:"method" value:"GET" > untyped:<value:13 >
// label:<name:"code" value:"404" > label:<name:"method" value:"POST" > untyped:<value:3 >
// untyped:<value:42 >
}

View File

@@ -0,0 +1,29 @@
package prometheus
// Inline and byte-free variant of hash/fnv's fnv64a.
const (
offset64 = 14695981039346656037
prime64 = 1099511628211
)
// hashNew initializes a new fnv64a hash value.
func hashNew() uint64 {
return offset64
}
// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
func hashAdd(h uint64, s string) uint64 {
for i := 0; i < len(s); i++ {
h ^= uint64(s[i])
h *= prime64
}
return h
}
// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
func hashAddByte(h uint64, b byte) uint64 {
h ^= uint64(b)
h *= prime64
return h
}
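// A minimal sketch of how these helpers can be combined to hash an ordered
// list of label values. The zero separator byte is an illustrative choice to
// keep adjacent values from running into each other; it is not mandated here.
func exampleHashLabelValues(labelValues []string) uint64 {
	h := hashNew()
	for _, v := range labelValues {
		h = hashAdd(h, v)
		h = hashAddByte(h, 0) // separator between values
	}
	return h
}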

View File

@@ -0,0 +1,173 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
// Gauge is a Metric that represents a single numerical value that can
// arbitrarily go up and down.
//
// A Gauge is typically used for measured values like temperatures or current
// memory usage, but also "counts" that can go up and down, like the number of
// running goroutines.
//
// To create Gauge instances, use NewGauge.
type Gauge interface {
Metric
Collector
// Set sets the Gauge to an arbitrary value.
Set(float64)
// Inc increments the Gauge by 1. Use Add to increment it by arbitrary
// values.
Inc()
// Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
// values.
Dec()
// Add adds the given value to the Gauge. (The value can be negative,
// resulting in a decrease of the Gauge.)
Add(float64)
// Sub subtracts the given value from the Gauge. (The value can be
// negative, resulting in an increase of the Gauge.)
Sub(float64)
// SetToCurrentTime sets the Gauge to the current Unix time in seconds.
SetToCurrentTime()
}
// GaugeOpts is an alias for Opts. See there for doc comments.
type GaugeOpts Opts
// NewGauge creates a new Gauge based on the provided GaugeOpts.
func NewGauge(opts GaugeOpts) Gauge {
return newValue(NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
nil,
opts.ConstLabels,
), GaugeValue, 0)
}
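// A minimal usage sketch for a plain Gauge with the default registry. The
// metric name and the values set below are illustrative only.
func exampleGaugeUsage() {
	inFlight := NewGauge(GaugeOpts{
		Name: "example_in_flight_requests",
		Help: "Number of requests currently being served.",
	})
	MustRegister(inFlight)
	inFlight.Set(12)            // set an absolute value
	inFlight.Inc()              // now 13
	inFlight.Sub(3)             // now 10
	inFlight.SetToCurrentTime() // e.g. to record a "last run" timestamp
}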
// GaugeVec is a Collector that bundles a set of Gauges that all share the same
// Desc, but have different values for their variable labels. This is used if
// you want to count the same thing partitioned by various dimensions
// (e.g. number of operations queued, partitioned by user and operation
// type). Create instances with NewGaugeVec.
type GaugeVec struct {
*metricVec
}
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
// partitioned by the given label names.
func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
labelNames,
opts.ConstLabels,
)
return &GaugeVec{
metricVec: newMetricVec(desc, func(lvs ...string) Metric {
return newValue(desc, GaugeValue, 0, lvs...)
}),
}
}
// GetMetricWithLabelValues returns the Gauge for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of
// label values is accessed for the first time, a new Gauge is created.
//
// It is possible to call this method without using the returned Gauge to only
// create the new Gauge but leave it at its starting value 0. See also the
// SummaryVec example.
//
// Keeping the Gauge for later use is possible (and should be considered if
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
// Delete can be used to delete the Gauge from the GaugeVec. In that case, the
// Gauge will still exist, but it will not be exported anymore, even if a
// Gauge with the same label values is created later. See also the CounterVec
// example.
//
// An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc.
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
// an alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
metric, err := m.metricVec.getMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Gauge), err
}
return nil, err
}
// GetMetricWith returns the Gauge for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is
// accessed for the first time, a new Gauge is created. Implications of
// creating a Gauge without using it and keeping the Gauge for later use are
// the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc.
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
metric, err := m.metricVec.getMetricWith(labels)
if metric != nil {
return metric.(Gauge), err
}
return nil, err
}
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. By not returning an
// error, WithLabelValues allows shortcuts like
// myVec.WithLabelValues("404", "GET").Add(42)
func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge {
return m.metricVec.withLabelValues(lvs...).(Gauge)
}
// With works as GetMetricWith, but panics where GetMetricWith would have
// returned an error. By not returning an error, With allows shortcuts like
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
func (m *GaugeVec) With(labels Labels) Gauge {
return m.metricVec.with(labels).(Gauge)
}
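// A minimal sketch of the two equivalent ways to address a labeled Gauge in a
// GaugeVec. Label names and values are illustrative only.
func exampleGaugeVecUsage() {
	queueLength := NewGaugeVec(
		GaugeOpts{Name: "example_queue_length", Help: "Number of queued items."},
		[]string{"queue", "priority"},
	)
	// Fast, but the argument order must match the label names above.
	queueLength.WithLabelValues("email", "high").Set(42)
	// More verbose, but independent of label order.
	queueLength.With(Labels{"priority": "high", "queue": "email"}).Set(42)
}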
// GaugeFunc is a Gauge whose value is determined at collect time by calling a
// provided function.
//
// To create GaugeFunc instances, use NewGaugeFunc.
type GaugeFunc interface {
Metric
Collector
}
// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
// value reported is determined by calling the given function from within the
// Write method. Take into account that metric collection may happen
// concurrently. If that results in concurrent calls to Write, like in the case
// where a GaugeFunc is directly registered with Prometheus, the provided
// function must be concurrency-safe.
func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
return newValueFunc(NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
nil,
opts.ConstLabels,
), GaugeValue, function)
}
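// A minimal sketch of a GaugeFunc whose value is read at collection time,
// assuming some other part of the program maintains lastBackupUnixSeconds.
// In real code that value would have to be read in a concurrency-safe way.
func exampleGaugeFuncUsage() {
	var lastBackupUnixSeconds float64 // updated elsewhere (illustrative)
	MustRegister(NewGaugeFunc(
		GaugeOpts{
			Name: "example_last_backup_timestamp_seconds",
			Help: "Unix time of the last completed backup.",
		},
		func() float64 { return lastBackupUnixSeconds },
	))
}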

View File

@@ -0,0 +1,202 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"math"
"math/rand"
"sync"
"testing"
"testing/quick"
"time"
dto "github.com/prometheus/client_model/go"
)
func listenGaugeStream(vals, result chan float64, done chan struct{}) {
var sum float64
outer:
for {
select {
case <-done:
close(vals)
for v := range vals {
sum += v
}
break outer
case v := <-vals:
sum += v
}
}
result <- sum
close(result)
}
func TestGaugeConcurrency(t *testing.T) {
it := func(n uint32) bool {
mutations := int(n % 10000)
concLevel := int(n%15 + 1)
var start, end sync.WaitGroup
start.Add(1)
end.Add(concLevel)
sStream := make(chan float64, mutations*concLevel)
result := make(chan float64)
done := make(chan struct{})
go listenGaugeStream(sStream, result, done)
go func() {
end.Wait()
close(done)
}()
gge := NewGauge(GaugeOpts{
Name: "test_gauge",
Help: "no help can be found here",
})
for i := 0; i < concLevel; i++ {
vals := make([]float64, mutations)
for j := 0; j < mutations; j++ {
vals[j] = rand.Float64() - 0.5
}
go func(vals []float64) {
start.Wait()
for _, v := range vals {
sStream <- v
gge.Add(v)
}
end.Done()
}(vals)
}
start.Done()
if expected, got := <-result, math.Float64frombits(gge.(*value).valBits); math.Abs(expected-got) > 0.000001 {
t.Fatalf("expected approx. %f, got %f", expected, got)
return false
}
return true
}
if err := quick.Check(it, nil); err != nil {
t.Fatal(err)
}
}
func TestGaugeVecConcurrency(t *testing.T) {
it := func(n uint32) bool {
mutations := int(n % 10000)
concLevel := int(n%15 + 1)
vecLength := int(n%5 + 1)
var start, end sync.WaitGroup
start.Add(1)
end.Add(concLevel)
sStreams := make([]chan float64, vecLength)
results := make([]chan float64, vecLength)
done := make(chan struct{})
for i := 0; i < vecLength; i++ {
sStreams[i] = make(chan float64, mutations*concLevel)
results[i] = make(chan float64)
go listenGaugeStream(sStreams[i], results[i], done)
}
go func() {
end.Wait()
close(done)
}()
gge := NewGaugeVec(
GaugeOpts{
Name: "test_gauge",
Help: "no help can be found here",
},
[]string{"label"},
)
for i := 0; i < concLevel; i++ {
vals := make([]float64, mutations)
pick := make([]int, mutations)
for j := 0; j < mutations; j++ {
vals[j] = rand.Float64() - 0.5
pick[j] = rand.Intn(vecLength)
}
go func(vals []float64) {
start.Wait()
for i, v := range vals {
sStreams[pick[i]] <- v
gge.WithLabelValues(string('A' + pick[i])).Add(v)
}
end.Done()
}(vals)
}
start.Done()
for i := range sStreams {
if expected, got := <-results[i], math.Float64frombits(gge.WithLabelValues(string('A'+i)).(*value).valBits); math.Abs(expected-got) > 0.000001 {
t.Fatalf("expected approx. %f, got %f", expected, got)
return false
}
}
return true
}
if err := quick.Check(it, nil); err != nil {
t.Fatal(err)
}
}
func TestGaugeFunc(t *testing.T) {
gf := NewGaugeFunc(
GaugeOpts{
Name: "test_name",
Help: "test help",
ConstLabels: Labels{"a": "1", "b": "2"},
},
func() float64 { return 3.1415 },
)
if expected, got := `Desc{fqName: "test_name", help: "test help", constLabels: {a="1",b="2"}, variableLabels: []}`, gf.Desc().String(); expected != got {
t.Errorf("expected %q, got %q", expected, got)
}
m := &dto.Metric{}
gf.Write(m)
if expected, got := `label:<name:"a" value:"1" > label:<name:"b" value:"2" > gauge:<value:3.1415 > `, m.String(); expected != got {
t.Errorf("expected %q, got %q", expected, got)
}
}
func TestGaugeSetCurrentTime(t *testing.T) {
g := NewGauge(GaugeOpts{
Name: "test_name",
Help: "test help",
})
g.SetToCurrentTime()
unixTime := float64(time.Now().Unix())
m := &dto.Metric{}
g.Write(m)
delta := unixTime - m.GetGauge().GetValue()
// This is just a smoke test to make sure SetToCurrentTime is not
// totally off. Tests with current time involved are hard...
if math.Abs(delta) > 5 {
t.Errorf("Gauge set to current time deviates from current time by more than 5s, delta is %f seconds", delta)
}
}

View File

@@ -0,0 +1,284 @@
package prometheus
import (
"fmt"
"runtime"
"runtime/debug"
"time"
)
type goCollector struct {
goroutinesDesc *Desc
threadsDesc *Desc
gcDesc *Desc
goInfoDesc *Desc
// metrics to describe and collect
metrics memStatsMetrics
}
// NewGoCollector returns a collector which exports metrics about the current
// go process.
func NewGoCollector() Collector {
return &goCollector{
goroutinesDesc: NewDesc(
"go_goroutines",
"Number of goroutines that currently exist.",
nil, nil),
threadsDesc: NewDesc(
"go_threads",
"Number of OS threads created.",
nil, nil),
gcDesc: NewDesc(
"go_gc_duration_seconds",
"A summary of the GC invocation durations.",
nil, nil),
goInfoDesc: NewDesc(
"go_info",
"Information about the Go environment.",
nil, Labels{"version": runtime.Version()}),
metrics: memStatsMetrics{
{
desc: NewDesc(
memstatNamespace("alloc_bytes"),
"Number of bytes allocated and still in use.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("alloc_bytes_total"),
"Total number of bytes allocated, even if freed.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
valType: CounterValue,
}, {
desc: NewDesc(
memstatNamespace("sys_bytes"),
"Number of bytes obtained from system.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("lookups_total"),
"Total number of pointer lookups.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
valType: CounterValue,
}, {
desc: NewDesc(
memstatNamespace("mallocs_total"),
"Total number of mallocs.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
valType: CounterValue,
}, {
desc: NewDesc(
memstatNamespace("frees_total"),
"Total number of frees.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
valType: CounterValue,
}, {
desc: NewDesc(
memstatNamespace("heap_alloc_bytes"),
"Number of heap bytes allocated and still in use.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_sys_bytes"),
"Number of heap bytes obtained from system.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_idle_bytes"),
"Number of heap bytes waiting to be used.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_inuse_bytes"),
"Number of heap bytes that are in use.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_released_bytes"),
"Number of heap bytes released to OS.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_objects"),
"Number of allocated objects.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("stack_inuse_bytes"),
"Number of bytes in use by the stack allocator.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("stack_sys_bytes"),
"Number of bytes obtained from system for stack allocator.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("mspan_inuse_bytes"),
"Number of bytes in use by mspan structures.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("mspan_sys_bytes"),
"Number of bytes used for mspan structures obtained from system.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("mcache_inuse_bytes"),
"Number of bytes in use by mcache structures.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("mcache_sys_bytes"),
"Number of bytes used for mcache structures obtained from system.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("buck_hash_sys_bytes"),
"Number of bytes used by the profiling bucket hash table.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("gc_sys_bytes"),
"Number of bytes used for garbage collection system metadata.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("other_sys_bytes"),
"Number of bytes used for other system allocations.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("next_gc_bytes"),
"Number of heap bytes when next garbage collection will take place.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("last_gc_time_seconds"),
"Number of seconds since 1970 of last garbage collection.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("gc_cpu_fraction"),
"The fraction of this program's available CPU time used by the GC since the program started.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
valType: GaugeValue,
},
},
}
}
func memstatNamespace(s string) string {
return fmt.Sprintf("go_memstats_%s", s)
}
// Describe returns all descriptions of the collector.
func (c *goCollector) Describe(ch chan<- *Desc) {
ch <- c.goroutinesDesc
ch <- c.threadsDesc
ch <- c.gcDesc
ch <- c.goInfoDesc
for _, i := range c.metrics {
ch <- i.desc
}
}
// Collect returns the current state of all metrics of the collector.
func (c *goCollector) Collect(ch chan<- Metric) {
ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
n, _ := runtime.ThreadCreateProfile(nil)
ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
var stats debug.GCStats
stats.PauseQuantiles = make([]time.Duration, 5)
debug.ReadGCStats(&stats)
quantiles := make(map[float64]float64)
for idx, pq := range stats.PauseQuantiles[1:] {
quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
}
quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles)
ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
ms := &runtime.MemStats{}
runtime.ReadMemStats(ms)
for _, i := range c.metrics {
ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
}
}
// memStatsMetrics provide description, value, and value type for memstat metrics.
type memStatsMetrics []struct {
desc *Desc
eval func(*runtime.MemStats) float64
valType ValueType
}
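// A minimal sketch of wiring the Go runtime collector into a dedicated
// registry rather than the default one; exposing reg over HTTP (for example
// via promhttp) is left out here.
func exampleGoCollectorUsage() {
	reg := NewRegistry()
	reg.MustRegister(NewGoCollector())
}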

View File

@@ -0,0 +1,127 @@
package prometheus
import (
"runtime"
"testing"
"time"
dto "github.com/prometheus/client_model/go"
)
func TestGoCollector(t *testing.T) {
var (
c = NewGoCollector()
ch = make(chan Metric)
waitc = make(chan struct{})
closec = make(chan struct{})
old = -1
)
defer close(closec)
go func() {
c.Collect(ch)
go func(c <-chan struct{}) {
<-c
}(closec)
<-waitc
c.Collect(ch)
}()
for {
select {
case m := <-ch:
// m can be Gauge or Counter,
// currently just test the go_goroutines Gauge
// and ignore others.
if m.Desc().fqName != "go_goroutines" {
continue
}
pb := &dto.Metric{}
m.Write(pb)
if pb.GetGauge() == nil {
continue
}
if old == -1 {
old = int(pb.GetGauge().GetValue())
close(waitc)
continue
}
if diff := int(pb.GetGauge().GetValue()) - old; diff != 1 {
// TODO: This is flaky in highly concurrent situations.
t.Errorf("want 1 new goroutine, got %d", diff)
}
// GoCollector performs three sends per call.
// After the second Collect call above, we need to receive three more sends
// to shut down cleanly.
<-ch
<-ch
<-ch
return
case <-time.After(1 * time.Second):
t.Fatalf("expected collect timed out")
}
}
}
func TestGCCollector(t *testing.T) {
var (
c = NewGoCollector()
ch = make(chan Metric)
waitc = make(chan struct{})
closec = make(chan struct{})
oldGC uint64
oldPause float64
)
defer close(closec)
go func() {
c.Collect(ch)
// force GC
runtime.GC()
<-waitc
c.Collect(ch)
}()
first := true
for {
select {
case metric := <-ch:
switch m := metric.(type) {
case *constSummary, *value:
pb := &dto.Metric{}
m.Write(pb)
if pb.GetSummary() == nil {
continue
}
if len(pb.GetSummary().Quantile) != 5 {
t.Errorf("expected 4 buckets, got %d", len(pb.GetSummary().Quantile))
}
for idx, want := range []float64{0.0, 0.25, 0.5, 0.75, 1.0} {
if *pb.GetSummary().Quantile[idx].Quantile != want {
t.Errorf("bucket #%d is off, got %f, want %f", idx, *pb.GetSummary().Quantile[idx].Quantile, want)
}
}
if first {
first = false
oldGC = *pb.GetSummary().SampleCount
oldPause = *pb.GetSummary().SampleSum
close(waitc)
continue
}
if diff := *pb.GetSummary().SampleCount - oldGC; diff != 1 {
t.Errorf("want 1 new garbage collection run, got %d", diff)
}
if diff := *pb.GetSummary().SampleSum - oldPause; diff <= 0 {
t.Errorf("want moar pause, got %f", diff)
}
return
}
case <-time.After(1 * time.Second):
t.Fatalf("expected collect timed out")
}
}
}

View File

@@ -0,0 +1,280 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package graphite provides a bridge to push Prometheus metrics to a Graphite
// server.
package graphite
import (
"bufio"
"errors"
"fmt"
"io"
"net"
"sort"
"time"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/model"
"golang.org/x/net/context"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/client_golang/prometheus"
)
const (
defaultInterval = 15 * time.Second
millisecondsPerSecond = 1000
)
// HandlerErrorHandling defines how a Handler serving metrics will handle
// errors.
type HandlerErrorHandling int
// These constants cause handlers serving metrics to behave as described if
// errors are encountered.
const (
// Ignore errors and try to push as many metrics to Graphite as possible.
ContinueOnError HandlerErrorHandling = iota
// Abort the push to Graphite upon the first error encountered.
AbortOnError
)
// Config defines the Graphite bridge config.
type Config struct {
// The url to push data to. Required.
URL string
// The prefix for the pushed Graphite metrics. Defaults to empty string.
Prefix string
// The interval to use for pushing data to Graphite. Defaults to 15 seconds.
Interval time.Duration
// The timeout for pushing metrics to Graphite. Defaults to 15 seconds.
Timeout time.Duration
// The Gatherer to use for metrics. Defaults to prometheus.DefaultGatherer.
Gatherer prometheus.Gatherer
// The logger that messages are written to. Defaults to no logging.
Logger Logger
// ErrorHandling defines how errors are handled. Note that errors are
// logged regardless of the configured ErrorHandling, provided Logger
// is not nil.
ErrorHandling HandlerErrorHandling
}
// Bridge pushes metrics to the configured Graphite server.
type Bridge struct {
url string
prefix string
interval time.Duration
timeout time.Duration
errorHandling HandlerErrorHandling
logger Logger
g prometheus.Gatherer
}
// Logger is the minimal interface Bridge needs for logging. Note that
// log.Logger from the standard library implements this interface, and it is
// easy for custom loggers to implement, if they don't do so already.
type Logger interface {
Println(v ...interface{})
}
// NewBridge returns a pointer to a new Bridge struct.
func NewBridge(c *Config) (*Bridge, error) {
b := &Bridge{}
if c.URL == "" {
return nil, errors.New("missing URL")
}
b.url = c.URL
if c.Gatherer == nil {
b.g = prometheus.DefaultGatherer
} else {
b.g = c.Gatherer
}
if c.Logger != nil {
b.logger = c.Logger
}
if c.Prefix != "" {
b.prefix = c.Prefix
}
var z time.Duration
if c.Interval == z {
b.interval = defaultInterval
} else {
b.interval = c.Interval
}
if c.Timeout == z {
b.timeout = defaultInterval
} else {
b.timeout = c.Timeout
}
b.errorHandling = c.ErrorHandling
return b, nil
}
// Run starts the event loop that pushes Prometheus metrics to Graphite at the
// configured interval.
func (b *Bridge) Run(ctx context.Context) {
ticker := time.NewTicker(b.interval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
if err := b.Push(); err != nil && b.logger != nil {
b.logger.Println("error pushing to Graphite:", err)
}
case <-ctx.Done():
return
}
}
}
// Push pushes Prometheus metrics to the configured Graphite server.
func (b *Bridge) Push() error {
mfs, err := b.g.Gather()
if err != nil || len(mfs) == 0 {
switch b.errorHandling {
case AbortOnError:
return err
case ContinueOnError:
if b.logger != nil {
b.logger.Println("continue on error:", err)
}
default:
panic("unrecognized error handling value")
}
}
conn, err := net.DialTimeout("tcp", b.url, b.timeout)
if err != nil {
return err
}
defer conn.Close()
return writeMetrics(conn, mfs, b.prefix, model.Now())
}
func writeMetrics(w io.Writer, mfs []*dto.MetricFamily, prefix string, now model.Time) error {
vec, err := expfmt.ExtractSamples(&expfmt.DecodeOptions{
Timestamp: now,
}, mfs...)
if err != nil {
return err
}
buf := bufio.NewWriter(w)
for _, s := range vec {
if err := writeSanitized(buf, prefix); err != nil {
return err
}
if err := buf.WriteByte('.'); err != nil {
return err
}
if err := writeMetric(buf, s.Metric); err != nil {
return err
}
if _, err := fmt.Fprintf(buf, " %g %d\n", s.Value, int64(s.Timestamp)/millisecondsPerSecond); err != nil {
return err
}
if err := buf.Flush(); err != nil {
return err
}
}
return nil
}
func writeMetric(buf *bufio.Writer, m model.Metric) error {
metricName, hasName := m[model.MetricNameLabel]
numLabels := len(m) - 1
if !hasName {
numLabels = len(m)
}
labelStrings := make([]string, 0, numLabels)
for label, value := range m {
if label != model.MetricNameLabel {
labelStrings = append(labelStrings, fmt.Sprintf("%s %s", string(label), string(value)))
}
}
var err error
switch numLabels {
case 0:
if hasName {
return writeSanitized(buf, string(metricName))
}
default:
sort.Strings(labelStrings)
if err = writeSanitized(buf, string(metricName)); err != nil {
return err
}
for _, s := range labelStrings {
if err = buf.WriteByte('.'); err != nil {
return err
}
if err = writeSanitized(buf, s); err != nil {
return err
}
}
}
return nil
}
func writeSanitized(buf *bufio.Writer, s string) error {
prevUnderscore := false
for _, c := range s {
c = replaceInvalidRune(c)
if c == '_' {
if prevUnderscore {
continue
}
prevUnderscore = true
} else {
prevUnderscore = false
}
if _, err := buf.WriteRune(c); err != nil {
return err
}
}
return nil
}
func replaceInvalidRune(c rune) rune {
if c == ' ' {
return '.'
}
if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_' || c == ':' || (c >= '0' && c <= '9')) {
return '_'
}
return c
}
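// A short sketch of the sanitization performed above, with illustrative
// inputs (the mapping follows replaceInvalidRune plus the underscore
// collapsing in writeSanitized):
//
//	"request latency" -> "request.latency" (space becomes '.')
//	"hE/l1o"          -> "hE_l1o"          (invalid rune becomes '_')
//	"he,*ll(.o"       -> "he_ll_o"         (runs of '_' collapse)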

View File

@@ -0,0 +1,309 @@
package graphite
import (
"bufio"
"bytes"
"io"
"log"
"net"
"os"
"regexp"
"testing"
"time"
"github.com/prometheus/common/model"
"golang.org/x/net/context"
"github.com/prometheus/client_golang/prometheus"
)
func TestSanitize(t *testing.T) {
testCases := []struct {
in, out string
}{
{in: "hello", out: "hello"},
{in: "hE/l1o", out: "hE_l1o"},
{in: "he,*ll(.o", out: "he_ll_o"},
{in: "hello_there%^&", out: "hello_there_"},
}
var buf bytes.Buffer
w := bufio.NewWriter(&buf)
for i, tc := range testCases {
if err := writeSanitized(w, tc.in); err != nil {
t.Fatalf("write failed: %v", err)
}
if err := w.Flush(); err != nil {
t.Fatalf("flush failed: %v", err)
}
if want, got := tc.out, buf.String(); want != got {
t.Fatalf("test case index %d: got sanitized string %s, want %s", i, got, want)
}
buf.Reset()
}
}
func TestWriteSummary(t *testing.T) {
sumVec := prometheus.NewSummaryVec(
prometheus.SummaryOpts{
Name: "name",
Help: "docstring",
ConstLabels: prometheus.Labels{"constname": "constvalue"},
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
},
[]string{"labelname"},
)
sumVec.WithLabelValues("val1").Observe(float64(10))
sumVec.WithLabelValues("val1").Observe(float64(20))
sumVec.WithLabelValues("val1").Observe(float64(30))
sumVec.WithLabelValues("val2").Observe(float64(20))
sumVec.WithLabelValues("val2").Observe(float64(30))
sumVec.WithLabelValues("val2").Observe(float64(40))
reg := prometheus.NewRegistry()
reg.MustRegister(sumVec)
mfs, err := reg.Gather()
if err != nil {
t.Fatalf("error: %v", err)
}
now := model.Time(1477043083)
var buf bytes.Buffer
err = writeMetrics(&buf, mfs, "prefix", now)
if err != nil {
t.Fatalf("error: %v", err)
}
want := `prefix.name.constname.constvalue.labelname.val1.quantile.0_5 20 1477043
prefix.name.constname.constvalue.labelname.val1.quantile.0_9 30 1477043
prefix.name.constname.constvalue.labelname.val1.quantile.0_99 30 1477043
prefix.name_sum.constname.constvalue.labelname.val1 60 1477043
prefix.name_count.constname.constvalue.labelname.val1 3 1477043
prefix.name.constname.constvalue.labelname.val2.quantile.0_5 30 1477043
prefix.name.constname.constvalue.labelname.val2.quantile.0_9 40 1477043
prefix.name.constname.constvalue.labelname.val2.quantile.0_99 40 1477043
prefix.name_sum.constname.constvalue.labelname.val2 90 1477043
prefix.name_count.constname.constvalue.labelname.val2 3 1477043
`
if got := buf.String(); want != got {
t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
}
}
func TestWriteHistogram(t *testing.T) {
histVec := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "name",
Help: "docstring",
ConstLabels: prometheus.Labels{"constname": "constvalue"},
Buckets: []float64{0.01, 0.02, 0.05, 0.1},
},
[]string{"labelname"},
)
histVec.WithLabelValues("val1").Observe(float64(10))
histVec.WithLabelValues("val1").Observe(float64(20))
histVec.WithLabelValues("val1").Observe(float64(30))
histVec.WithLabelValues("val2").Observe(float64(20))
histVec.WithLabelValues("val2").Observe(float64(30))
histVec.WithLabelValues("val2").Observe(float64(40))
reg := prometheus.NewRegistry()
reg.MustRegister(histVec)
mfs, err := reg.Gather()
if err != nil {
t.Fatalf("error: %v", err)
}
now := model.Time(1477043083)
var buf bytes.Buffer
err = writeMetrics(&buf, mfs, "prefix", now)
if err != nil {
t.Fatalf("error: %v", err)
}
want := `prefix.name_bucket.constname.constvalue.labelname.val1.le.0_01 0 1477043
prefix.name_bucket.constname.constvalue.labelname.val1.le.0_02 0 1477043
prefix.name_bucket.constname.constvalue.labelname.val1.le.0_05 0 1477043
prefix.name_bucket.constname.constvalue.labelname.val1.le.0_1 0 1477043
prefix.name_sum.constname.constvalue.labelname.val1 60 1477043
prefix.name_count.constname.constvalue.labelname.val1 3 1477043
prefix.name_bucket.constname.constvalue.labelname.val1.le._Inf 3 1477043
prefix.name_bucket.constname.constvalue.labelname.val2.le.0_01 0 1477043
prefix.name_bucket.constname.constvalue.labelname.val2.le.0_02 0 1477043
prefix.name_bucket.constname.constvalue.labelname.val2.le.0_05 0 1477043
prefix.name_bucket.constname.constvalue.labelname.val2.le.0_1 0 1477043
prefix.name_sum.constname.constvalue.labelname.val2 90 1477043
prefix.name_count.constname.constvalue.labelname.val2 3 1477043
prefix.name_bucket.constname.constvalue.labelname.val2.le._Inf 3 1477043
`
if got := buf.String(); want != got {
t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
}
}
func TestToReader(t *testing.T) {
cntVec := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "name",
Help: "docstring",
ConstLabels: prometheus.Labels{"constname": "constvalue"},
},
[]string{"labelname"},
)
cntVec.WithLabelValues("val1").Inc()
cntVec.WithLabelValues("val2").Inc()
reg := prometheus.NewRegistry()
reg.MustRegister(cntVec)
want := `prefix.name.constname.constvalue.labelname.val1 1 1477043
prefix.name.constname.constvalue.labelname.val2 1 1477043
`
mfs, err := reg.Gather()
if err != nil {
t.Fatalf("error: %v", err)
}
now := model.Time(1477043083)
var buf bytes.Buffer
err = writeMetrics(&buf, mfs, "prefix", now)
if err != nil {
t.Fatalf("error: %v", err)
}
if got := buf.String(); want != got {
t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
}
}
func TestPush(t *testing.T) {
reg := prometheus.NewRegistry()
cntVec := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "name",
Help: "docstring",
ConstLabels: prometheus.Labels{"constname": "constvalue"},
},
[]string{"labelname"},
)
cntVec.WithLabelValues("val1").Inc()
cntVec.WithLabelValues("val2").Inc()
reg.MustRegister(cntVec)
host := "localhost"
port := ":56789"
b, err := NewBridge(&Config{
URL: host + port,
Gatherer: reg,
Prefix: "prefix",
})
if err != nil {
t.Fatalf("error creating bridge: %v", err)
}
nmg, err := newMockGraphite(port)
if err != nil {
t.Fatalf("error creating mock graphite: %v", err)
}
defer nmg.Close()
err = b.Push()
if err != nil {
t.Fatalf("error pushing: %v", err)
}
wants := []string{
"prefix.name.constname.constvalue.labelname.val1 1",
"prefix.name.constname.constvalue.labelname.val2 1",
}
select {
case got := <-nmg.readc:
for _, want := range wants {
matched, err := regexp.MatchString(want, got)
if err != nil {
t.Fatalf("error pushing: %v", err)
}
if !matched {
t.Fatalf("missing metric:\nno match for %s received by server:\n%s", want, got)
}
}
return
case err := <-nmg.errc:
t.Fatalf("error reading push: %v", err)
case <-time.After(50 * time.Millisecond):
t.Fatalf("no result from graphite server")
}
}
func newMockGraphite(port string) (*mockGraphite, error) {
readc := make(chan string)
errc := make(chan error)
ln, err := net.Listen("tcp", port)
if err != nil {
return nil, err
}
go func() {
conn, err := ln.Accept()
if err != nil {
errc <- err
}
var b bytes.Buffer
io.Copy(&b, conn)
readc <- b.String()
}()
return &mockGraphite{
readc: readc,
errc: errc,
Listener: ln,
}, nil
}
type mockGraphite struct {
readc chan string
errc chan error
net.Listener
}
func ExampleBridge() {
b, err := NewBridge(&Config{
URL: "graphite.example.org:3099",
Gatherer: prometheus.DefaultGatherer,
Prefix: "prefix",
Interval: 15 * time.Second,
Timeout: 10 * time.Second,
ErrorHandling: AbortOnError,
Logger: log.New(os.Stdout, "graphite bridge: ", log.Lshortfile),
})
if err != nil {
panic(err)
}
go func() {
// Start something in a goroutine that uses metrics.
}()
// Push initial metrics to Graphite. Fail fast if the push fails.
if err := b.Push(); err != nil {
panic(err)
}
// Create a Context to control stopping the Run() loop that pushes
// metrics to Graphite.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Start pushing metrics to Graphite in the Run() loop.
b.Run(ctx)
}

View File

@@ -0,0 +1,473 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"fmt"
"math"
"sort"
"sync/atomic"
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
)
// A Histogram counts individual observations from an event or sample stream in
// configurable buckets. Similar to a summary, it also provides a sum of
// observations and an observation count.
//
// On the Prometheus server, quantiles can be calculated from a Histogram using
// the histogram_quantile function in the query language.
//
// Note that Histograms, in contrast to Summaries, can be aggregated with the
// Prometheus query language (see the documentation for detailed
// procedures). However, Histograms require the user to pre-define suitable
// buckets, and they are in general less accurate. The Observe method of a
// Histogram has a very low performance overhead in comparison with the Observe
// method of a Summary.
//
// To create Histogram instances, use NewHistogram.
type Histogram interface {
Metric
Collector
// Observe adds a single observation to the histogram.
Observe(float64)
}
// bucketLabel is used for the label that defines the upper bound of a
// bucket of a histogram ("le" -> "less or equal").
const bucketLabel = "le"
// DefBuckets are the default Histogram buckets. The default buckets are
// tailored to broadly measure the response time (in seconds) of a network
// service. Most likely, however, you will be required to define buckets
// customized to your use case.
var (
DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
errBucketLabelNotAllowed = fmt.Errorf(
"%q is not allowed as label name in histograms", bucketLabel,
)
)
// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
// and not included in the returned slice. The returned slice is meant to be
// used for the Buckets field of HistogramOpts.
//
// The function panics if 'count' is zero or negative.
func LinearBuckets(start, width float64, count int) []float64 {
if count < 1 {
panic("LinearBuckets needs a positive count")
}
buckets := make([]float64, count)
for i := range buckets {
buckets[i] = start
start += width
}
return buckets
}
// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
// upper bound of 'start' and each following bucket's upper bound is 'factor'
// times the previous bucket's upper bound. The final +Inf bucket is not counted
// and not included in the returned slice. The returned slice is meant to be
// used for the Buckets field of HistogramOpts.
//
// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
// or if 'factor' is less than or equal to 1.
func ExponentialBuckets(start, factor float64, count int) []float64 {
if count < 1 {
panic("ExponentialBuckets needs a positive count")
}
if start <= 0 {
panic("ExponentialBuckets needs a positive start value")
}
if factor <= 1 {
panic("ExponentialBuckets needs a factor greater than 1")
}
buckets := make([]float64, count)
for i := range buckets {
buckets[i] = start
start *= factor
}
return buckets
}
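// A minimal sketch of the concrete bucket layouts the two helpers produce for
// small inputs, which can be handy when filling in HistogramOpts.Buckets.
func exampleBucketLayouts() {
	linear := LinearBuckets(20, 5, 5)          // 20, 25, 30, 35, 40
	exponential := ExponentialBuckets(1, 2, 5) // 1, 2, 4, 8, 16
	_, _ = linear, exponential
}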
// HistogramOpts bundles the options for creating a Histogram metric. It is
// mandatory to set Name and Help to a non-empty string. All other fields are
// optional and can safely be left at their zero value.
type HistogramOpts struct {
// Namespace, Subsystem, and Name are components of the fully-qualified
// name of the Histogram (created by joining these components with
// "_"). Only Name is mandatory, the others merely help structuring the
// name. Note that the fully-qualified name of the Histogram must be a
// valid Prometheus metric name.
Namespace string
Subsystem string
Name string
// Help provides information about this Histogram. Mandatory!
//
// Metrics with the same fully-qualified name must have the same Help
// string.
Help string
// ConstLabels are used to attach fixed labels to this
// Histogram. Histograms with the same fully-qualified name must have the
// same label names in their ConstLabels.
//
// Note that in most cases, labels have a value that varies during the
// lifetime of a process. Those labels are usually managed with a
// HistogramVec. ConstLabels serve only special purposes. One is for the
// special case where the value of a label does not change during the
// lifetime of a process, e.g. if the revision of the running binary is
// put into a label. Another, more advanced purpose is if more than one
// Collector needs to collect Histograms with the same fully-qualified
// name. In that case, those Histograms must differ in the values of
// their ConstLabels. See the Collector examples.
//
// If the value of a label never changes (not even between binaries),
// that label most likely should not be a label at all (but part of the
// metric name).
ConstLabels Labels
// Buckets defines the buckets into which observations are counted. Each
// element in the slice is the upper inclusive bound of a bucket. The
// values must be sorted in strictly increasing order. There is no need
// to add a highest bucket with +Inf bound, it will be added
// implicitly. The default value is DefBuckets.
Buckets []float64
}
// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
// panics if the buckets in HistogramOpts are not in strictly increasing order.
func NewHistogram(opts HistogramOpts) Histogram {
return newHistogram(
NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
nil,
opts.ConstLabels,
),
opts,
)
}
func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
if len(desc.variableLabels) != len(labelValues) {
panic(errInconsistentCardinality)
}
for _, n := range desc.variableLabels {
if n == bucketLabel {
panic(errBucketLabelNotAllowed)
}
}
for _, lp := range desc.constLabelPairs {
if lp.GetName() == bucketLabel {
panic(errBucketLabelNotAllowed)
}
}
if len(opts.Buckets) == 0 {
opts.Buckets = DefBuckets
}
h := &histogram{
desc: desc,
upperBounds: opts.Buckets,
labelPairs: makeLabelPairs(desc, labelValues),
}
for i, upperBound := range h.upperBounds {
if i < len(h.upperBounds)-1 {
if upperBound >= h.upperBounds[i+1] {
panic(fmt.Errorf(
"histogram buckets must be in increasing order: %f >= %f",
upperBound, h.upperBounds[i+1],
))
}
} else {
if math.IsInf(upperBound, +1) {
// The +Inf bucket is implicit. Remove it here.
h.upperBounds = h.upperBounds[:i]
}
}
}
// Finally we know the final length of h.upperBounds and can make counts.
h.counts = make([]uint64, len(h.upperBounds))
h.init(h) // Init self-collection.
return h
}
type histogram struct {
// sumBits contains the bits of the float64 representing the sum of all
// observations. sumBits and count have to go first in the struct to
// guarantee alignment for atomic operations.
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
sumBits uint64
count uint64
selfCollector
// Note that there is no mutex required.
desc *Desc
upperBounds []float64
counts []uint64
labelPairs []*dto.LabelPair
}
func (h *histogram) Desc() *Desc {
return h.desc
}
func (h *histogram) Observe(v float64) {
// TODO(beorn7): For small numbers of buckets (<30), a linear search is
// slightly faster than the binary search. If we really care, we could
// switch from one search strategy to the other depending on the number
// of buckets.
//
// Microbenchmarks (BenchmarkHistogramNoLabels):
// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
i := sort.SearchFloat64s(h.upperBounds, v)
if i < len(h.counts) {
atomic.AddUint64(&h.counts[i], 1)
}
atomic.AddUint64(&h.count, 1)
for {
oldBits := atomic.LoadUint64(&h.sumBits)
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) {
break
}
}
}
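// To spell out the search above (an explanatory note added here):
// sort.SearchFloat64s returns the smallest index i with upperBounds[i] >= v,
// so an observation lands in the first bucket whose upper bound it does not
// exceed. For upperBounds of []float64{1, 2, 5}:
//
//	Observe(0.5) // increments counts[0]
//	Observe(2)   // increments counts[1], upper bounds are inclusive
//	Observe(7)   // i == len(counts), so only the total count grows (implicit +Inf bucket)
//
// The CAS loop keeps the float64 sum consistent under concurrent observations
// without taking a mutex.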
func (h *histogram) Write(out *dto.Metric) error {
his := &dto.Histogram{}
buckets := make([]*dto.Bucket, len(h.upperBounds))
his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits)))
his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count))
var count uint64
for i, upperBound := range h.upperBounds {
count += atomic.LoadUint64(&h.counts[i])
buckets[i] = &dto.Bucket{
CumulativeCount: proto.Uint64(count),
UpperBound: proto.Float64(upperBound),
}
}
his.Bucket = buckets
out.Histogram = his
out.Label = h.labelPairs
return nil
}
// HistogramVec is a Collector that bundles a set of Histograms that all share the
// same Desc, but have different values for their variable labels. This is used
// if you want to count the same thing partitioned by various dimensions
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewHistogramVec.
type HistogramVec struct {
*metricVec
}
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
// partitioned by the given label names.
func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
labelNames,
opts.ConstLabels,
)
return &HistogramVec{
metricVec: newMetricVec(desc, func(lvs ...string) Metric {
return newHistogram(desc, opts, lvs...)
}),
}
}
// GetMetricWithLabelValues returns the Histogram for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of
// label values is accessed for the first time, a new Histogram is created.
//
// It is possible to call this method without using the returned Histogram to only
// create the new Histogram but leave it at its starting value, a Histogram without
// any observations.
//
// Keeping the Histogram for later use is possible (and should be considered if
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
// Delete can be used to delete the Histogram from the HistogramVec. In that case, the
// Histogram will still exist, but it will not be exported anymore, even if a
// Histogram with the same label values is created later. See also the CounterVec
// example.
//
// An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc.
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
// an alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example.
func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
metric, err := m.metricVec.getMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Observer), err
}
return nil, err
}
// GetMetricWith returns the Histogram for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is
// accessed for the first time, a new Histogram is created. Implications of
// creating a Histogram without using it and keeping the Histogram for later use
// are the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc.
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (m *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
metric, err := m.metricVec.getMetricWith(labels)
if metric != nil {
return metric.(Observer), err
}
return nil, err
}
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. By not returning an
// error, WithLabelValues allows shortcuts like
// myVec.WithLabelValues("404", "GET").Observe(42.21)
func (m *HistogramVec) WithLabelValues(lvs ...string) Observer {
return m.metricVec.withLabelValues(lvs...).(Observer)
}
// With works as GetMetricWith, but panics where GetMetricWith would have
// returned an error. By not returning an error, With allows shortcuts like
// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
func (m *HistogramVec) With(labels Labels) Observer {
return m.metricVec.with(labels).(Observer)
}
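// Putting the vector variants together (a sketch with invented metric and
// label names):
//
//	latency := NewHistogramVec(
//		HistogramOpts{
//			Name:    "example_http_request_duration_seconds",
//			Help:    "A sketch of request latencies partitioned by code and method.",
//			Buckets: DefBuckets,
//		},
//		[]string{"code", "method"},
//	)
//	MustRegister(latency)
//	latency.WithLabelValues("404", "GET").Observe(0.021)
//	latency.With(Labels{"code": "200", "method": "POST"}).Observe(0.003)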
type constHistogram struct {
desc *Desc
count uint64
sum float64
buckets map[float64]uint64
labelPairs []*dto.LabelPair
}
func (h *constHistogram) Desc() *Desc {
return h.desc
}
func (h *constHistogram) Write(out *dto.Metric) error {
his := &dto.Histogram{}
buckets := make([]*dto.Bucket, 0, len(h.buckets))
his.SampleCount = proto.Uint64(h.count)
his.SampleSum = proto.Float64(h.sum)
for upperBound, count := range h.buckets {
buckets = append(buckets, &dto.Bucket{
CumulativeCount: proto.Uint64(count),
UpperBound: proto.Float64(upperBound),
})
}
if len(buckets) > 0 {
sort.Sort(buckSort(buckets))
}
his.Bucket = buckets
out.Histogram = his
out.Label = h.labelPairs
return nil
}
// NewConstHistogram returns a metric representing a Prometheus histogram with
// fixed values for the count, sum, and bucket counts. As those parameters
// cannot be changed, the returned value does not implement the Histogram
// interface (but only the Metric interface). Users of this package will not
// have much use for it in regular operations. However, when implementing custom
// Collectors, it is useful as a throw-away metric that is generated on the fly
// and sent to Prometheus in the Collect method.
//
// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
// bucket.
//
// NewConstHistogram returns an error if the length of labelValues is not
// consistent with the variable labels in Desc.
func NewConstHistogram(
desc *Desc,
count uint64,
sum float64,
buckets map[float64]uint64,
labelValues ...string,
) (Metric, error) {
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
return nil, err
}
return &constHistogram{
desc: desc,
count: count,
sum: sum,
buckets: buckets,
labelPairs: makeLabelPairs(desc, labelValues),
}, nil
}
// MustNewConstHistogram is a version of NewConstHistogram that panics where
// NewConstHistogram would have returned an error.
func MustNewConstHistogram(
desc *Desc,
count uint64,
sum float64,
buckets map[float64]uint64,
labelValues ...string,
) Metric {
m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
if err != nil {
panic(err)
}
return m
}
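// In a custom Collector, the constructors above are typically used along these
// lines (c.desc, the counts, and the sum are invented for this sketch):
//
//	func (c *myCollector) Collect(ch chan<- Metric) {
//		buckets := map[float64]uint64{0.1: 42, 0.5: 90, 1: 97}
//		ch <- MustNewConstHistogram(c.desc, 100, 37.5, buckets)
//	}
//
// The map holds cumulative counts per upper bound, excluding the +Inf bucket,
// as documented for NewConstHistogram.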
type buckSort []*dto.Bucket
func (s buckSort) Len() int {
return len(s)
}
func (s buckSort) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s buckSort) Less(i, j int) bool {
return s[i].GetUpperBound() < s[j].GetUpperBound()
}

View File

@@ -0,0 +1,348 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"math"
"math/rand"
"reflect"
"sort"
"sync"
"testing"
"testing/quick"
dto "github.com/prometheus/client_model/go"
)
func benchmarkHistogramObserve(w int, b *testing.B) {
b.StopTimer()
wg := new(sync.WaitGroup)
wg.Add(w)
g := new(sync.WaitGroup)
g.Add(1)
s := NewHistogram(HistogramOpts{})
for i := 0; i < w; i++ {
go func() {
g.Wait()
for i := 0; i < b.N; i++ {
s.Observe(float64(i))
}
wg.Done()
}()
}
b.StartTimer()
g.Done()
wg.Wait()
}
func BenchmarkHistogramObserve1(b *testing.B) {
benchmarkHistogramObserve(1, b)
}
func BenchmarkHistogramObserve2(b *testing.B) {
benchmarkHistogramObserve(2, b)
}
func BenchmarkHistogramObserve4(b *testing.B) {
benchmarkHistogramObserve(4, b)
}
func BenchmarkHistogramObserve8(b *testing.B) {
benchmarkHistogramObserve(8, b)
}
func benchmarkHistogramWrite(w int, b *testing.B) {
b.StopTimer()
wg := new(sync.WaitGroup)
wg.Add(w)
g := new(sync.WaitGroup)
g.Add(1)
s := NewHistogram(HistogramOpts{})
for i := 0; i < 1000000; i++ {
s.Observe(float64(i))
}
for j := 0; j < w; j++ {
outs := make([]dto.Metric, b.N)
go func(o []dto.Metric) {
g.Wait()
for i := 0; i < b.N; i++ {
s.Write(&o[i])
}
wg.Done()
}(outs)
}
b.StartTimer()
g.Done()
wg.Wait()
}
func BenchmarkHistogramWrite1(b *testing.B) {
benchmarkHistogramWrite(1, b)
}
func BenchmarkHistogramWrite2(b *testing.B) {
benchmarkHistogramWrite(2, b)
}
func BenchmarkHistogramWrite4(b *testing.B) {
benchmarkHistogramWrite(4, b)
}
func BenchmarkHistogramWrite8(b *testing.B) {
benchmarkHistogramWrite(8, b)
}
func TestHistogramNonMonotonicBuckets(t *testing.T) {
testCases := map[string][]float64{
"not strictly monotonic": {1, 2, 2, 3},
"not monotonic at all": {1, 2, 4, 3, 5},
"have +Inf in the middle": {1, 2, math.Inf(+1), 3},
}
for name, buckets := range testCases {
func() {
defer func() {
if r := recover(); r == nil {
t.Errorf("Buckets %v are %s but NewHistogram did not panic.", buckets, name)
}
}()
_ = NewHistogram(HistogramOpts{
Name: "test_histogram",
Help: "helpless",
Buckets: buckets,
})
}()
}
}
// Intentionally adding +Inf here to test if that case is handled correctly.
// Also, getCumulativeCounts depends on it.
var testBuckets = []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)}
func TestHistogramConcurrency(t *testing.T) {
if testing.Short() {
t.Skip("Skipping test in short mode.")
}
rand.Seed(42)
it := func(n uint32) bool {
mutations := int(n%1e4 + 1e4)
concLevel := int(n%5 + 1)
total := mutations * concLevel
var start, end sync.WaitGroup
start.Add(1)
end.Add(concLevel)
sum := NewHistogram(HistogramOpts{
Name: "test_histogram",
Help: "helpless",
Buckets: testBuckets,
})
allVars := make([]float64, total)
var sampleSum float64
for i := 0; i < concLevel; i++ {
vals := make([]float64, mutations)
for j := 0; j < mutations; j++ {
v := rand.NormFloat64()
vals[j] = v
allVars[i*mutations+j] = v
sampleSum += v
}
go func(vals []float64) {
start.Wait()
for _, v := range vals {
sum.Observe(v)
}
end.Done()
}(vals)
}
sort.Float64s(allVars)
start.Done()
end.Wait()
m := &dto.Metric{}
sum.Write(m)
if got, want := int(*m.Histogram.SampleCount), total; got != want {
t.Errorf("got sample count %d, want %d", got, want)
}
if got, want := *m.Histogram.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 {
t.Errorf("got sample sum %f, want %f", got, want)
}
wantCounts := getCumulativeCounts(allVars)
if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want {
t.Errorf("got %d buckets in protobuf, want %d", got, want)
}
for i, wantBound := range testBuckets {
if i == len(testBuckets)-1 {
break // No +Inf bucket in protobuf.
}
if gotBound := *m.Histogram.Bucket[i].UpperBound; gotBound != wantBound {
t.Errorf("got bound %f, want %f", gotBound, wantBound)
}
if gotCount, wantCount := *m.Histogram.Bucket[i].CumulativeCount, wantCounts[i]; gotCount != wantCount {
t.Errorf("got count %d, want %d", gotCount, wantCount)
}
}
return true
}
if err := quick.Check(it, nil); err != nil {
t.Error(err)
}
}
func TestHistogramVecConcurrency(t *testing.T) {
if testing.Short() {
t.Skip("Skipping test in short mode.")
}
rand.Seed(42)
objectives := make([]float64, 0, len(DefObjectives))
for qu := range DefObjectives {
objectives = append(objectives, qu)
}
sort.Float64s(objectives)
it := func(n uint32) bool {
mutations := int(n%1e4 + 1e4)
concLevel := int(n%7 + 1)
vecLength := int(n%3 + 1)
var start, end sync.WaitGroup
start.Add(1)
end.Add(concLevel)
his := NewHistogramVec(
HistogramOpts{
Name: "test_histogram",
Help: "helpless",
Buckets: []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)},
},
[]string{"label"},
)
allVars := make([][]float64, vecLength)
sampleSums := make([]float64, vecLength)
for i := 0; i < concLevel; i++ {
vals := make([]float64, mutations)
picks := make([]int, mutations)
for j := 0; j < mutations; j++ {
v := rand.NormFloat64()
vals[j] = v
pick := rand.Intn(vecLength)
picks[j] = pick
allVars[pick] = append(allVars[pick], v)
sampleSums[pick] += v
}
go func(vals []float64) {
start.Wait()
for i, v := range vals {
his.WithLabelValues(string('A' + picks[i])).Observe(v)
}
end.Done()
}(vals)
}
for _, vars := range allVars {
sort.Float64s(vars)
}
start.Done()
end.Wait()
for i := 0; i < vecLength; i++ {
m := &dto.Metric{}
s := his.WithLabelValues(string('A' + i))
s.(Histogram).Write(m)
if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want {
t.Errorf("got %d buckets in protobuf, want %d", got, want)
}
if got, want := int(*m.Histogram.SampleCount), len(allVars[i]); got != want {
t.Errorf("got sample count %d, want %d", got, want)
}
if got, want := *m.Histogram.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 {
t.Errorf("got sample sum %f, want %f", got, want)
}
wantCounts := getCumulativeCounts(allVars[i])
for j, wantBound := range testBuckets {
if j == len(testBuckets)-1 {
break // No +Inf bucket in protobuf.
}
if gotBound := *m.Histogram.Bucket[j].UpperBound; gotBound != wantBound {
t.Errorf("got bound %f, want %f", gotBound, wantBound)
}
if gotCount, wantCount := *m.Histogram.Bucket[j].CumulativeCount, wantCounts[j]; gotCount != wantCount {
t.Errorf("got count %d, want %d", gotCount, wantCount)
}
}
}
return true
}
if err := quick.Check(it, nil); err != nil {
t.Error(err)
}
}
func getCumulativeCounts(vars []float64) []uint64 {
counts := make([]uint64, len(testBuckets))
for _, v := range vars {
for i := len(testBuckets) - 1; i >= 0; i-- {
if v > testBuckets[i] {
break
}
counts[i]++
}
}
return counts
}
func TestBuckets(t *testing.T) {
got := LinearBuckets(-15, 5, 6)
want := []float64{-15, -10, -5, 0, 5, 10}
if !reflect.DeepEqual(got, want) {
t.Errorf("linear buckets: got %v, want %v", got, want)
}
got = ExponentialBuckets(100, 1.2, 3)
want = []float64{100, 120, 144}
if !reflect.DeepEqual(got, want) {
t.Errorf("linear buckets: got %v, want %v", got, want)
}
}

View File

@@ -0,0 +1,524 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"bufio"
"bytes"
"compress/gzip"
"fmt"
"io"
"net"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/prometheus/common/expfmt"
)
// TODO(beorn7): Remove this whole file. It is a partial mirror of
// promhttp/http.go (to avoid circular import chains) where everything HTTP
// related should live. The functions here are just for avoiding
// breakage. Everything is deprecated.
const (
contentTypeHeader = "Content-Type"
contentLengthHeader = "Content-Length"
contentEncodingHeader = "Content-Encoding"
acceptEncodingHeader = "Accept-Encoding"
)
var bufPool sync.Pool
func getBuf() *bytes.Buffer {
buf := bufPool.Get()
if buf == nil {
return &bytes.Buffer{}
}
return buf.(*bytes.Buffer)
}
func giveBuf(buf *bytes.Buffer) {
buf.Reset()
bufPool.Put(buf)
}
// Handler returns an HTTP handler for the DefaultGatherer. It is
// already instrumented with InstrumentHandler (using "prometheus" as handler
// name).
//
// Deprecated: Please note the issues described in the doc comment of
// InstrumentHandler. You might want to consider using promhttp.Handler instead
// (which is not instrumented, but can be instrumented with the tooling provided
// in package promhttp).
func Handler() http.Handler {
return InstrumentHandler("prometheus", UninstrumentedHandler())
}
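// For reference, the deprecated path and its suggested replacement look like
// this (a sketch; "/metrics" is simply the conventional endpoint):
//
//	http.Handle("/metrics", Handler())          // deprecated, instrumented wrapper
//	http.Handle("/metrics", promhttp.Handler()) // preferred, from package promhttp
//	log.Fatal(http.ListenAndServe(":8080", nil))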
// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
//
// Deprecated: Use promhttp.Handler instead. See there for further documentation.
func UninstrumentedHandler() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
mfs, err := DefaultGatherer.Gather()
if err != nil {
http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError)
return
}
contentType := expfmt.Negotiate(req.Header)
buf := getBuf()
defer giveBuf(buf)
writer, encoding := decorateWriter(req, buf)
enc := expfmt.NewEncoder(writer, contentType)
var lastErr error
for _, mf := range mfs {
if err := enc.Encode(mf); err != nil {
lastErr = err
http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
return
}
}
if closer, ok := writer.(io.Closer); ok {
closer.Close()
}
if lastErr != nil && buf.Len() == 0 {
http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
return
}
header := w.Header()
header.Set(contentTypeHeader, string(contentType))
header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
if encoding != "" {
header.Set(contentEncodingHeader, encoding)
}
w.Write(buf.Bytes())
})
}
// decorateWriter wraps a writer to handle gzip compression if requested. It
// returns the decorated writer and the appropriate "Content-Encoding" header
// (which is empty if no compression is enabled).
func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
header := request.Header.Get(acceptEncodingHeader)
parts := strings.Split(header, ",")
for _, part := range parts {
part := strings.TrimSpace(part)
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
return gzip.NewWriter(writer), "gzip"
}
}
return writer, ""
}
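// For example (a sketch): a scrape request carrying
// "Accept-Encoding: gzip, deflate" gets a *gzip.Writer back together with the
// "gzip" encoding value, and the caller must close the writer so the gzip
// stream is flushed before the buffer is written out, as done in
// UninstrumentedHandler above:
//
//	w, enc := decorateWriter(req, buf) // enc == "gzip"
//	// ... encode metrics into w ...
//	if c, ok := w.(io.Closer); ok {
//		c.Close()
//	}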
var instLabels = []string{"method", "code"}
type nower interface {
Now() time.Time
}
type nowFunc func() time.Time
func (n nowFunc) Now() time.Time {
return n()
}
var now nower = nowFunc(func() time.Time {
return time.Now()
})
func nowSeries(t ...time.Time) nower {
return nowFunc(func() time.Time {
defer func() {
t = t[1:]
}()
return t[0]
})
}
// InstrumentHandler wraps the given HTTP handler for instrumentation. It
// registers four metric collectors (if not already done) and reports HTTP
// metrics to the (newly or already) registered collectors: http_requests_total
// (CounterVec), http_request_duration_microseconds (Summary),
// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each
// has a constant label named "handler" with the provided handlerName as
// value. http_requests_total is a metric vector partitioned by HTTP method
// (label name "method") and HTTP status code (label name "code").
//
// Deprecated: InstrumentHandler has several issues. Use the tooling provided in
// package promhttp instead. The issues are the following:
//
// - It uses Summaries rather than Histograms. Summaries are not useful if
// aggregation across multiple instances is required.
//
// - It uses microseconds as unit, which is deprecated and should be replaced by
// seconds.
//
// - The size of the request is calculated in a separate goroutine. Since this
// calculator requires access to the request header, it creates a race with
// any writes to the header performed during request handling.
// httputil.ReverseProxy is a prominent example for a handler
// performing such writes.
//
// - It has additional issues with HTTP/2, cf.
// https://github.com/prometheus/client_golang/issues/272.
func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
}
// InstrumentHandlerFunc wraps the given function for instrumentation. It
// otherwise works in the same way as InstrumentHandler (and shares the same
// issues).
//
// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
// InstrumentHandler is. Use the tooling provided in package promhttp instead.
func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
return InstrumentHandlerFuncWithOpts(
SummaryOpts{
Subsystem: "http",
ConstLabels: Labels{"handler": handlerName},
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
},
handlerFunc,
)
}
// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same
// issues) but provides more flexibility (at the cost of a more complex call
// syntax). As InstrumentHandler, this function registers four metric
// collectors, but it uses the provided SummaryOpts to create them. However, the
// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced
// by "requests_total", "request_duration_microseconds", "request_size_bytes",
// and "response_size_bytes", respectively. "Help" is replaced by an appropriate
// help string. The names of the variable labels of the http_requests_total
// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
//
// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the
// behavior of InstrumentHandler:
//
// prometheus.InstrumentHandlerWithOpts(
// prometheus.SummaryOpts{
// Subsystem: "http",
// ConstLabels: prometheus.Labels{"handler": handlerName},
// },
// handler,
// )
//
// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it
// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
// and all its fields are set to the equally named fields in the provided
// SummaryOpts.
//
// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
// InstrumentHandler is. Use the tooling provided in package promhttp instead.
func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
}
// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares
// the same issues) but provides more flexibility (at the cost of a more complex
// call syntax). See InstrumentHandlerWithOpts for details how the provided
// SummaryOpts are used.
//
// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
// as InstrumentHandler is. Use the tooling provided in package promhttp instead.
func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
reqCnt := NewCounterVec(
CounterOpts{
Namespace: opts.Namespace,
Subsystem: opts.Subsystem,
Name: "requests_total",
Help: "Total number of HTTP requests made.",
ConstLabels: opts.ConstLabels,
},
instLabels,
)
if err := Register(reqCnt); err != nil {
if are, ok := err.(AlreadyRegisteredError); ok {
reqCnt = are.ExistingCollector.(*CounterVec)
} else {
panic(err)
}
}
opts.Name = "request_duration_microseconds"
opts.Help = "The HTTP request latencies in microseconds."
reqDur := NewSummary(opts)
if err := Register(reqDur); err != nil {
if are, ok := err.(AlreadyRegisteredError); ok {
reqDur = are.ExistingCollector.(Summary)
} else {
panic(err)
}
}
opts.Name = "request_size_bytes"
opts.Help = "The HTTP request sizes in bytes."
reqSz := NewSummary(opts)
if err := Register(reqSz); err != nil {
if are, ok := err.(AlreadyRegisteredError); ok {
reqSz = are.ExistingCollector.(Summary)
} else {
panic(err)
}
}
opts.Name = "response_size_bytes"
opts.Help = "The HTTP response sizes in bytes."
resSz := NewSummary(opts)
if err := Register(resSz); err != nil {
if are, ok := err.(AlreadyRegisteredError); ok {
resSz = are.ExistingCollector.(Summary)
} else {
panic(err)
}
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
delegate := &responseWriterDelegator{ResponseWriter: w}
out := computeApproximateRequestSize(r)
_, cn := w.(http.CloseNotifier)
_, fl := w.(http.Flusher)
_, hj := w.(http.Hijacker)
_, rf := w.(io.ReaderFrom)
var rw http.ResponseWriter
if cn && fl && hj && rf {
rw = &fancyResponseWriterDelegator{delegate}
} else {
rw = delegate
}
handlerFunc(rw, r)
elapsed := float64(time.Since(now)) / float64(time.Microsecond)
method := sanitizeMethod(r.Method)
code := sanitizeCode(delegate.status)
reqCnt.WithLabelValues(method, code).Inc()
reqDur.Observe(elapsed)
resSz.Observe(float64(delegate.written))
reqSz.Observe(float64(<-out))
})
}
func computeApproximateRequestSize(r *http.Request) <-chan int {
// Get the URL length in the current goroutine to avoid a race condition.
// A HandlerFunc that runs in parallel may modify the URL.
s := 0
if r.URL != nil {
s += len(r.URL.String())
}
out := make(chan int, 1)
go func() {
s += len(r.Method)
s += len(r.Proto)
for name, values := range r.Header {
s += len(name)
for _, value := range values {
s += len(value)
}
}
s += len(r.Host)
// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
if r.ContentLength != -1 {
s += int(r.ContentLength)
}
out <- s
close(out)
}()
return out
}
type responseWriterDelegator struct {
http.ResponseWriter
handler, method string
status int
written int64
wroteHeader bool
}
func (r *responseWriterDelegator) WriteHeader(code int) {
r.status = code
r.wroteHeader = true
r.ResponseWriter.WriteHeader(code)
}
func (r *responseWriterDelegator) Write(b []byte) (int, error) {
if !r.wroteHeader {
r.WriteHeader(http.StatusOK)
}
n, err := r.ResponseWriter.Write(b)
r.written += int64(n)
return n, err
}
type fancyResponseWriterDelegator struct {
*responseWriterDelegator
}
func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {
return f.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
func (f *fancyResponseWriterDelegator) Flush() {
f.ResponseWriter.(http.Flusher).Flush()
}
func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return f.ResponseWriter.(http.Hijacker).Hijack()
}
func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) {
if !f.wroteHeader {
f.WriteHeader(http.StatusOK)
}
n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r)
f.written += n
return n, err
}
func sanitizeMethod(m string) string {
switch m {
case "GET", "get":
return "get"
case "PUT", "put":
return "put"
case "HEAD", "head":
return "head"
case "POST", "post":
return "post"
case "DELETE", "delete":
return "delete"
case "CONNECT", "connect":
return "connect"
case "OPTIONS", "options":
return "options"
case "NOTIFY", "notify":
return "notify"
default:
return strings.ToLower(m)
}
}
func sanitizeCode(s int) string {
switch s {
case 100:
return "100"
case 101:
return "101"
case 200:
return "200"
case 201:
return "201"
case 202:
return "202"
case 203:
return "203"
case 204:
return "204"
case 205:
return "205"
case 206:
return "206"
case 300:
return "300"
case 301:
return "301"
case 302:
return "302"
case 304:
return "304"
case 305:
return "305"
case 307:
return "307"
case 400:
return "400"
case 401:
return "401"
case 402:
return "402"
case 403:
return "403"
case 404:
return "404"
case 405:
return "405"
case 406:
return "406"
case 407:
return "407"
case 408:
return "408"
case 409:
return "409"
case 410:
return "410"
case 411:
return "411"
case 412:
return "412"
case 413:
return "413"
case 414:
return "414"
case 415:
return "415"
case 416:
return "416"
case 417:
return "417"
case 418:
return "418"
case 500:
return "500"
case 501:
return "501"
case 502:
return "502"
case 503:
return "503"
case 504:
return "504"
case 505:
return "505"
case 428:
return "428"
case 429:
return "429"
case 431:
return "431"
case 511:
return "511"
default:
return strconv.Itoa(s)
}
}

View File

@@ -0,0 +1,154 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"net/http"
"net/http/httptest"
"testing"
"time"
dto "github.com/prometheus/client_model/go"
)
type respBody string
func (b respBody) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusTeapot)
w.Write([]byte(b))
}
func TestInstrumentHandler(t *testing.T) {
defer func(n nower) {
now = n.(nower)
}(now)
instant := time.Now()
end := instant.Add(30 * time.Second)
now = nowSeries(instant, end)
respBody := respBody("Howdy there!")
hndlr := InstrumentHandler("test-handler", respBody)
opts := SummaryOpts{
Subsystem: "http",
ConstLabels: Labels{"handler": "test-handler"},
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
}
reqCnt := NewCounterVec(
CounterOpts{
Namespace: opts.Namespace,
Subsystem: opts.Subsystem,
Name: "requests_total",
Help: "Total number of HTTP requests made.",
ConstLabels: opts.ConstLabels,
},
instLabels,
)
err := Register(reqCnt)
if err == nil {
t.Fatal("expected reqCnt to be registered already")
}
if are, ok := err.(AlreadyRegisteredError); ok {
reqCnt = are.ExistingCollector.(*CounterVec)
} else {
t.Fatal("unexpected registration error:", err)
}
opts.Name = "request_duration_microseconds"
opts.Help = "The HTTP request latencies in microseconds."
reqDur := NewSummary(opts)
err = Register(reqDur)
if err == nil {
t.Fatal("expected reqDur to be registered already")
}
if are, ok := err.(AlreadyRegisteredError); ok {
reqDur = are.ExistingCollector.(Summary)
} else {
t.Fatal("unexpected registration error:", err)
}
opts.Name = "request_size_bytes"
opts.Help = "The HTTP request sizes in bytes."
reqSz := NewSummary(opts)
err = Register(reqSz)
if err == nil {
t.Fatal("expected reqSz to be registered already")
}
if _, ok := err.(AlreadyRegisteredError); !ok {
t.Fatal("unexpected registration error:", err)
}
opts.Name = "response_size_bytes"
opts.Help = "The HTTP response sizes in bytes."
resSz := NewSummary(opts)
err = Register(resSz)
if err == nil {
t.Fatal("expected resSz to be registered already")
}
if _, ok := err.(AlreadyRegisteredError); !ok {
t.Fatal("unexpected registration error:", err)
}
reqCnt.Reset()
resp := httptest.NewRecorder()
req := &http.Request{
Method: "GET",
}
hndlr.ServeHTTP(resp, req)
if resp.Code != http.StatusTeapot {
t.Fatalf("expected status %d, got %d", http.StatusTeapot, resp.Code)
}
if string(resp.Body.Bytes()) != "Howdy there!" {
t.Fatalf("expected body %s, got %s", "Howdy there!", string(resp.Body.Bytes()))
}
out := &dto.Metric{}
reqDur.Write(out)
if want, got := "test-handler", out.Label[0].GetValue(); want != got {
t.Errorf("want label value %q in reqDur, got %q", want, got)
}
if want, got := uint64(1), out.Summary.GetSampleCount(); want != got {
t.Errorf("want sample count %d in reqDur, got %d", want, got)
}
out.Reset()
if want, got := 1, len(reqCnt.children); want != got {
t.Errorf("want %d children in reqCnt, got %d", want, got)
}
cnt, err := reqCnt.GetMetricWithLabelValues("get", "418")
if err != nil {
t.Fatal(err)
}
cnt.Write(out)
if want, got := "418", out.Label[0].GetValue(); want != got {
t.Errorf("want label value %q in reqCnt, got %q", want, got)
}
if want, got := "test-handler", out.Label[1].GetValue(); want != got {
t.Errorf("want label value %q in reqCnt, got %q", want, got)
}
if want, got := "get", out.Label[2].GetValue(); want != got {
t.Errorf("want label value %q in reqCnt, got %q", want, got)
}
if out.Counter == nil {
t.Fatal("expected non-nil counter in reqCnt")
}
if want, got := 1., out.Counter.GetValue(); want != got {
t.Errorf("want reqCnt of %f, got %f", want, got)
}
}

View File

@@ -0,0 +1,57 @@
package prometheus
import (
"errors"
"fmt"
"strings"
"unicode/utf8"
"github.com/prometheus/common/model"
)
// Labels represents a collection of label name -> value mappings. This type is
// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
// metric vector Collectors, e.g.:
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
//
// The other use-case is the specification of constant label pairs in Opts or to
// create a Desc.
type Labels map[string]string
// reservedLabelPrefix is a prefix which is not legal in user-supplied
// label names.
const reservedLabelPrefix = "__"
var errInconsistentCardinality = errors.New("inconsistent label cardinality")
func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
if len(labels) != expectedNumberOfValues {
return errInconsistentCardinality
}
for name, val := range labels {
if !utf8.ValidString(val) {
return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val)
}
}
return nil
}
func validateLabelValues(vals []string, expectedNumberOfValues int) error {
if len(vals) != expectedNumberOfValues {
return errInconsistentCardinality
}
for _, val := range vals {
if !utf8.ValidString(val) {
return fmt.Errorf("label value %q is not valid UTF-8", val)
}
}
return nil
}
func checkLabelName(l string) bool {
return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix)
}
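// A few illustrative inputs (a sketch, not an exhaustive list): label names
// must match the Prometheus label name pattern and must not use the reserved
// "__" prefix.
//
//	checkLabelName("method")    // true
//	checkLabelName("http_code") // true
//	checkLabelName("__name__")  // false: reserved prefix
//	checkLabelName("1st_label") // false: must not start with a digit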

View File

@@ -0,0 +1,166 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"strings"
dto "github.com/prometheus/client_model/go"
)
const separatorByte byte = 255
// A Metric models a single sample value with its meta data being exported to
// Prometheus. Implementations of Metric in this package are Gauge, Counter,
// Histogram, Summary, and Untyped.
type Metric interface {
// Desc returns the descriptor for the Metric. This method idempotently
// returns the same descriptor throughout the lifetime of the
// Metric. The returned descriptor is immutable by contract. A Metric
// unable to describe itself must return an invalid descriptor (created
// with NewInvalidDesc).
Desc() *Desc
// Write encodes the Metric into a "Metric" Protocol Buffer data
// transmission object.
//
// Metric implementations must observe concurrency safety as reads of
// this metric may occur at any time, and any blocking occurs at the
// expense of total performance of rendering all registered
// metrics. Ideally, Metric implementations should support concurrent
// readers.
//
// While populating dto.Metric, it is the responsibility of the
// implementation to ensure validity of the Metric protobuf (like valid
// UTF-8 strings or syntactically valid metric and label names). It is
// recommended to sort labels lexicographically. (Implementers may find
// LabelPairSorter useful for that.) Callers of Write should still make
// sure of sorting if they depend on it.
Write(*dto.Metric) error
// TODO(beorn7): The original rationale of passing in a pre-allocated
// dto.Metric protobuf to save allocations has disappeared. The
// signature of this method should be changed to "Write() (*dto.Metric,
// error)".
}
// Opts bundles the options for creating most Metric types. Each metric
// implementation XXX has its own XXXOpts type, but in most cases, it is just
// an alias of this type (which might change when the requirement arises).
//
// It is mandatory to set Name and Help to a non-empty string. All other fields
// are optional and can safely be left at their zero value.
type Opts struct {
// Namespace, Subsystem, and Name are components of the fully-qualified
// name of the Metric (created by joining these components with
// "_"). Only Name is mandatory, the others merely help structuring the
// name. Note that the fully-qualified name of the metric must be a
// valid Prometheus metric name.
Namespace string
Subsystem string
Name string
// Help provides information about this metric. Mandatory!
//
// Metrics with the same fully-qualified name must have the same Help
// string.
Help string
// ConstLabels are used to attach fixed labels to this metric. Metrics
// with the same fully-qualified name must have the same label names in
// their ConstLabels.
//
// Note that in most cases, labels have a value that varies during the
// lifetime of a process. Those labels are usually managed with a metric
// vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels
// serve only special purposes. One is for the special case where the
// value of a label does not change during the lifetime of a process,
// e.g. if the revision of the running binary is put into a
// label. Another, more advanced purpose is if more than one Collector
// needs to collect Metrics with the same fully-qualified name. In that
// case, those Metrics must differ in the values of their
// ConstLabels. See the Collector examples.
//
// If the value of a label never changes (not even between binaries),
// that label most likely should not be a label at all (but part of the
// metric name).
ConstLabels Labels
}
// BuildFQName joins the given three name components by "_". Empty name
// components are ignored. If the name parameter itself is empty, an empty
// string is returned, no matter what. Metric implementations included in this
// library use this function internally to generate the fully-qualified metric
// name from the name component in their Opts. Users of the library will only
// need this function if they implement their own Metric or instantiate a Desc
// (with NewDesc) directly.
func BuildFQName(namespace, subsystem, name string) string {
if name == "" {
return ""
}
switch {
case namespace != "" && subsystem != "":
return strings.Join([]string{namespace, subsystem, name}, "_")
case namespace != "":
return strings.Join([]string{namespace, name}, "_")
case subsystem != "":
return strings.Join([]string{subsystem, name}, "_")
}
return name
}
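// A few concrete inputs and outputs (a sketch mirroring the test cases in this
// package):
//
//	BuildFQName("namespace", "subsystem", "name") // "namespace_subsystem_name"
//	BuildFQName("", "subsystem", "name")          // "subsystem_name"
//	BuildFQName("namespace", "", "name")          // "namespace_name"
//	BuildFQName("namespace", "subsystem", "")     // ""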
// LabelPairSorter implements sort.Interface. It is used to sort a slice of
// dto.LabelPair pointers. This is useful for implementing the Write method of
// custom metrics.
type LabelPairSorter []*dto.LabelPair
func (s LabelPairSorter) Len() int {
return len(s)
}
func (s LabelPairSorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s LabelPairSorter) Less(i, j int) bool {
return s[i].GetName() < s[j].GetName()
}
type hashSorter []uint64
func (s hashSorter) Len() int {
return len(s)
}
func (s hashSorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s hashSorter) Less(i, j int) bool {
return s[i] < s[j]
}
type invalidMetric struct {
desc *Desc
err error
}
// NewInvalidMetric returns a metric whose Write method always returns the
// provided error. It is useful if a Collector finds itself unable to collect
// a metric and wishes to report an error to the registry.
func NewInvalidMetric(desc *Desc, err error) Metric {
return &invalidMetric{desc, err}
}
func (m *invalidMetric) Desc() *Desc { return m.desc }
func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
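// A typical use of NewInvalidMetric inside a custom Collector (a sketch;
// flakyCollector, c.desc, and c.read are invented names):
//
//	func (c *flakyCollector) Collect(ch chan<- Metric) {
//		v, err := c.read()
//		if err != nil {
//			ch <- NewInvalidMetric(c.desc, err)
//			return
//		}
//		ch <- MustNewConstMetric(c.desc, GaugeValue, v)
//	}
//
// The registry then surfaces the error during gathering instead of silently
// dropping the metric.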

View File

@@ -0,0 +1,35 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import "testing"
func TestBuildFQName(t *testing.T) {
scenarios := []struct{ namespace, subsystem, name, result string }{
{"a", "b", "c", "a_b_c"},
{"", "b", "c", "b_c"},
{"a", "", "c", "a_c"},
{"", "", "c", "c"},
{"a", "b", "", ""},
{"a", "", "", ""},
{"", "b", "", ""},
{" ", "", "", ""},
}
for i, s := range scenarios {
if want, got := s.result, BuildFQName(s.namespace, s.subsystem, s.name); want != got {
t.Errorf("%d. want %s, got %s", i, want, got)
}
}
}

View File

@@ -0,0 +1,50 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
// Observer is the interface that wraps the Observe method, which is used by
// Histogram and Summary to add observations.
type Observer interface {
Observe(float64)
}
// The ObserverFunc type is an adapter to allow the use of ordinary
// functions as Observers. If f is a function with the appropriate
// signature, ObserverFunc(f) is an Observer that calls f.
//
// This adapter is usually used in connection with the Timer type, and there are
// two general use cases:
//
// The most common one is to use a Gauge as the Observer for a Timer.
// See the "Gauge" Timer example.
//
// The more advanced use case is to create a function that dynamically decides
// which Observer to use for observing the duration. See the "Complex" Timer
// example.
type ObserverFunc func(float64)
// Observe calls f(value). It implements Observer.
func (f ObserverFunc) Observe(value float64) {
f(value)
}
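// The "Gauge as Observer" pattern mentioned above looks roughly like this
// (a sketch; the gauge is assumed to exist already):
//
//	timer := NewTimer(ObserverFunc(gauge.Set))
//	defer timer.ObserveDuration()
//
// ObserverFunc adapts the gauge's Set method so the Timer can hand the
// measured duration to it.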
// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`.
type ObserverVec interface {
GetMetricWith(Labels) (Observer, error)
GetMetricWithLabelValues(lvs ...string) (Observer, error)
With(Labels) Observer
WithLabelValues(...string) Observer
Collector
}

View File

@@ -0,0 +1,140 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import "github.com/prometheus/procfs"
type processCollector struct {
pid int
collectFn func(chan<- Metric)
pidFn func() (int, error)
cpuTotal *Desc
openFDs, maxFDs *Desc
vsize, rss *Desc
startTime *Desc
}
// NewProcessCollector returns a collector which exports the current state of
// process metrics including CPU, memory and file descriptor usage as well as
// the process start time for the given process id under the given namespace.
func NewProcessCollector(pid int, namespace string) Collector {
return NewProcessCollectorPIDFn(
func() (int, error) { return pid, nil },
namespace,
)
}
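// Registering the collector for the current process is typically a one-liner
// (a sketch; the empty namespace keeps the default metric names):
//
//	MustRegister(NewProcessCollector(os.Getpid(), ""))
//
// which exports process_cpu_seconds_total, process_open_fds, and the other
// metrics described by the descriptors set up below.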
// NewProcessCollectorPIDFn returns a collector which exports the current state
// of process metrics including CPU, memory and file descriptor usage as well
// as the process start time under the given namespace. The given pidFn is
// called on each collect and is used to determine the process to export
// metrics for.
func NewProcessCollectorPIDFn(
pidFn func() (int, error),
namespace string,
) Collector {
ns := ""
if len(namespace) > 0 {
ns = namespace + "_"
}
c := processCollector{
pidFn: pidFn,
collectFn: func(chan<- Metric) {},
cpuTotal: NewDesc(
ns+"process_cpu_seconds_total",
"Total user and system CPU time spent in seconds.",
nil, nil,
),
openFDs: NewDesc(
ns+"process_open_fds",
"Number of open file descriptors.",
nil, nil,
),
maxFDs: NewDesc(
ns+"process_max_fds",
"Maximum number of open file descriptors.",
nil, nil,
),
vsize: NewDesc(
ns+"process_virtual_memory_bytes",
"Virtual memory size in bytes.",
nil, nil,
),
rss: NewDesc(
ns+"process_resident_memory_bytes",
"Resident memory size in bytes.",
nil, nil,
),
startTime: NewDesc(
ns+"process_start_time_seconds",
"Start time of the process since unix epoch in seconds.",
nil, nil,
),
}
// Set up process metric collection if supported by the runtime.
if _, err := procfs.NewStat(); err == nil {
c.collectFn = c.processCollect
}
return &c
}
// Describe returns all descriptions of the collector.
func (c *processCollector) Describe(ch chan<- *Desc) {
ch <- c.cpuTotal
ch <- c.openFDs
ch <- c.maxFDs
ch <- c.vsize
ch <- c.rss
ch <- c.startTime
}
// Collect returns the current state of all metrics of the collector.
func (c *processCollector) Collect(ch chan<- Metric) {
c.collectFn(ch)
}
// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
// client allows users to configure the error behavior.
func (c *processCollector) processCollect(ch chan<- Metric) {
pid, err := c.pidFn()
if err != nil {
return
}
p, err := procfs.NewProc(pid)
if err != nil {
return
}
if stat, err := p.NewStat(); err == nil {
ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
if startTime, err := stat.StartTime(); err == nil {
ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
}
}
if fds, err := p.FileDescriptorsLen(); err == nil {
ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
}
if limits, err := p.NewLimits(); err == nil {
ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
}
}

View File

@@ -0,0 +1,58 @@
package prometheus
import (
"bytes"
"os"
"regexp"
"testing"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/procfs"
)
func TestProcessCollector(t *testing.T) {
if _, err := procfs.Self(); err != nil {
t.Skipf("skipping TestProcessCollector, procfs not available: %s", err)
}
registry := NewRegistry()
if err := registry.Register(NewProcessCollector(os.Getpid(), "")); err != nil {
t.Fatal(err)
}
if err := registry.Register(NewProcessCollectorPIDFn(
func() (int, error) { return os.Getpid(), nil }, "foobar"),
); err != nil {
t.Fatal(err)
}
mfs, err := registry.Gather()
if err != nil {
t.Fatal(err)
}
var buf bytes.Buffer
for _, mf := range mfs {
if _, err := expfmt.MetricFamilyToText(&buf, mf); err != nil {
t.Fatal(err)
}
}
for _, re := range []*regexp.Regexp{
regexp.MustCompile("\nprocess_cpu_seconds_total [0-9]"),
regexp.MustCompile("\nprocess_max_fds [1-9]"),
regexp.MustCompile("\nprocess_open_fds [1-9]"),
regexp.MustCompile("\nprocess_virtual_memory_bytes [1-9]"),
regexp.MustCompile("\nprocess_resident_memory_bytes [1-9]"),
regexp.MustCompile("\nprocess_start_time_seconds [0-9.]{10,}"),
regexp.MustCompile("\nfoobar_process_cpu_seconds_total [0-9]"),
regexp.MustCompile("\nfoobar_process_max_fds [1-9]"),
regexp.MustCompile("\nfoobar_process_open_fds [1-9]"),
regexp.MustCompile("\nfoobar_process_virtual_memory_bytes [1-9]"),
regexp.MustCompile("\nfoobar_process_resident_memory_bytes [1-9]"),
regexp.MustCompile("\nfoobar_process_start_time_seconds [0-9.]{10,}"),
} {
if !re.Match(buf.Bytes()) {
t.Errorf("want body to match %s\n%s", re, buf.String())
}
}
}

View File

@@ -0,0 +1,199 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promhttp
import (
"bufio"
"io"
"net"
"net/http"
)
const (
closeNotifier = 1 << iota
flusher
hijacker
readerFrom
pusher
)
type delegator interface {
http.ResponseWriter
Status() int
Written() int64
}
type responseWriterDelegator struct {
http.ResponseWriter
handler, method string
status int
written int64
wroteHeader bool
observeWriteHeader func(int)
}
func (r *responseWriterDelegator) Status() int {
return r.status
}
func (r *responseWriterDelegator) Written() int64 {
return r.written
}
func (r *responseWriterDelegator) WriteHeader(code int) {
r.status = code
r.wroteHeader = true
r.ResponseWriter.WriteHeader(code)
if r.observeWriteHeader != nil {
r.observeWriteHeader(code)
}
}
func (r *responseWriterDelegator) Write(b []byte) (int, error) {
if !r.wroteHeader {
r.WriteHeader(http.StatusOK)
}
n, err := r.ResponseWriter.Write(b)
r.written += int64(n)
return n, err
}
type closeNotifierDelegator struct{ *responseWriterDelegator }
type flusherDelegator struct{ *responseWriterDelegator }
type hijackerDelegator struct{ *responseWriterDelegator }
type readerFromDelegator struct{ *responseWriterDelegator }
func (d *closeNotifierDelegator) CloseNotify() <-chan bool {
return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
func (d *flusherDelegator) Flush() {
d.ResponseWriter.(http.Flusher).Flush()
}
func (d *hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return d.ResponseWriter.(http.Hijacker).Hijack()
}
func (d *readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
if !d.wroteHeader {
d.WriteHeader(http.StatusOK)
}
n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re)
d.written += n
return n, err
}
var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32)
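// The slice is indexed by a bitmask built from the optional interfaces the
// wrapped ResponseWriter happens to implement. A caller would compute that
// index roughly as follows (a sketch; the actual wiring lives in the HTTP
// handler code of this package):
//
//	id := 0
//	if _, ok := w.(http.CloseNotifier); ok {
//		id += closeNotifier
//	}
//	if _, ok := w.(http.Flusher); ok {
//		id += flusher
//	}
//	if _, ok := w.(http.Hijacker); ok {
//		id += hijacker
//	}
//	if _, ok := w.(io.ReaderFrom); ok {
//		id += readerFrom
//	}
//	d := pickDelegator[id](&responseWriterDelegator{ResponseWriter: w})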
func init() {
// TODO(beorn7): Code generation would help here.
pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0
return d
}
pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1
return closeNotifierDelegator{d}
}
pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2
return flusherDelegator{d}
}
pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3
return struct {
*responseWriterDelegator
http.Flusher
http.CloseNotifier
}{d, &flusherDelegator{d}, &closeNotifierDelegator{d}}
}
pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4
return hijackerDelegator{d}
}
pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5
return struct {
*responseWriterDelegator
http.Hijacker
http.CloseNotifier
}{d, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
}
pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6
return struct {
*responseWriterDelegator
http.Hijacker
http.Flusher
}{d, &hijackerDelegator{d}, &flusherDelegator{d}}
}
pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7
return struct {
*responseWriterDelegator
http.Hijacker
http.Flusher
http.CloseNotifier
}{d, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
}
pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8
return readerFromDelegator{d}
}
pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9
return struct {
*responseWriterDelegator
io.ReaderFrom
http.CloseNotifier
}{d, &readerFromDelegator{d}, &closeNotifierDelegator{d}}
}
pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10
return struct {
*responseWriterDelegator
io.ReaderFrom
http.Flusher
}{d, &readerFromDelegator{d}, &flusherDelegator{d}}
}
pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11
return struct {
*responseWriterDelegator
io.ReaderFrom
http.Flusher
http.CloseNotifier
}{d, &readerFromDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
}
pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12
return struct {
*responseWriterDelegator
io.ReaderFrom
http.Hijacker
}{d, &readerFromDelegator{d}, &hijackerDelegator{d}}
}
pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13
return struct {
*responseWriterDelegator
io.ReaderFrom
http.Hijacker
http.CloseNotifier
}{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
}
pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14
return struct {
*responseWriterDelegator
io.ReaderFrom
http.Hijacker
http.Flusher
}{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}}
}
pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15
return struct {
*responseWriterDelegator
io.ReaderFrom
http.Hijacker
http.Flusher
http.CloseNotifier
}{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
}
}

View File

@@ -0,0 +1,181 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.8
package promhttp
import (
"io"
"net/http"
)
type pusherDelegator struct{ *responseWriterDelegator }
func (d *pusherDelegator) Push(target string, opts *http.PushOptions) error {
return d.ResponseWriter.(http.Pusher).Push(target, opts)
}
func init() {
pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
return pusherDelegator{d}
}
pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
return struct {
*responseWriterDelegator
http.Pusher
http.CloseNotifier
}{d, &pusherDelegator{d}, &closeNotifierDelegator{d}}
}
pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
return struct {
*responseWriterDelegator
http.Pusher
http.Flusher
}{d, &pusherDelegator{d}, &flusherDelegator{d}}
}
pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
return struct {
*responseWriterDelegator
http.Pusher
http.Flusher
http.CloseNotifier
}{d, &pusherDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
}
pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
return struct {
*responseWriterDelegator
http.Pusher
http.Hijacker
}{d, &pusherDelegator{d}, &hijackerDelegator{d}}
}
pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
return struct {
*responseWriterDelegator
http.Pusher
http.Hijacker
http.CloseNotifier
}{d, &pusherDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
}
pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
return struct {
*responseWriterDelegator
http.Pusher
http.Hijacker
http.Flusher
}{d, &pusherDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}}
}
pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 23
return struct {
*responseWriterDelegator
http.Pusher
http.Hijacker
http.Flusher
http.CloseNotifier
}{d, &pusherDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
}
pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
}{d, &pusherDelegator{d}, &readerFromDelegator{d}}
}
pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.CloseNotifier
}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &closeNotifierDelegator{d}}
}
pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Flusher
}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &flusherDelegator{d}}
}
pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Flusher
http.CloseNotifier
}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
}
pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Hijacker
}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}}
}
pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Hijacker
http.CloseNotifier
}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
}
pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Hijacker
http.Flusher
}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}}
}
pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Hijacker
http.Flusher
http.CloseNotifier
}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
}
}
func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
d := &responseWriterDelegator{
ResponseWriter: w,
observeWriteHeader: observeWriteHeaderFunc,
}
id := 0
if _, ok := w.(http.CloseNotifier); ok {
id += closeNotifier
}
if _, ok := w.(http.Flusher); ok {
id += flusher
}
if _, ok := w.(http.Hijacker); ok {
id += hijacker
}
if _, ok := w.(io.ReaderFrom); ok {
id += readerFrom
}
if _, ok := w.(http.Pusher); ok {
id += pusher
}
return pickDelegator[id](d)
}
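// exampleDelegatorIndex is a minimal sketch of how the delegator index is
// composed: it is a bitmask of the optional interfaces the wrapped
// ResponseWriter implements, assuming the flag constants are the powers of
// two implied by the numbered comments above. A writer that additionally
// implements http.Flusher and http.Pusher therefore selects the "// 18"
// combination, which embeds both flusherDelegator and pusherDelegator.
func exampleDelegatorIndex() int {
return flusher + pusher // 18 for a Flusher+Pusher ResponseWriter
}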

View File

@@ -0,0 +1,44 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !go1.8
package promhttp
import (
"io"
"net/http"
)
func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
d := &responseWriterDelegator{
ResponseWriter: w,
observeWriteHeader: observeWriteHeaderFunc,
}
id := 0
if _, ok := w.(http.CloseNotifier); ok {
id += closeNotifier
}
if _, ok := w.(http.Flusher); ok {
id += flusher
}
if _, ok := w.(http.Hijacker); ok {
id += hijacker
}
if _, ok := w.(io.ReaderFrom); ok {
id += readerFrom
}
return pickDelegator[id](d)
}

View File

@@ -0,0 +1,204 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package promhttp provides tooling around HTTP servers and clients.
//
// First, the package allows the creation of http.Handler instances to expose
// Prometheus metrics via HTTP. promhttp.Handler acts on the
// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a
// custom registry or anything that implements the Gatherer interface. It also
// allows the creation of handlers that act differently on errors or that
// log errors.
//
// Second, the package provides tooling to instrument instances of http.Handler
// via middleware. Middleware wrappers follow the naming scheme
// InstrumentHandlerX, where X describes the intended use of the middleware.
// See each function's doc comment for specific details.
//
// Finally, the package allows for an http.RoundTripper to be instrumented via
// middleware. Middleware wrappers follow the naming scheme
// InstrumentRoundTripperX, where X describes the intended use of the
// middleware. See each function's doc comment for specific details.
package promhttp
import (
"bytes"
"compress/gzip"
"fmt"
"io"
"net/http"
"strings"
"sync"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/client_golang/prometheus"
)
const (
contentTypeHeader = "Content-Type"
contentLengthHeader = "Content-Length"
contentEncodingHeader = "Content-Encoding"
acceptEncodingHeader = "Accept-Encoding"
)
var bufPool sync.Pool
func getBuf() *bytes.Buffer {
buf := bufPool.Get()
if buf == nil {
return &bytes.Buffer{}
}
return buf.(*bytes.Buffer)
}
func giveBuf(buf *bytes.Buffer) {
buf.Reset()
bufPool.Put(buf)
}
// Handler returns an HTTP handler for the prometheus.DefaultGatherer. The
// Handler uses the default HandlerOpts, i.e. it reports the first error as an
// HTTP error, performs no error logging, and compresses the response if
// requested by the client.
//
// If you want to create a Handler for the DefaultGatherer with different
// HandlerOpts, create it with HandlerFor with prometheus.DefaultGatherer and
// your desired HandlerOpts.
func Handler() http.Handler {
return HandlerFor(prometheus.DefaultGatherer, HandlerOpts{})
}
// HandlerFor returns an http.Handler for the provided Gatherer. The behavior
// of the Handler is defined by the provided HandlerOpts.
func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
mfs, err := reg.Gather()
if err != nil {
if opts.ErrorLog != nil {
opts.ErrorLog.Println("error gathering metrics:", err)
}
switch opts.ErrorHandling {
case PanicOnError:
panic(err)
case ContinueOnError:
if len(mfs) == 0 {
http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError)
return
}
case HTTPErrorOnError:
http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError)
return
}
}
contentType := expfmt.Negotiate(req.Header)
buf := getBuf()
defer giveBuf(buf)
writer, encoding := decorateWriter(req, buf, opts.DisableCompression)
enc := expfmt.NewEncoder(writer, contentType)
var lastErr error
for _, mf := range mfs {
if err := enc.Encode(mf); err != nil {
lastErr = err
if opts.ErrorLog != nil {
opts.ErrorLog.Println("error encoding metric family:", err)
}
switch opts.ErrorHandling {
case PanicOnError:
panic(err)
case ContinueOnError:
// Handled later.
case HTTPErrorOnError:
http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
return
}
}
}
if closer, ok := writer.(io.Closer); ok {
closer.Close()
}
if lastErr != nil && buf.Len() == 0 {
http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
return
}
header := w.Header()
header.Set(contentTypeHeader, string(contentType))
header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
if encoding != "" {
header.Set(contentEncodingHeader, encoding)
}
w.Write(buf.Bytes())
// TODO(beorn7): Consider streaming serving of metrics.
})
}
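// exampleHandlerFor is a minimal usage sketch for HandlerFor: it exposes a
// dedicated registry with explicit HandlerOpts instead of the package
// defaults used by Handler(). The metric name is a placeholder.
func exampleHandlerFor() http.Handler {
reg := prometheus.NewRegistry()
reg.MustRegister(prometheus.NewCounter(prometheus.CounterOpts{
Name: "example_events_total",
Help: "Illustration only.",
}))
return HandlerFor(reg, HandlerOpts{
ErrorHandling:      ContinueOnError, // serve whatever could be gathered
DisableCompression: true,            // never gzip the response
})
}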
// HandlerErrorHandling defines how a Handler serving metrics will handle
// errors.
type HandlerErrorHandling int
// These constants cause handlers serving metrics to behave as described if
// errors are encountered.
const (
// Serve an HTTP status code 500 upon the first error
// encountered. Report the error message in the body.
HTTPErrorOnError HandlerErrorHandling = iota
// Ignore errors and try to serve as many metrics as possible. However,
// if no metrics can be served, serve an HTTP status code 500 and the
// last error message in the body. Only use this in deliberate "best
// effort" metrics collection scenarios. It is recommended to at least
// log errors (by providing an ErrorLog in HandlerOpts) to not mask
// errors completely.
ContinueOnError
// Panic upon the first error encountered (useful for "crash only" apps).
PanicOnError
)
// Logger is the minimal interface HandlerOpts needs for logging. Note that
// log.Logger from the standard library implements this interface, and custom
// loggers can easily implement it if they don't already.
type Logger interface {
Println(v ...interface{})
}
// HandlerOpts specifies options for how to serve metrics via an http.Handler. The
// zero value of HandlerOpts is a reasonable default.
type HandlerOpts struct {
// ErrorLog specifies an optional logger for errors collecting and
// serving metrics. If nil, errors are not logged at all.
ErrorLog Logger
// ErrorHandling defines how errors are handled. Note that errors are
// logged regardless of the configured ErrorHandling provided ErrorLog
// is not nil.
ErrorHandling HandlerErrorHandling
// If DisableCompression is true, the handler will never compress the
// response, even if requested by the client.
DisableCompression bool
}
// decorateWriter wraps a writer to handle gzip compression if requested. It
// returns the decorated writer and the appropriate "Content-Encoding" header
// (which is empty if no compression is enabled).
func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) {
if compressionDisabled {
return writer, ""
}
header := request.Header.Get(acceptEncodingHeader)
parts := strings.Split(header, ",")
for _, part := range parts {
part := strings.TrimSpace(part)
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
return gzip.NewWriter(writer), "gzip"
}
}
return writer, ""
}
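// exampleGzipNegotiation is a minimal sketch of the compression handling
// above: when a scrape request carries "Accept-Encoding: gzip",
// decorateWriter wraps the output in a gzip.Writer and HandlerFor sets the
// "Content-Encoding: gzip" response header.
func exampleGzipNegotiation() {
req, _ := http.NewRequest("GET", "/metrics", nil)
req.Header.Set(acceptEncodingHeader, "gzip")
var buf bytes.Buffer
w, encoding := decorateWriter(req, &buf, false)
if gz, ok := w.(*gzip.Writer); ok && encoding == "gzip" {
gz.Close() // HandlerFor closes the writer after encoding all metric families
}
}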

View File

@@ -0,0 +1,131 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promhttp
import (
"bytes"
"errors"
"log"
"net/http"
"net/http/httptest"
"testing"
"github.com/prometheus/client_golang/prometheus"
)
type errorCollector struct{}
func (e errorCollector) Describe(ch chan<- *prometheus.Desc) {
ch <- prometheus.NewDesc("invalid_metric", "not helpful", nil, nil)
}
func (e errorCollector) Collect(ch chan<- prometheus.Metric) {
ch <- prometheus.NewInvalidMetric(
prometheus.NewDesc("invalid_metric", "not helpful", nil, nil),
errors.New("collect error"),
)
}
func TestHandlerErrorHandling(t *testing.T) {
// Create a registry that collects a MetricFamily with two elements,
// another with one, and reports an error.
reg := prometheus.NewRegistry()
cnt := prometheus.NewCounter(prometheus.CounterOpts{
Name: "the_count",
Help: "Ah-ah-ah! Thunder and lightning!",
})
reg.MustRegister(cnt)
cntVec := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "name",
Help: "docstring",
ConstLabels: prometheus.Labels{"constname": "constvalue"},
},
[]string{"labelname"},
)
cntVec.WithLabelValues("val1").Inc()
cntVec.WithLabelValues("val2").Inc()
reg.MustRegister(cntVec)
reg.MustRegister(errorCollector{})
logBuf := &bytes.Buffer{}
logger := log.New(logBuf, "", 0)
writer := httptest.NewRecorder()
request, _ := http.NewRequest("GET", "/", nil)
request.Header.Add("Accept", "test/plain")
errorHandler := HandlerFor(reg, HandlerOpts{
ErrorLog: logger,
ErrorHandling: HTTPErrorOnError,
})
continueHandler := HandlerFor(reg, HandlerOpts{
ErrorLog: logger,
ErrorHandling: ContinueOnError,
})
panicHandler := HandlerFor(reg, HandlerOpts{
ErrorLog: logger,
ErrorHandling: PanicOnError,
})
wantMsg := `error gathering metrics: error collecting metric Desc{fqName: "invalid_metric", help: "not helpful", constLabels: {}, variableLabels: []}: collect error
`
wantErrorBody := `An error has occurred during metrics gathering:
error collecting metric Desc{fqName: "invalid_metric", help: "not helpful", constLabels: {}, variableLabels: []}: collect error
`
wantOKBody := `# HELP name docstring
# TYPE name counter
name{constname="constvalue",labelname="val1"} 1
name{constname="constvalue",labelname="val2"} 1
# HELP the_count Ah-ah-ah! Thunder and lightning!
# TYPE the_count counter
the_count 0
`
errorHandler.ServeHTTP(writer, request)
if got, want := writer.Code, http.StatusInternalServerError; got != want {
t.Errorf("got HTTP status code %d, want %d", got, want)
}
if got := logBuf.String(); got != wantMsg {
t.Errorf("got log message:\n%s\nwant log mesage:\n%s\n", got, wantMsg)
}
if got := writer.Body.String(); got != wantErrorBody {
t.Errorf("got body:\n%s\nwant body:\n%s\n", got, wantErrorBody)
}
logBuf.Reset()
writer.Body.Reset()
writer.Code = http.StatusOK
continueHandler.ServeHTTP(writer, request)
if got, want := writer.Code, http.StatusOK; got != want {
t.Errorf("got HTTP status code %d, want %d", got, want)
}
if got := logBuf.String(); got != wantMsg {
t.Errorf("got log message %q, want %q", got, wantMsg)
}
if got := writer.Body.String(); got != wantOKBody {
t.Errorf("got body %q, want %q", got, wantOKBody)
}
defer func() {
if err := recover(); err == nil {
t.Error("expected panic from panicHandler")
}
}()
panicHandler.ServeHTTP(writer, request)
}

View File

@@ -0,0 +1,98 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promhttp
import (
"net/http"
"time"
"github.com/prometheus/client_golang/prometheus"
)
// The RoundTripperFunc type is an adapter to allow the use of ordinary
// functions as RoundTrippers. If f is a function with the appropriate
// signature, RoundTripperFunc(f) is a RoundTripper that calls f.
type RoundTripperFunc func(req *http.Request) (*http.Response, error)
// RoundTrip implements the RoundTripper interface.
func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
return rt(r)
}
// InstrumentRoundTripperInFlight is a middleware that wraps the provided
// http.RoundTripper. It sets the provided prometheus.Gauge to the number of
// requests currently handled by the wrapped http.RoundTripper.
//
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
gauge.Inc()
defer gauge.Dec()
return next.RoundTrip(r)
})
}
// InstrumentRoundTripperCounter is a middleware that wraps the provided
// http.RoundTripper to observe the request result with the provided CounterVec.
// The CounterVec must have zero, one, or two labels. The only allowed label
// names are "code" and "method". The function panics if any other instance
// labels are provided. Partitioning of the CounterVec happens by HTTP status
// code and/or HTTP method if the respective instance label names are present
// in the CounterVec. For unpartitioned counting, use a CounterVec with
// zero labels.
//
// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
// is not incremented.
//
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc {
code, method := checkLabels(counter)
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
resp, err := next.RoundTrip(r)
if err == nil {
counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc()
}
return resp, err
})
}
// InstrumentRoundTripperDuration is a middleware that wraps the provided
// http.RoundTripper to observe the request duration with the provided ObserverVec.
// The ObserverVec must have zero, one, or two labels. The only allowed label
// names are "code" and "method". The function panics if any other instance
// labels are provided. The Observe method of the Observer in the ObserverVec
// is called with the request duration in seconds. Partitioning happens by HTTP
// status code and/or HTTP method if the respective instance label names are
// present in the ObserverVec. For unpartitioned observations, use an
// ObserverVec with zero labels. Note that partitioning of Histograms is
// expensive and should be used judiciously.
//
// If the wrapped RoundTripper panics or returns a non-nil error, no values are
// reported.
//
// Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+.
func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc {
code, method := checkLabels(obs)
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
start := time.Now()
resp, err := next.RoundTrip(r)
if err == nil {
obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds())
}
return resp, err
})
}
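// exampleInstrumentedClient is a minimal sketch chaining the three
// RoundTripper middlewares above around http.DefaultTransport. The metric
// names are placeholders.
func exampleInstrumentedClient() *http.Client {
inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "client_in_flight_requests",
Help: "Illustration only.",
})
counter := prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "client_requests_total",
Help: "Illustration only.",
}, []string{"code", "method"})
duration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
Name:    "client_request_duration_seconds",
Help:    "Illustration only.",
Buckets: prometheus.DefBuckets,
}, []string{"method"})
prometheus.MustRegister(inFlight, counter, duration)
return &http.Client{
Transport: InstrumentRoundTripperInFlight(inFlight,
InstrumentRoundTripperCounter(counter,
InstrumentRoundTripperDuration(duration, http.DefaultTransport))),
}
}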

View File

@@ -0,0 +1,144 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.8
package promhttp
import (
"context"
"crypto/tls"
"net/http"
"net/http/httptrace"
"time"
)
// InstrumentTrace is used to offer flexibility in instrumenting the available
// httptrace.ClientTrace hook functions. Each function is passed a float64
// representing the time in seconds since the start of the http request. A user
// may choose to use separately bucketed Histograms, or implement custom
// instance labels on a per-function basis.
type InstrumentTrace struct {
GotConn func(float64)
PutIdleConn func(float64)
GotFirstResponseByte func(float64)
Got100Continue func(float64)
DNSStart func(float64)
DNSDone func(float64)
ConnectStart func(float64)
ConnectDone func(float64)
TLSHandshakeStart func(float64)
TLSHandshakeDone func(float64)
WroteHeaders func(float64)
Wait100Continue func(float64)
WroteRequest func(float64)
}
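// exampleTraceHooks is a minimal sketch of wiring InstrumentTrace: only the
// hook fields that are set are invoked, here just the DNS timings. In
// practice the closures would typically Observe the value into an
// ObserverVec, as the test file in this package demonstrates.
func exampleTraceHooks(next http.RoundTripper) http.RoundTripper {
var dnsStartSeconds, dnsDoneSeconds float64
trace := &InstrumentTrace{
DNSStart: func(t float64) { dnsStartSeconds = t },
DNSDone:  func(t float64) { dnsDoneSeconds = t },
}
_, _ = dnsStartSeconds, dnsDoneSeconds // recorded but unused in this sketch
return InstrumentRoundTripperTrace(trace, next)
}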
// InstrumentRoundTripperTrace is a middleware that wraps the provided
// RoundTripper and reports times to hook functions provided in the
// InstrumentTrace struct. Hook functions that are not present in the provided
// InstrumentTrace struct are ignored. Times reported to the hook functions are
// time since the start of the request. Only with Go1.9+ are those times
// guaranteed to never be negative. (Earlier Go versions do not use a
// monotonic clock.) Note that partitioning of Histograms is expensive and
// should be used judiciously.
//
// For hook functions that receive an error as an argument, no observations are
// made in the event of a non-nil error value.
//
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
start := time.Now()
trace := &httptrace.ClientTrace{
GotConn: func(_ httptrace.GotConnInfo) {
if it.GotConn != nil {
it.GotConn(time.Since(start).Seconds())
}
},
PutIdleConn: func(err error) {
if err != nil {
return
}
if it.PutIdleConn != nil {
it.PutIdleConn(time.Since(start).Seconds())
}
},
DNSStart: func(_ httptrace.DNSStartInfo) {
if it.DNSStart != nil {
it.DNSStart(time.Since(start).Seconds())
}
},
DNSDone: func(_ httptrace.DNSDoneInfo) {
if it.DNSDone != nil {
it.DNSDone(time.Since(start).Seconds())
}
},
ConnectStart: func(_, _ string) {
if it.ConnectStart != nil {
it.ConnectStart(time.Since(start).Seconds())
}
},
ConnectDone: func(_, _ string, err error) {
if err != nil {
return
}
if it.ConnectDone != nil {
it.ConnectDone(time.Since(start).Seconds())
}
},
GotFirstResponseByte: func() {
if it.GotFirstResponseByte != nil {
it.GotFirstResponseByte(time.Since(start).Seconds())
}
},
Got100Continue: func() {
if it.Got100Continue != nil {
it.Got100Continue(time.Since(start).Seconds())
}
},
TLSHandshakeStart: func() {
if it.TLSHandshakeStart != nil {
it.TLSHandshakeStart(time.Since(start).Seconds())
}
},
TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
if err != nil {
return
}
if it.TLSHandshakeDone != nil {
it.TLSHandshakeDone(time.Since(start).Seconds())
}
},
WroteHeaders: func() {
if it.WroteHeaders != nil {
it.WroteHeaders(time.Since(start).Seconds())
}
},
Wait100Continue: func() {
if it.Wait100Continue != nil {
it.Wait100Continue(time.Since(start).Seconds())
}
},
WroteRequest: func(_ httptrace.WroteRequestInfo) {
if it.WroteRequest != nil {
it.WroteRequest(time.Since(start).Seconds())
}
},
}
r = r.WithContext(httptrace.WithClientTrace(context.Background(), trace))
return next.RoundTrip(r)
})
}

View File

@@ -0,0 +1,195 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.8
package promhttp
import (
"log"
"net/http"
"testing"
"time"
"github.com/prometheus/client_golang/prometheus"
)
func TestClientMiddlewareAPI(t *testing.T) {
client := http.DefaultClient
client.Timeout = 1 * time.Second
reg := prometheus.NewRegistry()
inFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "client_in_flight_requests",
Help: "A gauge of in-flight requests for the wrapped client.",
})
counter := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "client_api_requests_total",
Help: "A counter for requests from the wrapped client.",
},
[]string{"code", "method"},
)
dnsLatencyVec := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "dns_duration_seconds",
Help: "Trace dns latency histogram.",
Buckets: []float64{.005, .01, .025, .05},
},
[]string{"event"},
)
tlsLatencyVec := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "tls_duration_seconds",
Help: "Trace tls latency histogram.",
Buckets: []float64{.05, .1, .25, .5},
},
[]string{"event"},
)
histVec := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "request_duration_seconds",
Help: "A histogram of request latencies.",
Buckets: prometheus.DefBuckets,
},
[]string{"method"},
)
reg.MustRegister(counter, tlsLatencyVec, dnsLatencyVec, histVec, inFlightGauge)
trace := &InstrumentTrace{
DNSStart: func(t float64) {
dnsLatencyVec.WithLabelValues("dns_start")
},
DNSDone: func(t float64) {
dnsLatencyVec.WithLabelValues("dns_done")
},
TLSHandshakeStart: func(t float64) {
tlsLatencyVec.WithLabelValues("tls_handshake_start")
},
TLSHandshakeDone: func(t float64) {
tlsLatencyVec.WithLabelValues("tls_handshake_done")
},
}
client.Transport = InstrumentRoundTripperInFlight(inFlightGauge,
InstrumentRoundTripperCounter(counter,
InstrumentRoundTripperTrace(trace,
InstrumentRoundTripperDuration(histVec, http.DefaultTransport),
),
),
)
resp, err := client.Get("http://google.com")
if err != nil {
t.Fatalf("%v", err)
}
defer resp.Body.Close()
}
func ExampleInstrumentRoundTripperDuration() {
client := http.DefaultClient
client.Timeout = 1 * time.Second
inFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "client_in_flight_requests",
Help: "A gauge of in-flight requests for the wrapped client.",
})
counter := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "client_api_requests_total",
Help: "A counter for requests from the wrapped client.",
},
[]string{"code", "method"},
)
// dnsLatencyVec uses custom buckets based on expected dns durations.
// It has an instance label "event", which is set in the
// DNSStart and DNSDone hook functions defined in the
// InstrumentTrace struct below.
dnsLatencyVec := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "dns_duration_seconds",
Help: "Trace dns latency histogram.",
Buckets: []float64{.005, .01, .025, .05},
},
[]string{"event"},
)
// tlsLatencyVec uses custom buckets based on expected tls durations.
// It has an instance label "event", which is set in the
// TLSHandshakeStart and TLSHandshakeDone hook functions defined in the
// InstrumentTrace struct below.
tlsLatencyVec := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "tls_duration_seconds",
Help: "Trace tls latency histogram.",
Buckets: []float64{.05, .1, .25, .5},
},
[]string{"event"},
)
// histVec has no labels, making it a zero-dimensional ObserverVec.
histVec := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "request_duration_seconds",
Help: "A histogram of request latencies.",
Buckets: prometheus.DefBuckets,
},
[]string{},
)
// Register all of the metrics in the standard registry.
prometheus.MustRegister(counter, tlsLatencyVec, dnsLatencyVec, histVec, inFlightGauge)
// Define functions for the available httptrace.ClientTrace hook
// functions that we want to instrument.
trace := &InstrumentTrace{
DNSStart: func(t float64) {
dnsLatencyVec.WithLabelValues("dns_start")
},
DNSDone: func(t float64) {
dnsLatencyVec.WithLabelValues("dns_done")
},
TLSHandshakeStart: func(t float64) {
tlsLatencyVec.WithLabelValues("tls_handshake_start")
},
TLSHandshakeDone: func(t float64) {
tlsLatencyVec.WithLabelValues("tls_handshake_done")
},
}
// Wrap the default RoundTripper with middleware.
roundTripper := InstrumentRoundTripperInFlight(inFlightGauge,
InstrumentRoundTripperCounter(counter,
InstrumentRoundTripperTrace(trace,
InstrumentRoundTripperDuration(histVec, http.DefaultTransport),
),
),
)
// Set the RoundTripper on our client.
client.Transport = roundTripper
resp, err := client.Get("http://google.com")
if err != nil {
log.Printf("error: %v", err)
return
}
defer resp.Body.Close()
}

View File

@@ -0,0 +1,440 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promhttp
import (
"net/http"
"strconv"
"strings"
"time"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/client_golang/prometheus"
)
// magicString is used for the hacky label test in checkLabels. Remove once fixed.
const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
// InstrumentHandlerInFlight is a middleware that wraps the provided
// http.Handler. It sets the provided prometheus.Gauge to the number of
// requests currently handled by the wrapped http.Handler.
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
g.Inc()
defer g.Dec()
next.ServeHTTP(w, r)
})
}
// InstrumentHandlerDuration is a middleware that wraps the provided
// http.Handler to observe the request duration with the provided ObserverVec.
// The ObserverVec must have zero, one, or two labels. The only allowed label
// names are "code" and "method". The function panics if any other instance
// labels are provided. The Observe method of the Observer in the ObserverVec
// is called with the request duration in seconds. Partitioning happens by HTTP
// status code and/or HTTP method if the respective instance label names are
// present in the ObserverVec. For unpartitioned observations, use an
// ObserverVec with zero labels. Note that partitioning of Histograms is
// expensive and should be used judiciously.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
// If the wrapped Handler panics, no values are reported.
//
// Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+.
func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
code, method := checkLabels(obs)
if code {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds())
})
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
next.ServeHTTP(w, r)
obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds())
})
}
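// exampleDurationMiddleware is a minimal sketch of InstrumentHandlerDuration
// with an ObserverVec partitioned by both permitted labels. Because "code" is
// present, the delegator path above is taken, so the observed status code is
// the one the wrapped handler actually wrote. The metric name is a
// placeholder.
func exampleDurationMiddleware(next http.Handler) http.HandlerFunc {
duration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
Name:    "example_request_duration_seconds",
Help:    "Illustration only.",
Buckets: prometheus.DefBuckets,
}, []string{"code", "method"})
prometheus.MustRegister(duration)
return InstrumentHandlerDuration(duration, next)
}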
// InstrumentHandlerCounter is a middleware that wraps the provided
// http.Handler to observe the request result with the provided CounterVec.
// The CounterVec must have zero, one, or two labels. The only allowed label
// names are "code" and "method". The function panics if any other instance
// labels are provided. Partitioning of the CounterVec happens by HTTP status
// code and/or HTTP method if the respective instance label names are present
// in the CounterVec. For unpartitioned counting, use a CounterVec with
// zero labels.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
// If the wrapped Handler panics, the Counter is not incremented.
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc {
code, method := checkLabels(counter)
if code {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
counter.With(labels(code, method, r.Method, d.Status())).Inc()
})
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
counter.With(labels(code, method, r.Method, 0)).Inc()
})
}
// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
// http.Handler to observe with the provided ObserverVec the request duration
// until the response headers are written. The ObserverVec must have zero, one,
// or two labels. The only allowed label names are "code" and "method". The
// function panics if any other instance labels are provided. The Observe
// method of the Observer in the ObserverVec is called with the request
// duration in seconds. Partitioning happens by HTTP status code and/or HTTP
// method if the respective instance label names are present in the
// ObserverVec. For unpartitioned observations, use an ObserverVec with zero
// labels. Note that partitioning of Histograms is expensive and should be used
// judiciously.
//
// If the wrapped Handler panics before calling WriteHeader, no value is
// reported.
//
// Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+.
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
code, method := checkLabels(obs)
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
d := newDelegator(w, func(status int) {
obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds())
})
next.ServeHTTP(d, r)
})
}
// InstrumentHandlerRequestSize is a middleware that wraps the provided
// http.Handler to observe the request size with the provided ObserverVec.
// The ObserverVec must have zero, one, or two labels. The only allowed label
// names are "code" and "method". The function panics if any other instance
// labels are provided. The Observe method of the Observer in the ObserverVec
// is called with the request size in bytes. Partitioning happens by HTTP
// status code and/or HTTP method if the respective instance label names are
// present in the ObserverVec. For unpartitioned observations, use an
// ObserverVec with zero labels. Note that partitioning of Histograms is
// expensive and should be used judiciously.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
// If the wrapped Handler panics, no values are reported.
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
code, method := checkLabels(obs)
if code {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
size := computeApproximateRequestSize(r)
obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size))
})
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
size := computeApproximateRequestSize(r)
obs.With(labels(code, method, r.Method, 0)).Observe(float64(size))
})
}
// InstrumentHandlerResponseSize is a middleware that wraps the provided
// http.Handler to observe the response size with the provided ObserverVec.
// The ObserverVec must have zero, one, or two labels. The only allowed label
// names are "code" and "method". The function panics if any other instance
// labels are provided. The Observe method of the Observer in the ObserverVec
// is called with the response size in bytes. Partitioning happens by HTTP
// status code and/or HTTP method if the respective instance label names are
// present in the ObserverVec. For unpartitioned observations, use an
// ObserverVec with zero labels. Note that partitioning of Histograms is
// expensive and should be used judiciously.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
// If the wrapped Handler panics, no values are reported.
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler {
code, method := checkLabels(obs)
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written()))
})
}
func checkLabels(c prometheus.Collector) (code bool, method bool) {
// TODO(beorn7): Remove this hacky way to check for instance labels
// once Descriptors can have their dimensionality queried.
var (
desc *prometheus.Desc
pm dto.Metric
)
descc := make(chan *prometheus.Desc, 1)
c.Describe(descc)
select {
case desc = <-descc:
default:
panic("no description provided by collector")
}
select {
case <-descc:
panic("more than one description provided by collector")
default:
}
close(descc)
if _, err := prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0); err == nil {
return
}
if m, err := prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, magicString); err == nil {
if err := m.Write(&pm); err != nil {
panic("error checking metric for labels")
}
for _, label := range pm.Label {
name, value := label.GetName(), label.GetValue()
if value != magicString {
continue
}
switch name {
case "code":
code = true
case "method":
method = true
default:
panic("metric partitioned with non-supported labels")
}
return
}
panic("previously set label not found this must never happen")
}
if m, err := prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, magicString, magicString); err == nil {
if err := m.Write(&pm); err != nil {
panic("error checking metric for labels")
}
for _, label := range pm.Label {
name, value := label.GetName(), label.GetValue()
if value != magicString {
continue
}
if name == "code" || name == "method" {
continue
}
panic("metric partitioned with non-supported labels")
}
code = true
method = true
return
}
panic("metric partitioned with non-supported labels")
}
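// exampleCheckLabels is a minimal sketch of what checkLabels accepts: zero
// labels, or only the "code" and/or "method" labels; anything else panics.
func exampleCheckLabels() {
opts := prometheus.CounterOpts{Name: "example_total", Help: "Illustration only."}
code, method := checkLabels(prometheus.NewCounterVec(opts, []string{"code", "method"}))
_, _ = code, method // both true here; a label such as "handler" would panic instead
}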
// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
// unnecessary allocations on each request.
var emptyLabels = prometheus.Labels{}
func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
if !(code || method) {
return emptyLabels
}
labels := prometheus.Labels{}
if code {
labels["code"] = sanitizeCode(status)
}
if method {
labels["method"] = sanitizeMethod(reqMethod)
}
return labels
}
func computeApproximateRequestSize(r *http.Request) int {
s := 0
if r.URL != nil {
s += len(r.URL.String())
}
s += len(r.Method)
s += len(r.Proto)
for name, values := range r.Header {
s += len(name)
for _, value := range values {
s += len(value)
}
}
s += len(r.Host)
// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
if r.ContentLength != -1 {
s += int(r.ContentLength)
}
return s
}
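// exampleRequestSize is a minimal worked example of the approximation above:
// for a GET of "/metrics" over HTTP/1.1 against host "example.com" with a
// single header "Accept: text/plain" and no body, the size is
// len("/metrics")+len("GET")+len("HTTP/1.1")+len("Accept")+len("text/plain")+
// len("example.com") = 8+3+8+6+10+11 = 46 bytes.
func exampleRequestSize(r *http.Request) int {
return computeApproximateRequestSize(r)
}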
func sanitizeMethod(m string) string {
switch m {
case "GET", "get":
return "get"
case "PUT", "put":
return "put"
case "HEAD", "head":
return "head"
case "POST", "post":
return "post"
case "DELETE", "delete":
return "delete"
case "CONNECT", "connect":
return "connect"
case "OPTIONS", "options":
return "options"
case "NOTIFY", "notify":
return "notify"
default:
return strings.ToLower(m)
}
}
// If the wrapped http.Handler has not set a status code, i.e. the value is
// currently 0, sanitizeCode will return 200, for consistency with behavior in
// the stdlib.
func sanitizeCode(s int) string {
switch s {
case 100:
return "100"
case 101:
return "101"
case 200, 0:
return "200"
case 201:
return "201"
case 202:
return "202"
case 203:
return "203"
case 204:
return "204"
case 205:
return "205"
case 206:
return "206"
case 300:
return "300"
case 301:
return "301"
case 302:
return "302"
case 304:
return "304"
case 305:
return "305"
case 307:
return "307"
case 400:
return "400"
case 401:
return "401"
case 402:
return "402"
case 403:
return "403"
case 404:
return "404"
case 405:
return "405"
case 406:
return "406"
case 407:
return "407"
case 408:
return "408"
case 409:
return "409"
case 410:
return "410"
case 411:
return "411"
case 412:
return "412"
case 413:
return "413"
case 414:
return "414"
case 415:
return "415"
case 416:
return "416"
case 417:
return "417"
case 418:
return "418"
case 500:
return "500"
case 501:
return "501"
case 502:
return "502"
case 503:
return "503"
case 504:
return "504"
case 505:
return "505"
case 428:
return "428"
case 429:
return "429"
case 431:
return "431"
case 511:
return "511"
default:
return strconv.Itoa(s)
}
}

View File

@@ -0,0 +1,233 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promhttp
import (
"io"
"log"
"net/http"
"net/http/httptest"
"testing"
"github.com/prometheus/client_golang/prometheus"
)
func TestMiddlewareAPI(t *testing.T) {
reg := prometheus.NewRegistry()
inFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "in_flight_requests",
Help: "A gauge of requests currently being served by the wrapped handler.",
})
counter := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "api_requests_total",
Help: "A counter for requests to the wrapped handler.",
},
[]string{"code", "method"},
)
histVec := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "response_duration_seconds",
Help: "A histogram of request latencies.",
Buckets: prometheus.DefBuckets,
ConstLabels: prometheus.Labels{"handler": "api"},
},
[]string{"method"},
)
writeHeaderVec := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "write_header_duration_seconds",
Help: "A histogram of time to first write latencies.",
Buckets: prometheus.DefBuckets,
ConstLabels: prometheus.Labels{"handler": "api"},
},
[]string{},
)
responseSize := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "push_request_size_bytes",
Help: "A histogram of request sizes for requests.",
Buckets: []float64{200, 500, 900, 1500},
},
[]string{},
)
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("OK"))
})
reg.MustRegister(inFlightGauge, counter, histVec, responseSize, writeHeaderVec)
chain := InstrumentHandlerInFlight(inFlightGauge,
InstrumentHandlerCounter(counter,
InstrumentHandlerDuration(histVec,
InstrumentHandlerTimeToWriteHeader(writeHeaderVec,
InstrumentHandlerResponseSize(responseSize, handler),
),
),
),
)
r, _ := http.NewRequest("GET", "www.example.com", nil)
w := httptest.NewRecorder()
chain.ServeHTTP(w, r)
}
func TestInstrumentTimeToFirstWrite(t *testing.T) {
var i int
dobs := &responseWriterDelegator{
ResponseWriter: httptest.NewRecorder(),
observeWriteHeader: func(status int) {
i = status
},
}
d := newDelegator(dobs, nil)
d.WriteHeader(http.StatusOK)
if i != http.StatusOK {
t.Fatalf("failed to execute observeWriteHeader")
}
}
// testResponseWriter is an http.ResponseWriter that also implements
// http.CloseNotifier, http.Flusher, and io.ReaderFrom.
type testResponseWriter struct {
closeNotifyCalled, flushCalled, readFromCalled bool
}
func (t *testResponseWriter) Header() http.Header { return nil }
func (t *testResponseWriter) Write([]byte) (int, error) { return 0, nil }
func (t *testResponseWriter) WriteHeader(int) {}
func (t *testResponseWriter) CloseNotify() <-chan bool {
t.closeNotifyCalled = true
return nil
}
func (t *testResponseWriter) Flush() { t.flushCalled = true }
func (t *testResponseWriter) ReadFrom(io.Reader) (int64, error) {
t.readFromCalled = true
return 0, nil
}
func TestInterfaceUpgrade(t *testing.T) {
w := &testResponseWriter{}
d := newDelegator(w, nil)
d.(http.CloseNotifier).CloseNotify()
if !w.closeNotifyCalled {
t.Error("CloseNotify not called")
}
d.(http.Flusher).Flush()
if !w.flushCalled {
t.Error("Flush not called")
}
d.(io.ReaderFrom).ReadFrom(nil)
if !w.readFromCalled {
t.Error("ReadFrom not called")
}
if _, ok := d.(http.Hijacker); ok {
t.Error("delegator unexpectedly implements http.Hijacker")
}
}
func ExampleInstrumentHandlerDuration() {
inFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "in_flight_requests",
Help: "A gauge of requests currently being served by the wrapped handler.",
})
counter := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "api_requests_total",
Help: "A counter for requests to the wrapped handler.",
},
[]string{"code", "method"},
)
// pushVec and pullVec are partitioned by the HTTP method and use custom
// buckets based on the expected request duration. ConstLabels are used
// to set a handler label to mark pushVec as tracking the durations for
// pushes and pullVec as tracking the durations for pulls. Note that
// Name, Help, and Buckets need to be the same for consistency, so we
// use the same HistogramOpts after just modifying the ConstLabels.
histogramOpts := prometheus.HistogramOpts{
Name: "request_duration_seconds",
Help: "A histogram of latencies for requests.",
Buckets: []float64{.25, .5, 1, 2.5, 5, 10},
ConstLabels: prometheus.Labels{"handler": "push"},
}
pushVec := prometheus.NewHistogramVec(
histogramOpts,
[]string{"method"},
)
histogramOpts.ConstLabels = prometheus.Labels{"handler": "pull"}
pullVec := prometheus.NewHistogramVec(
histogramOpts,
[]string{"method"},
)
// responseSize has no labels, making it a zero-dimensional
// ObserverVec.
responseSize := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "response_size_bytes",
Help: "A histogram of response sizes for requests.",
Buckets: []float64{200, 500, 900, 1500},
},
[]string{},
)
// Create the handlers that will be wrapped by the middleware.
pushHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("Push"))
})
pullHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("Pull"))
})
// Register all of the metrics in the standard registry.
prometheus.MustRegister(inFlightGauge, counter, pullVec, pushVec, responseSize)
// Wrap the pushHandler with our shared middleware, but use the
// endpoint-specific pushVec with InstrumentHandlerDuration.
pushChain := InstrumentHandlerInFlight(inFlightGauge,
InstrumentHandlerCounter(counter,
InstrumentHandlerDuration(pushVec,
InstrumentHandlerResponseSize(responseSize, pushHandler),
),
),
)
// Wrap the pushHandler with the shared middleware, but use the
// endpoint-specific pullVec with InstrumentHandlerDuration.
pullChain := InstrumentHandlerInFlight(inFlightGauge,
InstrumentHandlerCounter(counter,
InstrumentHandlerDuration(pullVec,
InstrumentHandlerResponseSize(responseSize, pullHandler),
),
),
)
http.Handle("/metrics", Handler())
http.Handle("/push", pushChain)
http.Handle("/pull", pullChain)
if err := http.ListenAndServe(":3000", nil); err != nil {
log.Fatal(err)
}
}

View File

@@ -0,0 +1,84 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Copyright (c) 2013, The Prometheus Authors
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
package push_test
import (
"fmt"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/push"
)
var (
completionTime = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "db_backup_last_completion_timestamp_seconds",
Help: "The timestamp of the last completion of a DB backup, successful or not.",
})
successTime = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "db_backup_last_success_timestamp_seconds",
Help: "The timestamp of the last successful completion of a DB backup.",
})
duration = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "db_backup_duration_seconds",
Help: "The duration of the last DB backup in seconds.",
})
records = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "db_backup_records_processed",
Help: "The number of records processed in the last DB backup.",
})
)
func performBackup() (int, error) {
// Perform the backup and return the number of backed up records and any
// applicable error.
// ...
return 42, nil
}
func ExampleAddFromGatherer() {
registry := prometheus.NewRegistry()
registry.MustRegister(completionTime, duration, records)
// Note that successTime is not registered at this time.
start := time.Now()
n, err := performBackup()
records.Set(float64(n))
// Note that time.Since only uses a monotonic clock in Go1.9+.
duration.Set(time.Since(start).Seconds())
completionTime.SetToCurrentTime()
if err != nil {
fmt.Println("DB backup failed:", err)
} else {
// Only now register successTime.
registry.MustRegister(successTime)
successTime.SetToCurrentTime()
}
// AddFromGatherer is used here rather than FromGatherer to not delete a
// previously pushed success timestamp in case of a failure of this
// backup.
if err := push.AddFromGatherer(
"db_backup", nil,
"http://pushgateway:9091",
registry,
); err != nil {
fmt.Println("Could not push to Pushgateway:", err)
}
}

View File

@@ -0,0 +1,36 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package push_test
import (
"fmt"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/push"
)
func ExampleCollectors() {
completionTime := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "db_backup_last_completion_timestamp_seconds",
Help: "The timestamp of the last successful completion of a DB backup.",
})
completionTime.SetToCurrentTime()
if err := push.Collectors(
"db_backup", push.HostnameGroupingKey(),
"http://pushgateway:9091",
completionTime,
); err != nil {
fmt.Println("Could not push completion time to Pushgateway:", err)
}
}

View File

@@ -0,0 +1,172 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Copyright (c) 2013, The Prometheus Authors
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
// Package push provides functions to push metrics to a Pushgateway. The metrics
// to push are either collected from a provided registry, or from explicitly
// listed collectors.
//
// See the documentation of the Pushgateway to understand the meaning of the
// grouping parameters and the differences between push.FromGatherer and
// push.Collectors on the one hand and push.AddFromGatherer and push.AddCollectors
// on the other hand: https://github.com/prometheus/pushgateway
package push
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"strings"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/model"
"github.com/prometheus/client_golang/prometheus"
)
const contentTypeHeader = "Content-Type"
// FromGatherer triggers a metric collection by the provided Gatherer (which is
// usually implemented by a prometheus.Registry) and pushes all gathered metrics
// to the Pushgateway specified by url, using the provided job name and the
// (optional) further grouping labels (the grouping map may be nil). See the
// Pushgateway documentation for detailed implications of the job and other
// grouping labels. Neither the job name nor any grouping label value may
// contain a "/". The metrics pushed must not contain a job label of their own
// nor any of the grouping labels.
//
// You can use just host:port or ip:port as url, in which case 'http://' is
// added automatically. You can also include the scheme in the URL. However, do
// not include the '/metrics/jobs/...' part.
//
// Note that all previously pushed metrics with the same job and other grouping
// labels will be replaced with the metrics pushed by this call. (It uses HTTP
// method 'PUT' to push to the Pushgateway.)
func FromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error {
return push(job, grouping, url, g, "PUT")
}
// AddFromGatherer works like FromGatherer, but only previously pushed metrics
// with the same name (and the same job and other grouping labels) will be
// replaced. (It uses HTTP method 'POST' to push to the Pushgateway.)
func AddFromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error {
return push(job, grouping, url, g, "POST")
}
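// exampleFromGatherer is a minimal usage sketch: gather from a dedicated
// registry and replace the "some_job" group on the Pushgateway. The host
// name and metric name are placeholders; use AddFromGatherer instead to only
// replace metrics with the same names.
func exampleFromGatherer() error {
reg := prometheus.NewRegistry()
queued := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "queued_items",
Help: "Illustration only.",
})
reg.MustRegister(queued)
queued.Set(17)
return FromGatherer("some_job", nil, "http://pushgateway:9091", reg)
}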
func push(job string, grouping map[string]string, pushURL string, g prometheus.Gatherer, method string) error {
if !strings.Contains(pushURL, "://") {
pushURL = "http://" + pushURL
}
if strings.HasSuffix(pushURL, "/") {
pushURL = pushURL[:len(pushURL)-1]
}
if strings.Contains(job, "/") {
return fmt.Errorf("job contains '/': %s", job)
}
urlComponents := []string{url.QueryEscape(job)}
for ln, lv := range grouping {
if !model.LabelName(ln).IsValid() {
return fmt.Errorf("grouping label has invalid name: %s", ln)
}
if strings.Contains(lv, "/") {
return fmt.Errorf("value of grouping label %s contains '/': %s", ln, lv)
}
urlComponents = append(urlComponents, ln, lv)
}
pushURL = fmt.Sprintf("%s/metrics/job/%s", pushURL, strings.Join(urlComponents, "/"))
mfs, err := g.Gather()
if err != nil {
return err
}
buf := &bytes.Buffer{}
enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)
// Check for pre-existing grouping labels:
for _, mf := range mfs {
for _, m := range mf.GetMetric() {
for _, l := range m.GetLabel() {
if l.GetName() == "job" {
return fmt.Errorf("pushed metric %s (%s) already contains a job label", mf.GetName(), m)
}
if _, ok := grouping[l.GetName()]; ok {
return fmt.Errorf(
"pushed metric %s (%s) already contains grouping label %s",
mf.GetName(), m, l.GetName(),
)
}
}
}
enc.Encode(mf)
}
req, err := http.NewRequest(method, pushURL, buf)
if err != nil {
return err
}
req.Header.Set(contentTypeHeader, string(expfmt.FmtProtoDelim))
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != 202 {
body, _ := ioutil.ReadAll(resp.Body) // Ignore any further error as this is for an error message only.
return fmt.Errorf("unexpected status code %d while pushing to %s: %s", resp.StatusCode, pushURL, body)
}
return nil
}
// Collectors works like FromGatherer, but it does not use a Gatherer. Instead,
// it collects from the provided collectors directly. It is a convenient way to
// push only a few metrics.
func Collectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error {
return pushCollectors(job, grouping, url, "PUT", collectors...)
}
// AddCollectors works like AddFromGatherer, but it does not use a Gatherer.
// Instead, it collects from the provided collectors directly. It is a
// convenient way to push only a few metrics.
func AddCollectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error {
return pushCollectors(job, grouping, url, "POST", collectors...)
}
func pushCollectors(job string, grouping map[string]string, url, method string, collectors ...prometheus.Collector) error {
r := prometheus.NewRegistry()
for _, collector := range collectors {
if err := r.Register(collector); err != nil {
return err
}
}
return push(job, grouping, url, r, method)
}
// HostnameGroupingKey returns a label map with the only entry
// {instance="<hostname>"}. This can be conveniently used as the grouping
// parameter if metrics should be pushed with the hostname as label. The
// returned map is created upon each call so that the caller is free to add more
// labels to the map.
func HostnameGroupingKey() map[string]string {
hostname, err := os.Hostname()
if err != nil {
return map[string]string{"instance": "unknown"}
}
return map[string]string{"instance": hostname}
}
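// A short, hedged sketch combining Collectors and HostnameGroupingKey (the
// gauge, job name, and Pushgateway address are illustrative assumptions):
//
//    duration := prometheus.NewGauge(prometheus.GaugeOpts{
//        Name: "batch_job_duration_seconds",
//        Help: "Duration of the last batch job in seconds.",
//    })
//    duration.Set(42.5)
//    if err := push.Collectors(
//        "my_batch_job", push.HostnameGroupingKey(), "http://pushgateway:9091", duration,
//    ); err != nil {
//        log.Println("could not push to Pushgateway:", err)
//    }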

View File

@@ -0,0 +1,176 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Copyright (c) 2013, The Prometheus Authors
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
package push
import (
"bytes"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"testing"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/client_golang/prometheus"
)
func TestPush(t *testing.T) {
var (
lastMethod string
lastBody []byte
lastPath string
)
host, err := os.Hostname()
if err != nil {
t.Error(err)
}
// Fake a Pushgateway that always responds with 202.
pgwOK := httptest.NewServer(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
lastMethod = r.Method
var err error
lastBody, err = ioutil.ReadAll(r.Body)
if err != nil {
t.Fatal(err)
}
lastPath = r.URL.EscapedPath()
w.Header().Set("Content-Type", `text/plain; charset=utf-8`)
w.WriteHeader(http.StatusAccepted)
}),
)
defer pgwOK.Close()
// Fake a Pushgateway that always responds with 500.
pgwErr := httptest.NewServer(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
http.Error(w, "fake error", http.StatusInternalServerError)
}),
)
defer pgwErr.Close()
metric1 := prometheus.NewCounter(prometheus.CounterOpts{
Name: "testname1",
Help: "testhelp1",
})
metric2 := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "testname2",
Help: "testhelp2",
ConstLabels: prometheus.Labels{"foo": "bar", "dings": "bums"},
})
reg := prometheus.NewRegistry()
reg.MustRegister(metric1)
reg.MustRegister(metric2)
mfs, err := reg.Gather()
if err != nil {
t.Fatal(err)
}
buf := &bytes.Buffer{}
enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)
for _, mf := range mfs {
if err := enc.Encode(mf); err != nil {
t.Fatal(err)
}
}
wantBody := buf.Bytes()
// PushCollectors, all good.
if err := Collectors("testjob", HostnameGroupingKey(), pgwOK.URL, metric1, metric2); err != nil {
t.Fatal(err)
}
if lastMethod != "PUT" {
t.Error("want method PUT for PushCollectors, got", lastMethod)
}
if bytes.Compare(lastBody, wantBody) != 0 {
t.Errorf("got body %v, want %v", lastBody, wantBody)
}
if lastPath != "/metrics/job/testjob/instance/"+host {
t.Error("unexpected path:", lastPath)
}
// PushAddCollectors, with nil grouping, all good.
if err := AddCollectors("testjob", nil, pgwOK.URL, metric1, metric2); err != nil {
t.Fatal(err)
}
if lastMethod != "POST" {
t.Error("want method POST for PushAddCollectors, got", lastMethod)
}
if bytes.Compare(lastBody, wantBody) != 0 {
t.Errorf("got body %v, want %v", lastBody, wantBody)
}
if lastPath != "/metrics/job/testjob" {
t.Error("unexpected path:", lastPath)
}
// PushCollectors with a broken PGW.
if err := Collectors("testjob", nil, pgwErr.URL, metric1, metric2); err == nil {
t.Error("push to broken Pushgateway succeeded")
} else {
if got, want := err.Error(), "unexpected status code 500 while pushing to "+pgwErr.URL+"/metrics/job/testjob: fake error\n"; got != want {
t.Errorf("got error %q, want %q", got, want)
}
}
// PushCollectors with invalid grouping or job.
if err := Collectors("testjob", map[string]string{"foo": "bums"}, pgwErr.URL, metric1, metric2); err == nil {
t.Error("push with grouping contained in metrics succeeded")
}
if err := Collectors("test/job", nil, pgwErr.URL, metric1, metric2); err == nil {
t.Error("push with invalid job value succeeded")
}
if err := Collectors("testjob", map[string]string{"foo/bar": "bums"}, pgwErr.URL, metric1, metric2); err == nil {
t.Error("push with invalid grouping succeeded")
}
if err := Collectors("testjob", map[string]string{"foo-bar": "bums"}, pgwErr.URL, metric1, metric2); err == nil {
t.Error("push with invalid grouping succeeded")
}
// Push registry, all good.
if err := FromGatherer("testjob", HostnameGroupingKey(), pgwOK.URL, reg); err != nil {
t.Fatal(err)
}
if lastMethod != "PUT" {
t.Error("want method PUT for Push, got", lastMethod)
}
if bytes.Compare(lastBody, wantBody) != 0 {
t.Errorf("got body %v, want %v", lastBody, wantBody)
}
// PushAdd registry, all good.
if err := AddFromGatherer("testjob", map[string]string{"a": "x", "b": "y"}, pgwOK.URL, reg); err != nil {
t.Fatal(err)
}
if lastMethod != "POST" {
t.Error("want method POSTT for PushAdd, got", lastMethod)
}
if bytes.Compare(lastBody, wantBody) != 0 {
t.Errorf("got body %v, want %v", lastBody, wantBody)
}
if lastPath != "/metrics/job/testjob/a/x/b/y" && lastPath != "/metrics/job/testjob/b/y/a/x" {
t.Error("unexpected path:", lastPath)
}
}

View File

@@ -0,0 +1,762 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"bytes"
"errors"
"fmt"
"os"
"sort"
"sync"
"unicode/utf8"
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
)
const (
// Capacity for the channel to collect metrics and descriptors.
capMetricChan = 1000
capDescChan = 10
)
// DefaultRegisterer and DefaultGatherer are the implementations of the
// Registerer and Gatherer interface a number of convenience functions in this
// package act on. Initially, both variables point to the same Registry, which
// has a process collector (see NewProcessCollector) and a Go collector (see
// NewGoCollector) already registered. This approach to keep default instances
// as global state mirrors the approach of other packages in the Go standard
// library. Note that there are caveats. Change the variables with caution and
// only if you understand the consequences. Users who want to avoid global state
// altogether should not use the convenience functions and should act on custom
// instances instead.
var (
defaultRegistry = NewRegistry()
DefaultRegisterer Registerer = defaultRegistry
DefaultGatherer Gatherer = defaultRegistry
)
func init() {
MustRegister(NewProcessCollector(os.Getpid(), ""))
MustRegister(NewGoCollector())
}
// NewRegistry creates a new vanilla Registry without any Collectors
// pre-registered.
func NewRegistry() *Registry {
return &Registry{
collectorsByID: map[uint64]Collector{},
descIDs: map[uint64]struct{}{},
dimHashesByName: map[string]uint64{},
}
}
// NewPedanticRegistry returns a registry that checks during collection if each
// collected Metric is consistent with its reported Desc, and if the Desc has
// actually been registered with the registry.
//
// Usually, a Registry will be happy as long as the union of all collected
// Metrics is consistent and valid even if some metrics are not consistent with
// their own Desc or a Desc provided by their registered Collector. Well-behaved
// Collectors and Metrics will only provide consistent Descs. This Registry is
// useful to test the implementation of Collectors and Metrics.
func NewPedanticRegistry() *Registry {
r := NewRegistry()
r.pedanticChecksEnabled = true
return r
}
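// A minimal sketch of exercising a custom Collector against a pedantic
// registry in a test (t and myCollector are assumed to be provided by the
// surrounding test):
//
//    reg := prometheus.NewPedanticRegistry()
//    if err := reg.Register(myCollector); err != nil {
//        t.Fatal("registration failed:", err)
//    }
//    if _, err := reg.Gather(); err != nil {
//        t.Error("gathering failed consistency checks:", err)
//    }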
// Registerer is the interface for the part of a registry in charge of
// registering and unregistering. Users of custom registries should use
// Registerer as type for registration purposes (rather than the Registry type
// directly). In that way, they are free to use custom Registerer
// implementations (e.g. for testing purposes).
type Registerer interface {
// Register registers a new Collector to be included in metrics
// collection. It returns an error if the descriptors provided by the
// Collector are invalid or if they — in combination with descriptors of
// already registered Collectors — do not fulfill the consistency and
// uniqueness criteria described in the documentation of metric.Desc.
//
// If the provided Collector is equal to a Collector already registered
// (which includes the case of re-registering the same Collector), the
// returned error is an instance of AlreadyRegisteredError, which
// contains the previously registered Collector.
//
// It is in general not safe to register the same Collector multiple
// times concurrently.
Register(Collector) error
// MustRegister works like Register but registers any number of
// Collectors and panics upon the first registration that causes an
// error.
MustRegister(...Collector)
// Unregister unregisters the Collector that equals the Collector passed
// in as an argument. (Two Collectors are considered equal if their
// Describe method yields the same set of descriptors.) The function
// returns whether a Collector was unregistered.
//
// Note that even after unregistering, it will not be possible to
// register a new Collector that is inconsistent with the unregistered
// Collector, e.g. a Collector collecting metrics with the same name but
// a different help string. The rationale here is that the same registry
// instance must only collect consistent metrics throughout its
// lifetime.
Unregister(Collector) bool
}
// Gatherer is the interface for the part of a registry in charge of gathering
// the collected metrics into a number of MetricFamilies. The Gatherer interface
// comes with the same general implication as described for the Registerer
// interface.
type Gatherer interface {
// Gather calls the Collect method of the registered Collectors and then
// gathers the collected metrics into a lexicographically sorted slice
// of MetricFamily protobufs. Even if an error occurs, Gather attempts
// to gather as many metrics as possible. Hence, if a non-nil error is
// returned, the returned MetricFamily slice could be nil (in case of a
// fatal error that prevented any meaningful metric collection) or
// contain a number of MetricFamily protobufs, some of which might be
// incomplete, and some might be missing altogether. The returned error
// (which might be a MultiError) explains the details. In scenarios
// where complete collection is critical, the returned MetricFamily
// protobufs should be disregarded if the returned error is non-nil.
Gather() ([]*dto.MetricFamily, error)
}
// Register registers the provided Collector with the DefaultRegisterer.
//
// Register is a shortcut for DefaultRegisterer.Register(c). See there for more
// details.
func Register(c Collector) error {
return DefaultRegisterer.Register(c)
}
// MustRegister registers the provided Collectors with the DefaultRegisterer and
// panics if any error occurs.
//
// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See
// there for more details.
func MustRegister(cs ...Collector) {
DefaultRegisterer.MustRegister(cs...)
}
// Unregister removes the registration of the provided Collector from the
// DefaultRegisterer.
//
// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for
// more details.
func Unregister(c Collector) bool {
return DefaultRegisterer.Unregister(c)
}
// GathererFunc turns a function into a Gatherer.
type GathererFunc func() ([]*dto.MetricFamily, error)
// Gather implements Gatherer.
func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) {
return gf()
}
// AlreadyRegisteredError is returned by the Register method if the Collector to
// be registered has already been registered before, or a different Collector
// that collects the same metrics has been registered before. Registration fails
// in that case, but you can detect from the kind of error what has
// happened. The error contains fields for the existing Collector and the
// (rejected) new Collector that equals the existing one. This can be used to
// find out if an equal Collector has been registered before and switch over to
// using the old one, as demonstrated in the example.
type AlreadyRegisteredError struct {
ExistingCollector, NewCollector Collector
}
func (err AlreadyRegisteredError) Error() string {
return "duplicate metrics collector registration attempted"
}
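// A sketch of the switch-over pattern described above (the counter options are
// illustrative assumptions):
//
//    requests := prometheus.NewCounter(prometheus.CounterOpts{
//        Name: "http_requests_total",
//        Help: "Total number of HTTP requests.",
//    })
//    if err := prometheus.Register(requests); err != nil {
//        if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
//            // A Counter with the same name was registered before; use it.
//            requests = are.ExistingCollector.(prometheus.Counter)
//        } else {
//            panic(err)
//        }
//    }
//    requests.Inc()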
// MultiError is a slice of errors implementing the error interface. It is used
// by a Gatherer to report multiple errors during MetricFamily gathering.
type MultiError []error
func (errs MultiError) Error() string {
if len(errs) == 0 {
return ""
}
buf := &bytes.Buffer{}
fmt.Fprintf(buf, "%d error(s) occurred:", len(errs))
for _, err := range errs {
fmt.Fprintf(buf, "\n* %s", err)
}
return buf.String()
}
// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
// contained error as error if len(errs) is 1. In all other cases, it returns
// the MultiError directly. This is helpful for returning a MultiError in a way
// that only uses the MultiError if needed.
func (errs MultiError) MaybeUnwrap() error {
switch len(errs) {
case 0:
return nil
case 1:
return errs[0]
default:
return errs
}
}
// Registry registers Prometheus collectors, collects their metrics, and gathers
// them into MetricFamilies for exposition. It implements both Registerer and
// Gatherer. The zero value is not usable. Create instances with NewRegistry or
// NewPedanticRegistry.
type Registry struct {
mtx sync.RWMutex
collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
descIDs map[uint64]struct{}
dimHashesByName map[string]uint64
pedanticChecksEnabled bool
}
// Register implements Registerer.
func (r *Registry) Register(c Collector) error {
var (
descChan = make(chan *Desc, capDescChan)
newDescIDs = map[uint64]struct{}{}
newDimHashesByName = map[string]uint64{}
collectorID uint64 // Just a sum of all desc IDs.
duplicateDescErr error
)
go func() {
c.Describe(descChan)
close(descChan)
}()
r.mtx.Lock()
defer r.mtx.Unlock()
// Conduct various tests...
for desc := range descChan {
// Is the descriptor valid at all?
if desc.err != nil {
return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
}
// Is the descID unique?
// (In other words: Is the fqName + constLabel combination unique?)
if _, exists := r.descIDs[desc.id]; exists {
duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
}
// If it is not a duplicate desc in this collector, add it to
// the collectorID. (We allow duplicate descs within the same
// collector, but their existence must be a no-op.)
if _, exists := newDescIDs[desc.id]; !exists {
newDescIDs[desc.id] = struct{}{}
collectorID += desc.id
}
// Are all the label names and the help string consistent with
// previous descriptors of the same name?
// First check existing descriptors...
if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
if dimHash != desc.dimHash {
return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
}
} else {
// ...then check the new descriptors already seen.
if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
if dimHash != desc.dimHash {
return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
}
} else {
newDimHashesByName[desc.fqName] = desc.dimHash
}
}
}
// Did anything happen at all?
if len(newDescIDs) == 0 {
return errors.New("collector has no descriptors")
}
if existing, exists := r.collectorsByID[collectorID]; exists {
return AlreadyRegisteredError{
ExistingCollector: existing,
NewCollector: c,
}
}
// If the collectorID is new, but at least one of the descs existed
// before, we are in trouble.
if duplicateDescErr != nil {
return duplicateDescErr
}
// Only after all tests have passed, actually register.
r.collectorsByID[collectorID] = c
for hash := range newDescIDs {
r.descIDs[hash] = struct{}{}
}
for name, dimHash := range newDimHashesByName {
r.dimHashesByName[name] = dimHash
}
return nil
}
// Unregister implements Registerer.
func (r *Registry) Unregister(c Collector) bool {
var (
descChan = make(chan *Desc, capDescChan)
descIDs = map[uint64]struct{}{}
collectorID uint64 // Just a sum of the desc IDs.
)
go func() {
c.Describe(descChan)
close(descChan)
}()
for desc := range descChan {
if _, exists := descIDs[desc.id]; !exists {
collectorID += desc.id
descIDs[desc.id] = struct{}{}
}
}
r.mtx.RLock()
if _, exists := r.collectorsByID[collectorID]; !exists {
r.mtx.RUnlock()
return false
}
r.mtx.RUnlock()
r.mtx.Lock()
defer r.mtx.Unlock()
delete(r.collectorsByID, collectorID)
for id := range descIDs {
delete(r.descIDs, id)
}
// dimHashesByName is left untouched as those must be consistent
// throughout the lifetime of a program.
return true
}
// MustRegister implements Registerer.
func (r *Registry) MustRegister(cs ...Collector) {
for _, c := range cs {
if err := r.Register(c); err != nil {
panic(err)
}
}
}
// Gather implements Gatherer.
func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
var (
metricChan = make(chan Metric, capMetricChan)
metricHashes = map[uint64]struct{}{}
dimHashes = map[string]uint64{}
wg sync.WaitGroup
errs MultiError // The collected errors to return in the end.
registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
)
r.mtx.RLock()
metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
// Scatter.
// (Collectors could be complex and slow, so we call them all at once.)
wg.Add(len(r.collectorsByID))
go func() {
wg.Wait()
close(metricChan)
}()
for _, collector := range r.collectorsByID {
go func(collector Collector) {
defer wg.Done()
collector.Collect(metricChan)
}(collector)
}
// In case pedantic checks are enabled, we have to copy the map before
// giving up the RLock.
if r.pedanticChecksEnabled {
registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs))
for id := range r.descIDs {
registeredDescIDs[id] = struct{}{}
}
}
r.mtx.RUnlock()
// Drain metricChan in case of premature return.
defer func() {
for range metricChan {
}
}()
// Gather.
for metric := range metricChan {
// This could be done concurrently, too, but it would require locking
// of metricFamiliesByName (and of metricHashes if checks are
// enabled). Most likely not worth it.
desc := metric.Desc()
dtoMetric := &dto.Metric{}
if err := metric.Write(dtoMetric); err != nil {
errs = append(errs, fmt.Errorf(
"error collecting metric %v: %s", desc, err,
))
continue
}
metricFamily, ok := metricFamiliesByName[desc.fqName]
if ok {
if metricFamily.GetHelp() != desc.help {
errs = append(errs, fmt.Errorf(
"collected metric %s %s has help %q but should have %q",
desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
))
continue
}
// TODO(beorn7): Simplify switch once Desc has type.
switch metricFamily.GetType() {
case dto.MetricType_COUNTER:
if dtoMetric.Counter == nil {
errs = append(errs, fmt.Errorf(
"collected metric %s %s should be a Counter",
desc.fqName, dtoMetric,
))
continue
}
case dto.MetricType_GAUGE:
if dtoMetric.Gauge == nil {
errs = append(errs, fmt.Errorf(
"collected metric %s %s should be a Gauge",
desc.fqName, dtoMetric,
))
continue
}
case dto.MetricType_SUMMARY:
if dtoMetric.Summary == nil {
errs = append(errs, fmt.Errorf(
"collected metric %s %s should be a Summary",
desc.fqName, dtoMetric,
))
continue
}
case dto.MetricType_UNTYPED:
if dtoMetric.Untyped == nil {
errs = append(errs, fmt.Errorf(
"collected metric %s %s should be Untyped",
desc.fqName, dtoMetric,
))
continue
}
case dto.MetricType_HISTOGRAM:
if dtoMetric.Histogram == nil {
errs = append(errs, fmt.Errorf(
"collected metric %s %s should be a Histogram",
desc.fqName, dtoMetric,
))
continue
}
default:
panic("encountered MetricFamily with invalid type")
}
} else {
metricFamily = &dto.MetricFamily{}
metricFamily.Name = proto.String(desc.fqName)
metricFamily.Help = proto.String(desc.help)
// TODO(beorn7): Simplify switch once Desc has type.
switch {
case dtoMetric.Gauge != nil:
metricFamily.Type = dto.MetricType_GAUGE.Enum()
case dtoMetric.Counter != nil:
metricFamily.Type = dto.MetricType_COUNTER.Enum()
case dtoMetric.Summary != nil:
metricFamily.Type = dto.MetricType_SUMMARY.Enum()
case dtoMetric.Untyped != nil:
metricFamily.Type = dto.MetricType_UNTYPED.Enum()
case dtoMetric.Histogram != nil:
metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
default:
errs = append(errs, fmt.Errorf(
"empty metric collected: %s", dtoMetric,
))
continue
}
metricFamiliesByName[desc.fqName] = metricFamily
}
if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil {
errs = append(errs, err)
continue
}
if r.pedanticChecksEnabled {
// Is the desc registered at all?
if _, exist := registeredDescIDs[desc.id]; !exist {
errs = append(errs, fmt.Errorf(
"collected metric %s %s with unregistered descriptor %s",
metricFamily.GetName(), dtoMetric, desc,
))
continue
}
if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
errs = append(errs, err)
continue
}
}
metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
}
return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
}
// Gatherers is a slice of Gatherer instances that implements the Gatherer
// interface itself. Its Gather method calls Gather on all Gatherers in the
// slice in order and returns the merged results. Errors returned from the
// Gather calls are all returned in a flattened MultiError. Duplicate and
// inconsistent Metrics are skipped (first occurrence in slice order wins) and
// reported in the returned error.
//
// Gatherers can be used to merge the Gather results from multiple
// Registries. It also provides a way to directly inject existing MetricFamily
// protobufs into the gathering by creating a custom Gatherer with a Gather
// method that simply returns the existing MetricFamily protobufs. Note that no
// registration is involved (in contrast to Collector registration), so
// obviously registration-time checks cannot happen. Any inconsistencies between
// the gathered MetricFamilies are reported as errors by the Gather method, and
// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies
// (e.g. syntactically invalid metric or label names) will go undetected.
type Gatherers []Gatherer
// Gather implements Gatherer.
func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
var (
metricFamiliesByName = map[string]*dto.MetricFamily{}
metricHashes = map[uint64]struct{}{}
dimHashes = map[string]uint64{}
errs MultiError // The collected errors to return in the end.
)
for i, g := range gs {
mfs, err := g.Gather()
if err != nil {
if multiErr, ok := err.(MultiError); ok {
for _, err := range multiErr {
errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
}
} else {
errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
}
}
for _, mf := range mfs {
existingMF, exists := metricFamiliesByName[mf.GetName()]
if exists {
if existingMF.GetHelp() != mf.GetHelp() {
errs = append(errs, fmt.Errorf(
"gathered metric family %s has help %q but should have %q",
mf.GetName(), mf.GetHelp(), existingMF.GetHelp(),
))
continue
}
if existingMF.GetType() != mf.GetType() {
errs = append(errs, fmt.Errorf(
"gathered metric family %s has type %s but should have %s",
mf.GetName(), mf.GetType(), existingMF.GetType(),
))
continue
}
} else {
existingMF = &dto.MetricFamily{}
existingMF.Name = mf.Name
existingMF.Help = mf.Help
existingMF.Type = mf.Type
metricFamiliesByName[mf.GetName()] = existingMF
}
for _, m := range mf.Metric {
if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil {
errs = append(errs, err)
continue
}
existingMF.Metric = append(existingMF.Metric, m)
}
}
}
return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
}
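// A hedged sketch of merging a Registry with pre-existing MetricFamily
// protobufs via Gatherers and GathererFunc (staticMFs is an assumed,
// caller-provided []*dto.MetricFamily):
//
//    g := prometheus.Gatherers{
//        prometheus.DefaultGatherer,
//        prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) {
//            return staticMFs, nil
//        }),
//    }
//    mfs, err := g.Gather()
//    if err != nil {
//        log.Println("some metrics could not be gathered:", err)
//    }
//    // mfs now holds the merged, lexicographically sorted MetricFamilies.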
// metricSorter is a sortable slice of *dto.Metric.
type metricSorter []*dto.Metric
func (s metricSorter) Len() int {
return len(s)
}
func (s metricSorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s metricSorter) Less(i, j int) bool {
if len(s[i].Label) != len(s[j].Label) {
// This should not happen. The metrics are
// inconsistent. However, we have to deal with the fact, as
// people might use custom collectors or metric family injection
// to create inconsistent metrics. So let's simply compare the
// number of labels in this case. That will still yield
// reproducible sorting.
return len(s[i].Label) < len(s[j].Label)
}
for n, lp := range s[i].Label {
vi := lp.GetValue()
vj := s[j].Label[n].GetValue()
if vi != vj {
return vi < vj
}
}
// We should never arrive here. Multiple metrics with the same
// label set in the same scrape will lead to undefined ingestion
// behavior. However, as above, we have to provide stable sorting
// here, even for inconsistent metrics. So sort equal metrics
// by their timestamp, with missing timestamps (implying "now")
// coming last.
if s[i].TimestampMs == nil {
return false
}
if s[j].TimestampMs == nil {
return true
}
return s[i].GetTimestampMs() < s[j].GetTimestampMs()
}
// normalizeMetricFamilies returns a MetricFamily slice with empty
// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
// the slice, with the contained Metrics sorted within each MetricFamily.
func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
for _, mf := range metricFamiliesByName {
sort.Sort(metricSorter(mf.Metric))
}
names := make([]string, 0, len(metricFamiliesByName))
for name, mf := range metricFamiliesByName {
if len(mf.Metric) > 0 {
names = append(names, name)
}
}
sort.Strings(names)
result := make([]*dto.MetricFamily, 0, len(names))
for _, name := range names {
result = append(result, metricFamiliesByName[name])
}
return result
}
// checkMetricConsistency checks if the provided Metric is consistent with the
// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
// name. If the resulting hash is already in the provided metricHashes, an error
// is returned. If not, it is added to metricHashes. The provided dimHashes maps
// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes
// doesn't yet contain a hash for the provided MetricFamily, it is
// added. Otherwise, an error is returned if the existing dimHash is not equal
// to the calculated dimHash.
func checkMetricConsistency(
metricFamily *dto.MetricFamily,
dtoMetric *dto.Metric,
metricHashes map[uint64]struct{},
dimHashes map[string]uint64,
) error {
// Type consistency with metric family.
if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
return fmt.Errorf(
"collected metric %s %s is not a %s",
metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
)
}
for _, labelPair := range dtoMetric.GetLabel() {
if !utf8.ValidString(*labelPair.Value) {
return fmt.Errorf("collected metric's label %s is not utf8: %#v", *labelPair.Name, *labelPair.Value)
}
}
// Is the metric unique (i.e. no other metric with the same name and the same label values)?
h := hashNew()
h = hashAdd(h, metricFamily.GetName())
h = hashAddByte(h, separatorByte)
dh := hashNew()
// Make sure label pairs are sorted. We depend on it for the consistency
// check.
sort.Sort(LabelPairSorter(dtoMetric.Label))
for _, lp := range dtoMetric.Label {
h = hashAdd(h, lp.GetValue())
h = hashAddByte(h, separatorByte)
dh = hashAdd(dh, lp.GetName())
dh = hashAddByte(dh, separatorByte)
}
if _, exists := metricHashes[h]; exists {
return fmt.Errorf(
"collected metric %s %s was collected before with the same name and label values",
metricFamily.GetName(), dtoMetric,
)
}
if dimHash, ok := dimHashes[metricFamily.GetName()]; ok {
if dimHash != dh {
return fmt.Errorf(
"collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family",
metricFamily.GetName(), dtoMetric,
)
}
} else {
dimHashes[metricFamily.GetName()] = dh
}
metricHashes[h] = struct{}{}
return nil
}
func checkDescConsistency(
metricFamily *dto.MetricFamily,
dtoMetric *dto.Metric,
desc *Desc,
) error {
// Desc help consistency with metric family help.
if metricFamily.GetHelp() != desc.help {
return fmt.Errorf(
"collected metric %s %s has help %q but should have %q",
metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help,
)
}
// Is the desc consistent with the content of the metric?
lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label))
lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...)
for _, l := range desc.variableLabels {
lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
Name: proto.String(l),
})
}
if len(lpsFromDesc) != len(dtoMetric.Label) {
return fmt.Errorf(
"labels in collected metric %s %s are inconsistent with descriptor %s",
metricFamily.GetName(), dtoMetric, desc,
)
}
sort.Sort(LabelPairSorter(lpsFromDesc))
for i, lpFromDesc := range lpsFromDesc {
lpFromMetric := dtoMetric.Label[i]
if lpFromDesc.GetName() != lpFromMetric.GetName() ||
lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() {
return fmt.Errorf(
"labels in collected metric %s %s are inconsistent with descriptor %s",
metricFamily.GetName(), dtoMetric, desc,
)
}
}
return nil
}

View File

@@ -0,0 +1,590 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Copyright (c) 2013, The Prometheus Authors
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
package prometheus_test
import (
"bytes"
"net/http"
"net/http/httptest"
"testing"
dto "github.com/prometheus/client_model/go"
"github.com/golang/protobuf/proto"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
func testHandler(t testing.TB) {
metricVec := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "name",
Help: "docstring",
ConstLabels: prometheus.Labels{"constname": "constvalue"},
},
[]string{"labelname"},
)
metricVec.WithLabelValues("val1").Inc()
metricVec.WithLabelValues("val2").Inc()
externalMetricFamily := &dto.MetricFamily{
Name: proto.String("externalname"),
Help: proto.String("externaldocstring"),
Type: dto.MetricType_COUNTER.Enum(),
Metric: []*dto.Metric{
{
Label: []*dto.LabelPair{
{
Name: proto.String("externalconstname"),
Value: proto.String("externalconstvalue"),
},
{
Name: proto.String("externallabelname"),
Value: proto.String("externalval1"),
},
},
Counter: &dto.Counter{
Value: proto.Float64(1),
},
},
},
}
externalBuf := &bytes.Buffer{}
enc := expfmt.NewEncoder(externalBuf, expfmt.FmtProtoDelim)
if err := enc.Encode(externalMetricFamily); err != nil {
t.Fatal(err)
}
externalMetricFamilyAsBytes := externalBuf.Bytes()
externalMetricFamilyAsText := []byte(`# HELP externalname externaldocstring
# TYPE externalname counter
externalname{externalconstname="externalconstvalue",externallabelname="externalval1"} 1
`)
externalMetricFamilyAsProtoText := []byte(`name: "externalname"
help: "externaldocstring"
type: COUNTER
metric: <
label: <
name: "externalconstname"
value: "externalconstvalue"
>
label: <
name: "externallabelname"
value: "externalval1"
>
counter: <
value: 1
>
>
`)
externalMetricFamilyAsProtoCompactText := []byte(`name:"externalname" help:"externaldocstring" type:COUNTER metric:<label:<name:"externalconstname" value:"externalconstvalue" > label:<name:"externallabelname" value:"externalval1" > counter:<value:1 > >
`)
expectedMetricFamily := &dto.MetricFamily{
Name: proto.String("name"),
Help: proto.String("docstring"),
Type: dto.MetricType_COUNTER.Enum(),
Metric: []*dto.Metric{
{
Label: []*dto.LabelPair{
{
Name: proto.String("constname"),
Value: proto.String("constvalue"),
},
{
Name: proto.String("labelname"),
Value: proto.String("val1"),
},
},
Counter: &dto.Counter{
Value: proto.Float64(1),
},
},
{
Label: []*dto.LabelPair{
{
Name: proto.String("constname"),
Value: proto.String("constvalue"),
},
{
Name: proto.String("labelname"),
Value: proto.String("val2"),
},
},
Counter: &dto.Counter{
Value: proto.Float64(1),
},
},
},
}
buf := &bytes.Buffer{}
enc = expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)
if err := enc.Encode(expectedMetricFamily); err != nil {
t.Fatal(err)
}
expectedMetricFamilyAsBytes := buf.Bytes()
expectedMetricFamilyAsText := []byte(`# HELP name docstring
# TYPE name counter
name{constname="constvalue",labelname="val1"} 1
name{constname="constvalue",labelname="val2"} 1
`)
expectedMetricFamilyAsProtoText := []byte(`name: "name"
help: "docstring"
type: COUNTER
metric: <
label: <
name: "constname"
value: "constvalue"
>
label: <
name: "labelname"
value: "val1"
>
counter: <
value: 1
>
>
metric: <
label: <
name: "constname"
value: "constvalue"
>
label: <
name: "labelname"
value: "val2"
>
counter: <
value: 1
>
>
`)
expectedMetricFamilyAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val1" > counter:<value:1 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val2" > counter:<value:1 > >
`)
externalMetricFamilyWithSameName := &dto.MetricFamily{
Name: proto.String("name"),
Help: proto.String("docstring"),
Type: dto.MetricType_COUNTER.Enum(),
Metric: []*dto.Metric{
{
Label: []*dto.LabelPair{
{
Name: proto.String("constname"),
Value: proto.String("constvalue"),
},
{
Name: proto.String("labelname"),
Value: proto.String("different_val"),
},
},
Counter: &dto.Counter{
Value: proto.Float64(42),
},
},
},
}
expectedMetricFamilyMergedWithExternalAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"different_val" > counter:<value:42 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val1" > counter:<value:1 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val2" > counter:<value:1 > >
`)
externalMetricFamilyWithInvalidLabelValue := &dto.MetricFamily{
Name: proto.String("name"),
Help: proto.String("docstring"),
Type: dto.MetricType_COUNTER.Enum(),
Metric: []*dto.Metric{
{
Label: []*dto.LabelPair{
{
Name: proto.String("constname"),
Value: proto.String("\xFF"),
},
{
Name: proto.String("labelname"),
Value: proto.String("different_val"),
},
},
Counter: &dto.Counter{
Value: proto.Float64(42),
},
},
},
}
expectedMetricFamilyInvalidLabelValueAsText := []byte(`An error has occurred during metrics gathering:
collected metric's label constname is not utf8: "\xff"
`)
type output struct {
headers map[string]string
body []byte
}
var scenarios = []struct {
headers map[string]string
out output
collector prometheus.Collector
externalMF []*dto.MetricFamily
}{
{ // 0
headers: map[string]string{
"Accept": "foo/bar;q=0.2, dings/bums;q=0.8",
},
out: output{
headers: map[string]string{
"Content-Type": `text/plain; version=0.0.4`,
},
body: []byte{},
},
},
{ // 1
headers: map[string]string{
"Accept": "foo/bar;q=0.2, application/quark;q=0.8",
},
out: output{
headers: map[string]string{
"Content-Type": `text/plain; version=0.0.4`,
},
body: []byte{},
},
},
{ // 2
headers: map[string]string{
"Accept": "foo/bar;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.8",
},
out: output{
headers: map[string]string{
"Content-Type": `text/plain; version=0.0.4`,
},
body: []byte{},
},
},
{ // 3
headers: map[string]string{
"Accept": "text/plain;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.8",
},
out: output{
headers: map[string]string{
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`,
},
body: []byte{},
},
},
{ // 4
headers: map[string]string{
"Accept": "application/json",
},
out: output{
headers: map[string]string{
"Content-Type": `text/plain; version=0.0.4`,
},
body: expectedMetricFamilyAsText,
},
collector: metricVec,
},
{ // 5
headers: map[string]string{
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
},
out: output{
headers: map[string]string{
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`,
},
body: expectedMetricFamilyAsBytes,
},
collector: metricVec,
},
{ // 6
headers: map[string]string{
"Accept": "application/json",
},
out: output{
headers: map[string]string{
"Content-Type": `text/plain; version=0.0.4`,
},
body: externalMetricFamilyAsText,
},
externalMF: []*dto.MetricFamily{externalMetricFamily},
},
{ // 7
headers: map[string]string{
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
},
out: output{
headers: map[string]string{
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`,
},
body: externalMetricFamilyAsBytes,
},
externalMF: []*dto.MetricFamily{externalMetricFamily},
},
{ // 8
headers: map[string]string{
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
},
out: output{
headers: map[string]string{
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`,
},
body: bytes.Join(
[][]byte{
externalMetricFamilyAsBytes,
expectedMetricFamilyAsBytes,
},
[]byte{},
),
},
collector: metricVec,
externalMF: []*dto.MetricFamily{externalMetricFamily},
},
{ // 9
headers: map[string]string{
"Accept": "text/plain",
},
out: output{
headers: map[string]string{
"Content-Type": `text/plain; version=0.0.4`,
},
body: []byte{},
},
},
{ // 10
headers: map[string]string{
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5",
},
out: output{
headers: map[string]string{
"Content-Type": `text/plain; version=0.0.4`,
},
body: expectedMetricFamilyAsText,
},
collector: metricVec,
},
{ // 11
headers: map[string]string{
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5;version=0.0.4",
},
out: output{
headers: map[string]string{
"Content-Type": `text/plain; version=0.0.4`,
},
body: bytes.Join(
[][]byte{
externalMetricFamilyAsText,
expectedMetricFamilyAsText,
},
[]byte{},
),
},
collector: metricVec,
externalMF: []*dto.MetricFamily{externalMetricFamily},
},
{ // 12
headers: map[string]string{
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.2, text/plain;q=0.5;version=0.0.2",
},
out: output{
headers: map[string]string{
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`,
},
body: bytes.Join(
[][]byte{
externalMetricFamilyAsBytes,
expectedMetricFamilyAsBytes,
},
[]byte{},
),
},
collector: metricVec,
externalMF: []*dto.MetricFamily{externalMetricFamily},
},
{ // 13
headers: map[string]string{
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=text;q=0.5, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.4",
},
out: output{
headers: map[string]string{
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text`,
},
body: bytes.Join(
[][]byte{
externalMetricFamilyAsProtoText,
expectedMetricFamilyAsProtoText,
},
[]byte{},
),
},
collector: metricVec,
externalMF: []*dto.MetricFamily{externalMetricFamily},
},
{ // 14
headers: map[string]string{
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text",
},
out: output{
headers: map[string]string{
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`,
},
body: bytes.Join(
[][]byte{
externalMetricFamilyAsProtoCompactText,
expectedMetricFamilyAsProtoCompactText,
},
[]byte{},
),
},
collector: metricVec,
externalMF: []*dto.MetricFamily{externalMetricFamily},
},
{ // 15
headers: map[string]string{
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text",
},
out: output{
headers: map[string]string{
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`,
},
body: bytes.Join(
[][]byte{
externalMetricFamilyAsProtoCompactText,
expectedMetricFamilyMergedWithExternalAsProtoCompactText,
},
[]byte{},
),
},
collector: metricVec,
externalMF: []*dto.MetricFamily{
externalMetricFamily,
externalMetricFamilyWithSameName,
},
},
{ // 16
headers: map[string]string{
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text",
},
out: output{
headers: map[string]string{
"Content-Type": `text/plain; charset=utf-8`,
},
body: expectedMetricFamilyInvalidLabelValueAsText,
},
collector: metricVec,
externalMF: []*dto.MetricFamily{
externalMetricFamily,
externalMetricFamilyWithInvalidLabelValue,
},
},
}
for i, scenario := range scenarios {
registry := prometheus.NewPedanticRegistry()
gatherer := prometheus.Gatherer(registry)
if scenario.externalMF != nil {
gatherer = prometheus.Gatherers{
registry,
prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) {
return scenario.externalMF, nil
}),
}
}
if scenario.collector != nil {
registry.Register(scenario.collector)
}
writer := httptest.NewRecorder()
handler := prometheus.InstrumentHandler("prometheus", promhttp.HandlerFor(gatherer, promhttp.HandlerOpts{}))
request, _ := http.NewRequest("GET", "/", nil)
for key, value := range scenario.headers {
request.Header.Add(key, value)
}
handler(writer, request)
for key, value := range scenario.out.headers {
if writer.HeaderMap.Get(key) != value {
t.Errorf(
"%d. expected %q for header %q, got %q",
i, value, key, writer.Header().Get(key),
)
}
}
if !bytes.Equal(scenario.out.body, writer.Body.Bytes()) {
t.Errorf(
"%d. expected body:\n%s\ngot body:\n%s\n",
i, scenario.out.body, writer.Body.Bytes(),
)
}
}
}
func TestHandler(t *testing.T) {
testHandler(t)
}
func BenchmarkHandler(b *testing.B) {
for i := 0; i < b.N; i++ {
testHandler(b)
}
}
func TestRegisterWithOrGet(t *testing.T) {
// Replace the default registerer just to be sure. This is bad, but this
// whole test will go away once RegisterOrGet is removed.
oldRegisterer := prometheus.DefaultRegisterer
defer func() {
prometheus.DefaultRegisterer = oldRegisterer
}()
prometheus.DefaultRegisterer = prometheus.NewRegistry()
original := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "test",
Help: "help",
},
[]string{"foo", "bar"},
)
equalButNotSame := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "test",
Help: "help",
},
[]string{"foo", "bar"},
)
var err error
if err = prometheus.Register(original); err != nil {
t.Fatal(err)
}
if err = prometheus.Register(equalButNotSame); err == nil {
t.Fatal("expected error when registringe equal collector")
}
if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
if are.ExistingCollector != original {
t.Error("expected original collector but got something else")
}
if are.ExistingCollector == equalButNotSame {
t.Error("expected original callector but got new one")
}
} else {
t.Error("unexpected error:", err)
}
}

View File

@@ -0,0 +1,572 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"fmt"
"math"
"sort"
"sync"
"time"
"github.com/beorn7/perks/quantile"
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
)
// quantileLabel is used for the label that defines the quantile in a
// summary.
const quantileLabel = "quantile"
// A Summary captures individual observations from an event or sample stream and
// summarizes them in a manner similar to traditional summary statistics: 1. sum
// of observations, 2. observation count, 3. rank estimations.
//
// A typical use-case is the observation of request latencies. By default, a
// Summary provides the median, the 90th and the 99th percentile of the latency
// as rank estimations.
//
// Note that the rank estimations cannot be aggregated in a meaningful way with
// the Prometheus query language (i.e. you cannot average or add them). If you
// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
// queries served across all instances of a service), consider the Histogram
// metric type. See the Prometheus documentation for more details.
//
// To create Summary instances, use NewSummary.
type Summary interface {
Metric
Collector
// Observe adds a single observation to the summary.
Observe(float64)
}
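// A minimal sketch of creating and using a Summary with explicit Objectives
// (the metric name and the observed value are illustrative assumptions):
//
//    latency := prometheus.NewSummary(prometheus.SummaryOpts{
//        Name:       "request_duration_seconds",
//        Help:       "Duration of handled requests in seconds.",
//        Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
//    })
//    latency.Observe(0.21)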
// DefObjectives are the default Summary quantile values.
//
// Deprecated: DefObjectives will not be used as the default objectives in
// v0.10 of the library. The default Summary will have no quantiles then.
var (
DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
errQuantileLabelNotAllowed = fmt.Errorf(
"%q is not allowed as label name in summaries", quantileLabel,
)
)
// Default values for SummaryOpts.
const (
// DefMaxAge is the default duration for which observations stay
// relevant.
DefMaxAge time.Duration = 10 * time.Minute
// DefAgeBuckets is the default number of buckets used to calculate the
// age of observations.
DefAgeBuckets = 5
// DefBufCap is the standard buffer size for collecting Summary observations.
DefBufCap = 500
)
// SummaryOpts bundles the options for creating a Summary metric. It is
// mandatory to set Name and Help to a non-empty string. All other fields are
// optional and can safely be left at their zero value.
type SummaryOpts struct {
// Namespace, Subsystem, and Name are components of the fully-qualified
// name of the Summary (created by joining these components with
// "_"). Only Name is mandatory, the others merely help structuring the
// name. Note that the fully-qualified name of the Summary must be a
// valid Prometheus metric name.
Namespace string
Subsystem string
Name string
// Help provides information about this Summary. Mandatory!
//
// Metrics with the same fully-qualified name must have the same Help
// string.
Help string
// ConstLabels are used to attach fixed labels to this
// Summary. Summaries with the same fully-qualified name must have the
// same label names in their ConstLabels.
//
// Note that in most cases, labels have a value that varies during the
// lifetime of a process. Those labels are usually managed with a
// SummaryVec. ConstLabels serve only special purposes. One is for the
// special case where the value of a label does not change during the
// lifetime of a process, e.g. if the revision of the running binary is
// put into a label. Another, more advanced purpose is if more than one
// Collector needs to collect Summaries with the same fully-qualified
// name. In that case, those Summaries must differ in the values of
// their ConstLabels. See the Collector examples.
//
// If the value of a label never changes (not even between binaries),
// that label most likely should not be a label at all (but part of the
// metric name).
ConstLabels Labels
// Objectives defines the quantile rank estimates with their respective
// absolute error. If Objectives[q] = e, then the value reported for q
// will be the φ-quantile value for some φ between q-e and q+e. The
// default value is DefObjectives. It is used if Objectives is left at
// its zero value (i.e. nil). To create a Summary without Objectives,
// set it to an empty map (i.e. map[float64]float64{}).
//
// Deprecated: Note that the current value of DefObjectives is
// deprecated. It will be replaced by an empty map in v0.10 of the
// library. Please explicitly set Objectives to the desired value.
Objectives map[float64]float64
// MaxAge defines the duration for which an observation stays relevant
// for the summary. Must be positive. The default value is DefMaxAge.
MaxAge time.Duration
// AgeBuckets is the number of buckets used to exclude observations that
// are older than MaxAge from the summary. A higher number has a
// resource penalty, so only increase it if the higher resolution is
// really required. For very high observation rates, you might want to
// reduce the number of age buckets. With only one age bucket, you will
// effectively see a complete reset of the summary each time MaxAge has
// passed. The default value is DefAgeBuckets.
AgeBuckets uint32
// BufCap defines the default sample stream buffer size. The default
// value of DefBufCap should suffice for most uses. If there is a need
// to increase the value, a multiple of 500 is recommended (because that
// is the internal buffer size of the underlying package
// "github.com/bmizerany/perks/quantile").
BufCap uint32
}
// A major problem with the sliding-window decay algorithm: the Merge method of
// perks/quantile is actually not working as advertised - and it might be
// unfixable, as the underlying algorithm is apparently not capable of merging
// summaries in the first place. To avoid using Merge, we are currently adding
// observations to _each_ age bucket, i.e. the effort to add a sample is
// essentially multiplied by the number of age buckets. When rotating age
// buckets, we empty the previous head stream. On scrape time, we simply take
// the quantiles from the head stream (no merging required). Result: More effort
// on observation time, less effort on scrape time, which is exactly the
// opposite of what we try to accomplish, but at least the results are correct.
//
// The quite elegant previous contraption to merge the age buckets efficiently
// on scrape time (see code up to commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
// can't be used anymore.
// NewSummary creates a new Summary based on the provided SummaryOpts.
func NewSummary(opts SummaryOpts) Summary {
return newSummary(
NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
nil,
opts.ConstLabels,
),
opts,
)
}
func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
if len(desc.variableLabels) != len(labelValues) {
panic(errInconsistentCardinality)
}
for _, n := range desc.variableLabels {
if n == quantileLabel {
panic(errQuantileLabelNotAllowed)
}
}
for _, lp := range desc.constLabelPairs {
if lp.GetName() == quantileLabel {
panic(errQuantileLabelNotAllowed)
}
}
if opts.Objectives == nil {
opts.Objectives = DefObjectives
}
if opts.MaxAge < 0 {
panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
}
if opts.MaxAge == 0 {
opts.MaxAge = DefMaxAge
}
if opts.AgeBuckets == 0 {
opts.AgeBuckets = DefAgeBuckets
}
if opts.BufCap == 0 {
opts.BufCap = DefBufCap
}
s := &summary{
desc: desc,
objectives: opts.Objectives,
sortedObjectives: make([]float64, 0, len(opts.Objectives)),
labelPairs: makeLabelPairs(desc, labelValues),
hotBuf: make([]float64, 0, opts.BufCap),
coldBuf: make([]float64, 0, opts.BufCap),
streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
}
s.headStreamExpTime = time.Now().Add(s.streamDuration)
s.hotBufExpTime = s.headStreamExpTime
for i := uint32(0); i < opts.AgeBuckets; i++ {
s.streams = append(s.streams, s.newStream())
}
s.headStream = s.streams[0]
for qu := range s.objectives {
s.sortedObjectives = append(s.sortedObjectives, qu)
}
sort.Float64s(s.sortedObjectives)
s.init(s) // Init self-collection.
return s
}
type summary struct {
selfCollector
bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
mtx sync.Mutex // Protects every other moving part.
// Lock bufMtx before mtx if both are needed.
desc *Desc
objectives map[float64]float64
sortedObjectives []float64
labelPairs []*dto.LabelPair
sum float64
cnt uint64
hotBuf, coldBuf []float64
streams []*quantile.Stream
streamDuration time.Duration
headStream *quantile.Stream
headStreamIdx int
headStreamExpTime, hotBufExpTime time.Time
}
func (s *summary) Desc() *Desc {
return s.desc
}
func (s *summary) Observe(v float64) {
s.bufMtx.Lock()
defer s.bufMtx.Unlock()
now := time.Now()
if now.After(s.hotBufExpTime) {
s.asyncFlush(now)
}
s.hotBuf = append(s.hotBuf, v)
if len(s.hotBuf) == cap(s.hotBuf) {
s.asyncFlush(now)
}
}
func (s *summary) Write(out *dto.Metric) error {
sum := &dto.Summary{}
qs := make([]*dto.Quantile, 0, len(s.objectives))
s.bufMtx.Lock()
s.mtx.Lock()
// Swap bufs even if hotBuf is empty to set new hotBufExpTime.
s.swapBufs(time.Now())
s.bufMtx.Unlock()
s.flushColdBuf()
sum.SampleCount = proto.Uint64(s.cnt)
sum.SampleSum = proto.Float64(s.sum)
for _, rank := range s.sortedObjectives {
var q float64
if s.headStream.Count() == 0 {
q = math.NaN()
} else {
q = s.headStream.Query(rank)
}
qs = append(qs, &dto.Quantile{
Quantile: proto.Float64(rank),
Value: proto.Float64(q),
})
}
s.mtx.Unlock()
if len(qs) > 0 {
sort.Sort(quantSort(qs))
}
sum.Quantile = qs
out.Summary = sum
out.Label = s.labelPairs
return nil
}
func (s *summary) newStream() *quantile.Stream {
return quantile.NewTargeted(s.objectives)
}
// asyncFlush needs bufMtx locked.
func (s *summary) asyncFlush(now time.Time) {
s.mtx.Lock()
s.swapBufs(now)
// Unblock the original goroutine that was responsible for the mutation
// that triggered the compaction. But hold onto the global non-buffer
// state mutex until the operation finishes.
go func() {
s.flushColdBuf()
s.mtx.Unlock()
}()
}
// rotateStreams needs mtx AND bufMtx locked.
func (s *summary) maybeRotateStreams() {
for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
s.headStream.Reset()
s.headStreamIdx++
if s.headStreamIdx >= len(s.streams) {
s.headStreamIdx = 0
}
s.headStream = s.streams[s.headStreamIdx]
s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
}
}
// flushColdBuf needs mtx locked.
func (s *summary) flushColdBuf() {
for _, v := range s.coldBuf {
for _, stream := range s.streams {
stream.Insert(v)
}
s.cnt++
s.sum += v
}
s.coldBuf = s.coldBuf[0:0]
s.maybeRotateStreams()
}
// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
func (s *summary) swapBufs(now time.Time) {
if len(s.coldBuf) != 0 {
panic("coldBuf is not empty")
}
s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
// hotBuf is now empty and gets new expiration set.
for now.After(s.hotBufExpTime) {
s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
}
}
type quantSort []*dto.Quantile
func (s quantSort) Len() int {
return len(s)
}
func (s quantSort) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s quantSort) Less(i, j int) bool {
return s[i].GetQuantile() < s[j].GetQuantile()
}
// SummaryVec is a Collector that bundles a set of Summaries that all share the
// same Desc, but have different values for their variable labels. This is used
// if you want to observe the same thing partitioned by various dimensions
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewSummaryVec.
type SummaryVec struct {
*metricVec
}
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
// partitioned by the given label names.
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
labelNames,
opts.ConstLabels,
)
return &SummaryVec{
metricVec: newMetricVec(desc, func(lvs ...string) Metric {
return newSummary(desc, opts, lvs...)
}),
}
}
// GetMetricWithLabelValues returns the Summary for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of
// label values is accessed for the first time, a new Summary is created.
//
// It is possible to call this method without using the returned Summary to only
// create the new Summary but leave it at its starting value, a Summary without
// any observations.
//
// Keeping the Summary for later use is possible (and should be considered if
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
// Delete can be used to delete the Summary from the SummaryVec. In that case, the
// Summary will still exist, but it will not be exported anymore, even if a
// Summary with the same label values is created later. See also the CounterVec
// example.
//
// An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc.
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
// an alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example.
func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
metric, err := m.metricVec.getMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Observer), err
}
return nil, err
}
// GetMetricWith returns the Summary for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is
// accessed for the first time, a new Summary is created. Implications of
// creating a Summary without using it and keeping the Summary for later use are
// the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc.
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (m *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
metric, err := m.metricVec.getMetricWith(labels)
if metric != nil {
return metric.(Observer), err
}
return nil, err
}
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. By not returning an
// error, WithLabelValues allows shortcuts like
// myVec.WithLabelValues("404", "GET").Observe(42.21)
func (m *SummaryVec) WithLabelValues(lvs ...string) Observer {
return m.metricVec.withLabelValues(lvs...).(Observer)
}
// With works as GetMetricWith, but panics where GetMetricWith would have
// returned an error. By not returning an error, With allows shortcuts like
// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
func (m *SummaryVec) With(labels Labels) Observer {
return m.metricVec.with(labels).(Observer)
}
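// exampleSummaryVecUsage is an editorial sketch, not part of the upstream
// code, tying together the SummaryVec methods above: request latencies are
// partitioned by the hypothetical labels "code" and "method", and the
// Observer for a hot label combination is kept around to avoid repeating
// the label lookup on every observation.
func exampleSummaryVecUsage() {
	latency := NewSummaryVec(
		SummaryOpts{
			Name:       "example_http_request_duration_seconds",
			Help:       "A sketch of request latencies by code and method.",
			Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
		},
		[]string{"code", "method"},
	)
	latency.WithLabelValues("404", "GET").Observe(0.021)
	okGet := latency.WithLabelValues("200", "GET") // cached Observer
	okGet.Observe(0.004)
	okGet.Observe(0.007)
}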
type constSummary struct {
desc *Desc
count uint64
sum float64
quantiles map[float64]float64
labelPairs []*dto.LabelPair
}
func (s *constSummary) Desc() *Desc {
return s.desc
}
func (s *constSummary) Write(out *dto.Metric) error {
sum := &dto.Summary{}
qs := make([]*dto.Quantile, 0, len(s.quantiles))
sum.SampleCount = proto.Uint64(s.count)
sum.SampleSum = proto.Float64(s.sum)
for rank, q := range s.quantiles {
qs = append(qs, &dto.Quantile{
Quantile: proto.Float64(rank),
Value: proto.Float64(q),
})
}
if len(qs) > 0 {
sort.Sort(quantSort(qs))
}
sum.Quantile = qs
out.Summary = sum
out.Label = s.labelPairs
return nil
}
// NewConstSummary returns a metric representing a Prometheus summary with fixed
// values for the count, sum, and quantiles. As those parameters cannot be
// changed, the returned value does not implement the Summary interface (but
// only the Metric interface). Users of this package will not have much use for
// it in regular operations. However, when implementing custom Collectors, it is
// useful as a throw-away metric that is generated on the fly to send it to
// Prometheus in the Collect method.
//
// quantiles maps ranks to quantile values. For example, a median latency of
// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
// map[float64]float64{0.5: 0.23, 0.99: 0.56}
//
// NewConstSummary returns an error if the length of labelValues is not
// consistent with the variable labels in Desc.
func NewConstSummary(
desc *Desc,
count uint64,
sum float64,
quantiles map[float64]float64,
labelValues ...string,
) (Metric, error) {
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
return nil, err
}
return &constSummary{
desc: desc,
count: count,
sum: sum,
quantiles: quantiles,
labelPairs: makeLabelPairs(desc, labelValues),
}, nil
}
// MustNewConstSummary is a version of NewConstSummary that panics where
// NewConstSummary would have returned an error.
func MustNewConstSummary(
desc *Desc,
count uint64,
sum float64,
quantiles map[float64]float64,
labelValues ...string,
) Metric {
m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
if err != nil {
panic(err)
}
return m
}
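// exampleConstSummaryCollector is an editorial sketch, not part of the
// upstream code, of the custom-Collector use case described above: a
// throw-away summary is built on the fly in Collect from statistics that
// are maintained elsewhere. All names and numbers are hypothetical.
type exampleConstSummaryCollector struct {
	desc *Desc
}
func newExampleConstSummaryCollector() exampleConstSummaryCollector {
	return exampleConstSummaryCollector{
		desc: NewDesc(
			"example_external_latency_seconds",
			"A sketch of latency statistics mirrored from another system.",
			nil, nil,
		),
	}
}
func (c exampleConstSummaryCollector) Describe(ch chan<- *Desc) {
	ch <- c.desc
}
func (c exampleConstSummaryCollector) Collect(ch chan<- Metric) {
	// In a real Collector, count, sum, and quantiles would be read from the
	// external system at scrape time.
	ch <- MustNewConstSummary(
		c.desc,
		4711,   // observation count
		403.34, // sum of all observed values
		map[float64]float64{0.5: 0.23, 0.99: 0.56},
	)
}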

View File

@@ -0,0 +1,388 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"math"
"math/rand"
"sort"
"sync"
"testing"
"testing/quick"
"time"
dto "github.com/prometheus/client_model/go"
)
func TestSummaryWithDefaultObjectives(t *testing.T) {
reg := NewRegistry()
summaryWithDefaultObjectives := NewSummary(SummaryOpts{
Name: "default_objectives",
Help: "Test help.",
})
if err := reg.Register(summaryWithDefaultObjectives); err != nil {
t.Error(err)
}
m := &dto.Metric{}
if err := summaryWithDefaultObjectives.Write(m); err != nil {
t.Error(err)
}
if len(m.GetSummary().Quantile) != len(DefObjectives) {
t.Error("expected default objectives in summary")
}
}
func TestSummaryWithoutObjectives(t *testing.T) {
reg := NewRegistry()
summaryWithEmptyObjectives := NewSummary(SummaryOpts{
Name: "empty_objectives",
Help: "Test help.",
Objectives: map[float64]float64{},
})
if err := reg.Register(summaryWithEmptyObjectives); err != nil {
t.Error(err)
}
m := &dto.Metric{}
if err := summaryWithEmptyObjectives.Write(m); err != nil {
t.Error(err)
}
if len(m.GetSummary().Quantile) != 0 {
t.Error("expected no objectives in summary")
}
}
func benchmarkSummaryObserve(w int, b *testing.B) {
b.StopTimer()
wg := new(sync.WaitGroup)
wg.Add(w)
g := new(sync.WaitGroup)
g.Add(1)
s := NewSummary(SummaryOpts{})
for i := 0; i < w; i++ {
go func() {
g.Wait()
for i := 0; i < b.N; i++ {
s.Observe(float64(i))
}
wg.Done()
}()
}
b.StartTimer()
g.Done()
wg.Wait()
}
func BenchmarkSummaryObserve1(b *testing.B) {
benchmarkSummaryObserve(1, b)
}
func BenchmarkSummaryObserve2(b *testing.B) {
benchmarkSummaryObserve(2, b)
}
func BenchmarkSummaryObserve4(b *testing.B) {
benchmarkSummaryObserve(4, b)
}
func BenchmarkSummaryObserve8(b *testing.B) {
benchmarkSummaryObserve(8, b)
}
func benchmarkSummaryWrite(w int, b *testing.B) {
b.StopTimer()
wg := new(sync.WaitGroup)
wg.Add(w)
g := new(sync.WaitGroup)
g.Add(1)
s := NewSummary(SummaryOpts{})
for i := 0; i < 1000000; i++ {
s.Observe(float64(i))
}
for j := 0; j < w; j++ {
outs := make([]dto.Metric, b.N)
go func(o []dto.Metric) {
g.Wait()
for i := 0; i < b.N; i++ {
s.Write(&o[i])
}
wg.Done()
}(outs)
}
b.StartTimer()
g.Done()
wg.Wait()
}
func BenchmarkSummaryWrite1(b *testing.B) {
benchmarkSummaryWrite(1, b)
}
func BenchmarkSummaryWrite2(b *testing.B) {
benchmarkSummaryWrite(2, b)
}
func BenchmarkSummaryWrite4(b *testing.B) {
benchmarkSummaryWrite(4, b)
}
func BenchmarkSummaryWrite8(b *testing.B) {
benchmarkSummaryWrite(8, b)
}
func TestSummaryConcurrency(t *testing.T) {
if testing.Short() {
t.Skip("Skipping test in short mode.")
}
rand.Seed(42)
it := func(n uint32) bool {
mutations := int(n%1e4 + 1e4)
concLevel := int(n%5 + 1)
total := mutations * concLevel
var start, end sync.WaitGroup
start.Add(1)
end.Add(concLevel)
sum := NewSummary(SummaryOpts{
Name: "test_summary",
Help: "helpless",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
})
allVars := make([]float64, total)
var sampleSum float64
for i := 0; i < concLevel; i++ {
vals := make([]float64, mutations)
for j := 0; j < mutations; j++ {
v := rand.NormFloat64()
vals[j] = v
allVars[i*mutations+j] = v
sampleSum += v
}
go func(vals []float64) {
start.Wait()
for _, v := range vals {
sum.Observe(v)
}
end.Done()
}(vals)
}
sort.Float64s(allVars)
start.Done()
end.Wait()
m := &dto.Metric{}
sum.Write(m)
if got, want := int(*m.Summary.SampleCount), total; got != want {
t.Errorf("got sample count %d, want %d", got, want)
}
if got, want := *m.Summary.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 {
t.Errorf("got sample sum %f, want %f", got, want)
}
objectives := make([]float64, 0, len(DefObjectives))
for qu := range DefObjectives {
objectives = append(objectives, qu)
}
sort.Float64s(objectives)
for i, wantQ := range objectives {
ε := DefObjectives[wantQ]
gotQ := *m.Summary.Quantile[i].Quantile
gotV := *m.Summary.Quantile[i].Value
min, max := getBounds(allVars, wantQ, ε)
if gotQ != wantQ {
t.Errorf("got quantile %f, want %f", gotQ, wantQ)
}
if gotV < min || gotV > max {
t.Errorf("got %f for quantile %f, want [%f,%f]", gotV, gotQ, min, max)
}
}
return true
}
if err := quick.Check(it, nil); err != nil {
t.Error(err)
}
}
func TestSummaryVecConcurrency(t *testing.T) {
if testing.Short() {
t.Skip("Skipping test in short mode.")
}
rand.Seed(42)
objectives := make([]float64, 0, len(DefObjectives))
for qu := range DefObjectives {
objectives = append(objectives, qu)
}
sort.Float64s(objectives)
it := func(n uint32) bool {
mutations := int(n%1e4 + 1e4)
concLevel := int(n%7 + 1)
vecLength := int(n%3 + 1)
var start, end sync.WaitGroup
start.Add(1)
end.Add(concLevel)
sum := NewSummaryVec(
SummaryOpts{
Name: "test_summary",
Help: "helpless",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
},
[]string{"label"},
)
allVars := make([][]float64, vecLength)
sampleSums := make([]float64, vecLength)
for i := 0; i < concLevel; i++ {
vals := make([]float64, mutations)
picks := make([]int, mutations)
for j := 0; j < mutations; j++ {
v := rand.NormFloat64()
vals[j] = v
pick := rand.Intn(vecLength)
picks[j] = pick
allVars[pick] = append(allVars[pick], v)
sampleSums[pick] += v
}
go func(vals []float64) {
start.Wait()
for i, v := range vals {
sum.WithLabelValues(string('A' + picks[i])).Observe(v)
}
end.Done()
}(vals)
}
for _, vars := range allVars {
sort.Float64s(vars)
}
start.Done()
end.Wait()
for i := 0; i < vecLength; i++ {
m := &dto.Metric{}
s := sum.WithLabelValues(string('A' + i))
s.(Summary).Write(m)
if got, want := int(*m.Summary.SampleCount), len(allVars[i]); got != want {
t.Errorf("got sample count %d for label %c, want %d", got, 'A'+i, want)
}
if got, want := *m.Summary.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 {
t.Errorf("got sample sum %f for label %c, want %f", got, 'A'+i, want)
}
for j, wantQ := range objectives {
ε := DefObjectives[wantQ]
gotQ := *m.Summary.Quantile[j].Quantile
gotV := *m.Summary.Quantile[j].Value
min, max := getBounds(allVars[i], wantQ, ε)
if gotQ != wantQ {
t.Errorf("got quantile %f for label %c, want %f", gotQ, 'A'+i, wantQ)
}
if gotV < min || gotV > max {
t.Errorf("got %f for quantile %f for label %c, want [%f,%f]", gotV, gotQ, 'A'+i, min, max)
}
}
}
return true
}
if err := quick.Check(it, nil); err != nil {
t.Error(err)
}
}
func TestSummaryDecay(t *testing.T) {
if testing.Short() {
t.Skip("Skipping test in short mode.")
// More because it depends on timing than because it is particularly long...
}
sum := NewSummary(SummaryOpts{
Name: "test_summary",
Help: "helpless",
MaxAge: 100 * time.Millisecond,
Objectives: map[float64]float64{0.1: 0.001},
AgeBuckets: 10,
})
m := &dto.Metric{}
i := 0
tick := time.NewTicker(time.Millisecond)
for range tick.C {
i++
sum.Observe(float64(i))
if i%10 == 0 {
sum.Write(m)
if got, want := *m.Summary.Quantile[0].Value, math.Max(float64(i)/10, float64(i-90)); math.Abs(got-want) > 20 {
t.Errorf("%d. got %f, want %f", i, got, want)
}
m.Reset()
}
if i >= 1000 {
break
}
}
tick.Stop()
// Wait for MaxAge without observations and make sure quantiles are NaN.
time.Sleep(100 * time.Millisecond)
sum.Write(m)
if got := *m.Summary.Quantile[0].Value; !math.IsNaN(got) {
t.Errorf("got %f, want NaN after expiration", got)
}
}
func getBounds(vars []float64, q, ε float64) (min, max float64) {
// TODO(beorn7): This currently tolerates an error of up to 2*ε. The
// error must be at most ε, but for some reason, it's sometimes slightly
// higher. That's a bug.
n := float64(len(vars))
lower := int((q - 2*ε) * n)
upper := int(math.Ceil((q + 2*ε) * n))
min = vars[0]
if lower > 1 {
min = vars[lower-1]
}
max = vars[len(vars)-1]
if upper < len(vars) {
max = vars[upper-1]
}
return
}

View File

@@ -0,0 +1,51 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import "time"
// Timer is a helper type to time functions. Use NewTimer to create new
// instances.
type Timer struct {
begin time.Time
observer Observer
}
// NewTimer creates a new Timer. The provided Observer is used to observe a
// duration in seconds. Timer is usually used to time a function call in the
// following way:
// func TimeMe() {
// timer := NewTimer(myHistogram)
// defer timer.ObserveDuration()
// // Do actual work.
// }
func NewTimer(o Observer) *Timer {
return &Timer{
begin: time.Now(),
observer: o,
}
}
// ObserveDuration records the duration passed since the Timer was created with
// NewTimer. It calls the Observe method of the Observer provided during
// construction with the duration in seconds as an argument. ObserveDuration is
// usually called with a defer statement.
//
// Note that this method is only guaranteed to never observe negative durations
// if used with Go 1.9+.
func (t *Timer) ObserveDuration() {
if t.observer != nil {
t.observer.Observe(time.Since(t.begin).Seconds())
}
}
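// exampleTimerWithObserverFunc is an editorial sketch, not part of the
// upstream code: since NewTimer only needs an Observer, an ObserverFunc can
// route the measured duration anywhere, for example into a Gauge that
// tracks the duration of the most recent run. The parameter is hypothetical.
func exampleTimerWithObserverFunc(lastRunDuration Gauge) {
	timer := NewTimer(ObserverFunc(lastRunDuration.Set))
	defer timer.ObserveDuration()
	// ... the actual work being timed would go here ...
}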

View File

@@ -0,0 +1,152 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"testing"
dto "github.com/prometheus/client_model/go"
)
func TestTimerObserve(t *testing.T) {
var (
his = NewHistogram(HistogramOpts{Name: "test_histogram"})
sum = NewSummary(SummaryOpts{Name: "test_summary"})
gauge = NewGauge(GaugeOpts{Name: "test_gauge"})
)
func() {
hisTimer := NewTimer(his)
sumTimer := NewTimer(sum)
gaugeTimer := NewTimer(ObserverFunc(gauge.Set))
defer hisTimer.ObserveDuration()
defer sumTimer.ObserveDuration()
defer gaugeTimer.ObserveDuration()
}()
m := &dto.Metric{}
his.Write(m)
if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
t.Errorf("want %d observations for histogram, got %d", want, got)
}
m.Reset()
sum.Write(m)
if want, got := uint64(1), m.GetSummary().GetSampleCount(); want != got {
t.Errorf("want %d observations for summary, got %d", want, got)
}
m.Reset()
gauge.Write(m)
if got := m.GetGauge().GetValue(); got <= 0 {
t.Errorf("want value > 0 for gauge, got %f", got)
}
}
func TestTimerEmpty(t *testing.T) {
emptyTimer := NewTimer(nil)
emptyTimer.ObserveDuration()
// Do nothing, just demonstrate it works without panic.
}
func TestTimerConditionalTiming(t *testing.T) {
var (
his = NewHistogram(HistogramOpts{
Name: "test_histogram",
})
timeMe = true
m = &dto.Metric{}
)
timedFunc := func() {
timer := NewTimer(ObserverFunc(func(v float64) {
if timeMe {
his.Observe(v)
}
}))
defer timer.ObserveDuration()
}
timedFunc() // This will time.
his.Write(m)
if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
t.Errorf("want %d observations for histogram, got %d", want, got)
}
timeMe = false
timedFunc() // This will not time again.
m.Reset()
his.Write(m)
if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
t.Errorf("want %d observations for histogram, got %d", want, got)
}
}
func TestTimerByOutcome(t *testing.T) {
var (
his = NewHistogramVec(
HistogramOpts{Name: "test_histogram"},
[]string{"outcome"},
)
outcome = "foo"
m = &dto.Metric{}
)
timedFunc := func() {
timer := NewTimer(ObserverFunc(func(v float64) {
his.WithLabelValues(outcome).Observe(v)
}))
defer timer.ObserveDuration()
if outcome == "foo" {
outcome = "bar"
return
}
outcome = "foo"
}
timedFunc()
his.WithLabelValues("foo").(Histogram).Write(m)
if want, got := uint64(0), m.GetHistogram().GetSampleCount(); want != got {
t.Errorf("want %d observations for 'foo' histogram, got %d", want, got)
}
m.Reset()
his.WithLabelValues("bar").(Histogram).Write(m)
if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
t.Errorf("want %d observations for 'bar' histogram, got %d", want, got)
}
timedFunc()
m.Reset()
his.WithLabelValues("foo").(Histogram).Write(m)
if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
t.Errorf("want %d observations for 'foo' histogram, got %d", want, got)
}
m.Reset()
his.WithLabelValues("bar").(Histogram).Write(m)
if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
t.Errorf("want %d observations for 'bar' histogram, got %d", want, got)
}
timedFunc()
m.Reset()
his.WithLabelValues("foo").(Histogram).Write(m)
if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
t.Errorf("want %d observations for 'foo' histogram, got %d", want, got)
}
m.Reset()
his.WithLabelValues("bar").(Histogram).Write(m)
if want, got := uint64(2), m.GetHistogram().GetSampleCount(); want != got {
t.Errorf("want %d observations for 'bar' histogram, got %d", want, got)
}
}

View File

@@ -0,0 +1,42 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
// UntypedOpts is an alias for Opts. See there for doc comments.
type UntypedOpts Opts
// UntypedFunc works like GaugeFunc but the collected metric is of type
// "Untyped". UntypedFunc is useful to mirror an external metric of unknown
// type.
//
// To create UntypedFunc instances, use NewUntypedFunc.
type UntypedFunc interface {
Metric
Collector
}
// NewUntypedFunc creates a new UntypedFunc based on the provided
// UntypedOpts. The value reported is determined by calling the given function
// from within the Write method. Take into account that metric collection may
// happen concurrently. If that results in concurrent calls to Write, like in
// the case where an UntypedFunc is directly registered with Prometheus, the
// provided function must be concurrency-safe.
func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
return newValueFunc(NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
nil,
opts.ConstLabels,
), UntypedValue, function)
}
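// exampleUntypedFunc is an editorial sketch, not part of the upstream code,
// of the use case described above: mirroring an external metric whose type
// is unknown. The metric name and the readExternalValue parameter are
// hypothetical; the function must be safe for concurrent calls because it
// is invoked at collection time.
func exampleUntypedFunc(readExternalValue func() float64) UntypedFunc {
	return NewUntypedFunc(UntypedOpts{
		Name: "example_external_mirror",
		Help: "A sketch of a value mirrored from another monitoring system.",
	}, readExternalValue)
}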

View File

@@ -0,0 +1,236 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"fmt"
"math"
"sort"
"sync/atomic"
"time"
dto "github.com/prometheus/client_model/go"
"github.com/golang/protobuf/proto"
)
// ValueType is an enumeration of metric types that represent a simple value.
type ValueType int
// Possible values for the ValueType enum.
const (
_ ValueType = iota
CounterValue
GaugeValue
UntypedValue
)
// value is a generic metric for simple values. It implements Metric, Collector,
// Counter, Gauge, and Untyped. Its effective type is determined by
// ValueType. This is a low-level building block used by the library to back the
// implementations of Counter, Gauge, and Untyped.
type value struct {
// valBits contains the bits of the represented float64 value. It has
// to go first in the struct to guarantee alignment for atomic
// operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
valBits uint64
selfCollector
desc *Desc
valType ValueType
labelPairs []*dto.LabelPair
}
// newValue returns a newly allocated value with the given Desc, ValueType,
// sample value and label values. It panics if the number of label
// values is different from the number of variable labels in Desc.
func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value {
if len(labelValues) != len(desc.variableLabels) {
panic(errInconsistentCardinality)
}
result := &value{
desc: desc,
valType: valueType,
valBits: math.Float64bits(val),
labelPairs: makeLabelPairs(desc, labelValues),
}
result.init(result)
return result
}
func (v *value) Desc() *Desc {
return v.desc
}
func (v *value) Set(val float64) {
atomic.StoreUint64(&v.valBits, math.Float64bits(val))
}
func (v *value) SetToCurrentTime() {
v.Set(float64(time.Now().UnixNano()) / 1e9)
}
func (v *value) Inc() {
v.Add(1)
}
func (v *value) Dec() {
v.Add(-1)
}
func (v *value) Add(val float64) {
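	// Editorial note: a float64 cannot be added to atomically, so Add loops
	// on a compare-and-swap of the raw bits until no concurrent writer has
	// interfered between the load and the swap.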
for {
oldBits := atomic.LoadUint64(&v.valBits)
newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) {
return
}
}
}
func (v *value) Sub(val float64) {
v.Add(val * -1)
}
func (v *value) Write(out *dto.Metric) error {
val := math.Float64frombits(atomic.LoadUint64(&v.valBits))
return populateMetric(v.valType, val, v.labelPairs, out)
}
// valueFunc is a generic metric for simple values retrieved on collect time
// from a function. It implements Metric and Collector. Its effective type is
// determined by ValueType. This is a low-level building block used by the
// library to back the implementations of CounterFunc, GaugeFunc, and
// UntypedFunc.
type valueFunc struct {
selfCollector
desc *Desc
valType ValueType
function func() float64
labelPairs []*dto.LabelPair
}
// newValueFunc returns a newly allocated valueFunc with the given Desc and
// ValueType. The value reported is determined by calling the given function
// from within the Write method. Take into account that metric collection may
// happen concurrently. If that results in concurrent calls to Write, like in
// the case where a valueFunc is directly registered with Prometheus, the
// provided function must be concurrency-safe.
func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc {
result := &valueFunc{
desc: desc,
valType: valueType,
function: function,
labelPairs: makeLabelPairs(desc, nil),
}
result.init(result)
return result
}
func (v *valueFunc) Desc() *Desc {
return v.desc
}
func (v *valueFunc) Write(out *dto.Metric) error {
return populateMetric(v.valType, v.function(), v.labelPairs, out)
}
// NewConstMetric returns a metric with one fixed value that cannot be
// changed. Users of this package will not have much use for it in regular
// operations. However, when implementing custom Collectors, it is useful as a
// throw-away metric that is generated on the fly to send it to Prometheus in
// the Collect method. NewConstMetric returns an error if the length of
// labelValues is not consistent with the variable labels in Desc.
func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
return nil, err
}
return &constMetric{
desc: desc,
valType: valueType,
val: value,
labelPairs: makeLabelPairs(desc, labelValues),
}, nil
}
// MustNewConstMetric is a version of NewConstMetric that panics where
// NewConstMetric would have returned an error.
func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
m, err := NewConstMetric(desc, valueType, value, labelValues...)
if err != nil {
panic(err)
}
return m
}
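// exampleConstMetric is an editorial sketch, not part of the upstream code,
// of building a throw-away gauge inside a custom Collector's Collect
// method, as described above. The descriptor, label, and value are
// hypothetical.
func exampleConstMetric(ch chan<- Metric) {
	desc := NewDesc(
		"example_queue_length",
		"A sketch of a gauge read from an external queue at scrape time.",
		[]string{"queue"},
		nil,
	)
	ch <- MustNewConstMetric(desc, GaugeValue, 42, "incoming")
}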
type constMetric struct {
desc *Desc
valType ValueType
val float64
labelPairs []*dto.LabelPair
}
func (m *constMetric) Desc() *Desc {
return m.desc
}
func (m *constMetric) Write(out *dto.Metric) error {
return populateMetric(m.valType, m.val, m.labelPairs, out)
}
func populateMetric(
t ValueType,
v float64,
labelPairs []*dto.LabelPair,
m *dto.Metric,
) error {
m.Label = labelPairs
switch t {
case CounterValue:
m.Counter = &dto.Counter{Value: proto.Float64(v)}
case GaugeValue:
m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
case UntypedValue:
m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
default:
return fmt.Errorf("encountered unknown type %v", t)
}
return nil
}
func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
if totalLen == 0 {
// Super fast path.
return nil
}
if len(desc.variableLabels) == 0 {
// Moderately fast path.
return desc.constLabelPairs
}
labelPairs := make([]*dto.LabelPair, 0, totalLen)
for i, n := range desc.variableLabels {
labelPairs = append(labelPairs, &dto.LabelPair{
Name: proto.String(n),
Value: proto.String(labelValues[i]),
})
}
for _, lp := range desc.constLabelPairs {
labelPairs = append(labelPairs, lp)
}
sort.Sort(LabelPairSorter(labelPairs))
return labelPairs
}

View File

@@ -0,0 +1,43 @@
package prometheus
import (
"fmt"
"testing"
)
func TestNewConstMetricInvalidLabelValues(t *testing.T) {
testCases := []struct {
desc string
labels Labels
}{
{
desc: "non utf8 label value",
labels: Labels{"a": "\xFF"},
},
{
desc: "not enough label values",
labels: Labels{},
},
{
desc: "too many label values",
labels: Labels{"a": "1", "b": "2"},
},
}
for _, test := range testCases {
metricDesc := NewDesc(
"sample_value",
"sample value",
[]string{"a"},
Labels{},
)
expectPanic(t, func() {
MustNewConstMetric(metricDesc, CounterValue, 0.3, "\xFF")
}, fmt.Sprintf("WithLabelValues: expected panic because: %s", test.desc))
if _, err := NewConstMetric(metricDesc, CounterValue, 0.3, "\xFF"); err == nil {
t.Errorf("NewConstMetric: expected error because: %s", test.desc)
}
}
}

View File

@@ -0,0 +1,363 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"fmt"
"sync"
"github.com/prometheus/common/model"
)
// metricVec is a Collector to bundle metrics of the same name that differ in
// their label values. metricVec is not used directly (and therefore
// unexported). It is used as a building block for implementations of vectors of
// a given metric type, like GaugeVec, CounterVec, SummaryVec, HistogramVec, and
// UntypedVec.
type metricVec struct {
mtx sync.RWMutex // Protects the children.
children map[uint64][]metricWithLabelValues
desc *Desc
newMetric func(labelValues ...string) Metric
hashAdd func(h uint64, s string) uint64 // replace hash function for testing collision handling
hashAddByte func(h uint64, b byte) uint64
}
// newMetricVec returns an initialized metricVec.
func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec {
return &metricVec{
children: map[uint64][]metricWithLabelValues{},
desc: desc,
newMetric: newMetric,
hashAdd: hashAdd,
hashAddByte: hashAddByte,
}
}
// metricWithLabelValues provides the metric and its label values for
// disambiguation on hash collision.
type metricWithLabelValues struct {
values []string
metric Metric
}
// Describe implements Collector. It sends exactly one Desc, namely the one
// the vector was created with, to the provided channel.
func (m *metricVec) Describe(ch chan<- *Desc) {
ch <- m.desc
}
// Collect implements Collector.
func (m *metricVec) Collect(ch chan<- Metric) {
m.mtx.RLock()
defer m.mtx.RUnlock()
for _, metrics := range m.children {
for _, metric := range metrics {
ch <- metric.metric
}
}
}
func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) {
h, err := m.hashLabelValues(lvs)
if err != nil {
return nil, err
}
return m.getOrCreateMetricWithLabelValues(h, lvs), nil
}
func (m *metricVec) getMetricWith(labels Labels) (Metric, error) {
h, err := m.hashLabels(labels)
if err != nil {
return nil, err
}
return m.getOrCreateMetricWithLabels(h, labels), nil
}
func (m *metricVec) withLabelValues(lvs ...string) Metric {
metric, err := m.getMetricWithLabelValues(lvs...)
if err != nil {
panic(err)
}
return metric
}
func (m *metricVec) with(labels Labels) Metric {
metric, err := m.getMetricWith(labels)
if err != nil {
panic(err)
}
return metric
}
// DeleteLabelValues removes the metric where the variable labels are the same
// as those passed in as labels (same order as the VariableLabels in Desc). It
// returns true if a metric was deleted.
//
// It is not an error if the number of label values is not the same as the
// number of VariableLabels in Desc. However, such inconsistent label count can
// never match an actual metric, so the method will always return false in that
// case.
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider Delete(Labels) as an
// alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
// See also the CounterVec example.
func (m *metricVec) DeleteLabelValues(lvs ...string) bool {
m.mtx.Lock()
defer m.mtx.Unlock()
h, err := m.hashLabelValues(lvs)
if err != nil {
return false
}
return m.deleteByHashWithLabelValues(h, lvs)
}
// Delete deletes the metric where the variable labels are the same as those
// passed in as labels. It returns true if a metric was deleted.
//
// It is not an error if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc. However, such inconsistent Labels
// can never match an actual metric, so the method will always return false in
// that case.
//
// This method is used for the same purpose as DeleteLabelValues(...string). See
// there for pros and cons of the two methods.
func (m *metricVec) Delete(labels Labels) bool {
m.mtx.Lock()
defer m.mtx.Unlock()
h, err := m.hashLabels(labels)
if err != nil {
return false
}
return m.deleteByHashWithLabels(h, labels)
}
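// exampleVecDeletion is an editorial sketch, not part of the upstream code,
// showing the two deletion styles above through an embedding vector type
// such as GaugeVec: Delete takes a label map and is order-independent,
// DeleteLabelValues takes the values in Desc order, and both report whether
// a metric was actually removed. The label names and values are
// hypothetical.
func exampleVecDeletion(vec *GaugeVec) {
	vec.WithLabelValues("v1", "v2").Set(1)
	_ = vec.Delete(Labels{"l2": "v2", "l1": "v1"}) // true: map order does not matter
	_ = vec.DeleteLabelValues("v1", "v2")          // false: the metric is already gone
}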
// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
// there are multiple matches in the bucket, use lvs to select a metric and
// remove only that metric.
func (m *metricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
metrics, ok := m.children[h]
if !ok {
return false
}
i := m.findMetricWithLabelValues(metrics, lvs)
if i >= len(metrics) {
return false
}
if len(metrics) > 1 {
m.children[h] = append(metrics[:i], metrics[i+1:]...)
} else {
delete(m.children, h)
}
return true
}
// deleteByHashWithLabels removes the metric from the hash bucket h. If there
// are multiple matches in the bucket, use labels to select a metric and remove
// only that metric.
func (m *metricVec) deleteByHashWithLabels(h uint64, labels Labels) bool {
metrics, ok := m.children[h]
if !ok {
return false
}
i := m.findMetricWithLabels(metrics, labels)
if i >= len(metrics) {
return false
}
if len(metrics) > 1 {
m.children[h] = append(metrics[:i], metrics[i+1:]...)
} else {
delete(m.children, h)
}
return true
}
// Reset deletes all metrics in this vector.
func (m *metricVec) Reset() {
m.mtx.Lock()
defer m.mtx.Unlock()
for h := range m.children {
delete(m.children, h)
}
}
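// Editorial note on the two hashing helpers below: a separator byte is
// folded in after every label value so that, for example, the value lists
// ("ab", "") and ("a", "b") cannot produce the same byte sequence through
// concatenation. Hash collisions that remain are resolved by comparing the
// stored label values (see the findMetricWith* helpers further down).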
func (m *metricVec) hashLabelValues(vals []string) (uint64, error) {
if err := validateLabelValues(vals, len(m.desc.variableLabels)); err != nil {
return 0, err
}
h := hashNew()
for _, val := range vals {
h = m.hashAdd(h, val)
h = m.hashAddByte(h, model.SeparatorByte)
}
return h, nil
}
func (m *metricVec) hashLabels(labels Labels) (uint64, error) {
if err := validateValuesInLabels(labels, len(m.desc.variableLabels)); err != nil {
return 0, err
}
h := hashNew()
for _, label := range m.desc.variableLabels {
val, ok := labels[label]
if !ok {
return 0, fmt.Errorf("label name %q missing in label map", label)
}
h = m.hashAdd(h, val)
h = m.hashAddByte(h, model.SeparatorByte)
}
return h, nil
}
// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
// or creates it and returns the new one.
//
// This function holds the mutex.
func (m *metricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric {
m.mtx.RLock()
metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs)
m.mtx.RUnlock()
if ok {
return metric
}
m.mtx.Lock()
defer m.mtx.Unlock()
metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs)
if !ok {
// Copy to avoid allocation in case we don't go down this code path.
copiedLVs := make([]string, len(lvs))
copy(copiedLVs, lvs)
metric = m.newMetric(copiedLVs...)
m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric})
}
return metric
}
// getOrCreateMetricWithLabels retrieves the metric by hash and label map
// or creates it and returns the new one.
//
// This function holds the mutex.
func (m *metricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric {
m.mtx.RLock()
metric, ok := m.getMetricWithHashAndLabels(hash, labels)
m.mtx.RUnlock()
if ok {
return metric
}
m.mtx.Lock()
defer m.mtx.Unlock()
metric, ok = m.getMetricWithHashAndLabels(hash, labels)
if !ok {
lvs := m.extractLabelValues(labels)
metric = m.newMetric(lvs...)
m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric})
}
return metric
}
// getMetricWithHashAndLabelValues gets a metric while handling possible
// collisions in the hash space. Must be called while holding the read mutex.
func (m *metricVec) getMetricWithHashAndLabelValues(h uint64, lvs []string) (Metric, bool) {
metrics, ok := m.children[h]
if ok {
if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) {
return metrics[i].metric, true
}
}
return nil, false
}
// getMetricWithHashAndLabels gets a metric while handling possible collisions in
// the hash space. Must be called while holding read mutex.
func (m *metricVec) getMetricWithHashAndLabels(h uint64, labels Labels) (Metric, bool) {
metrics, ok := m.children[h]
if ok {
if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) {
return metrics[i].metric, true
}
}
return nil, false
}
// findMetricWithLabelValues returns the index of the matching metric or
// len(metrics) if not found.
func (m *metricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int {
for i, metric := range metrics {
if m.matchLabelValues(metric.values, lvs) {
return i
}
}
return len(metrics)
}
// findMetricWithLabels returns the index of the matching metric or len(metrics)
// if not found.
func (m *metricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int {
for i, metric := range metrics {
if m.matchLabels(metric.values, labels) {
return i
}
}
return len(metrics)
}
func (m *metricVec) matchLabelValues(values []string, lvs []string) bool {
if len(values) != len(lvs) {
return false
}
for i, v := range values {
if v != lvs[i] {
return false
}
}
return true
}
func (m *metricVec) matchLabels(values []string, labels Labels) bool {
if len(labels) != len(values) {
return false
}
for i, k := range m.desc.variableLabels {
if values[i] != labels[k] {
return false
}
}
return true
}
func (m *metricVec) extractLabelValues(labels Labels) []string {
labelValues := make([]string, len(labels))
for i, k := range m.desc.variableLabels {
labelValues[i] = labels[k]
}
return labelValues
}

View File

@@ -0,0 +1,312 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"fmt"
"testing"
dto "github.com/prometheus/client_model/go"
)
func TestDelete(t *testing.T) {
vec := NewGaugeVec(
GaugeOpts{
Name: "test",
Help: "helpless",
},
[]string{"l1", "l2"},
)
testDelete(t, vec)
}
func TestDeleteWithCollisions(t *testing.T) {
vec := NewGaugeVec(
GaugeOpts{
Name: "test",
Help: "helpless",
},
[]string{"l1", "l2"},
)
vec.hashAdd = func(h uint64, s string) uint64 { return 1 }
vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 }
testDelete(t, vec)
}
func testDelete(t *testing.T, vec *GaugeVec) {
if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want {
t.Errorf("got %v, want %v", got, want)
}
vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42)
if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), true; got != want {
t.Errorf("got %v, want %v", got, want)
}
if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want {
t.Errorf("got %v, want %v", got, want)
}
vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42)
if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), true; got != want {
t.Errorf("got %v, want %v", got, want)
}
if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), false; got != want {
t.Errorf("got %v, want %v", got, want)
}
vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42)
if got, want := vec.Delete(Labels{"l2": "v1", "l1": "v2"}), false; got != want {
t.Errorf("got %v, want %v", got, want)
}
if got, want := vec.Delete(Labels{"l1": "v1"}), false; got != want {
t.Errorf("got %v, want %v", got, want)
}
}
func TestDeleteLabelValues(t *testing.T) {
vec := NewGaugeVec(
GaugeOpts{
Name: "test",
Help: "helpless",
},
[]string{"l1", "l2"},
)
testDeleteLabelValues(t, vec)
}
func TestDeleteLabelValuesWithCollisions(t *testing.T) {
vec := NewGaugeVec(
GaugeOpts{
Name: "test",
Help: "helpless",
},
[]string{"l1", "l2"},
)
vec.hashAdd = func(h uint64, s string) uint64 { return 1 }
vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 }
testDeleteLabelValues(t, vec)
}
func testDeleteLabelValues(t *testing.T, vec *GaugeVec) {
if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want {
t.Errorf("got %v, want %v", got, want)
}
vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42)
vec.With(Labels{"l1": "v1", "l2": "v3"}).(Gauge).Set(42) // Add junk data for collision.
if got, want := vec.DeleteLabelValues("v1", "v2"), true; got != want {
t.Errorf("got %v, want %v", got, want)
}
if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want {
t.Errorf("got %v, want %v", got, want)
}
if got, want := vec.DeleteLabelValues("v1", "v3"), true; got != want {
t.Errorf("got %v, want %v", got, want)
}
vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42)
// Delete out of order.
if got, want := vec.DeleteLabelValues("v2", "v1"), false; got != want {
t.Errorf("got %v, want %v", got, want)
}
if got, want := vec.DeleteLabelValues("v1"), false; got != want {
t.Errorf("got %v, want %v", got, want)
}
}
func TestMetricVec(t *testing.T) {
vec := NewGaugeVec(
GaugeOpts{
Name: "test",
Help: "helpless",
},
[]string{"l1", "l2"},
)
testMetricVec(t, vec)
}
func TestMetricVecWithCollisions(t *testing.T) {
vec := NewGaugeVec(
GaugeOpts{
Name: "test",
Help: "helpless",
},
[]string{"l1", "l2"},
)
vec.hashAdd = func(h uint64, s string) uint64 { return 1 }
vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 }
testMetricVec(t, vec)
}
func testMetricVec(t *testing.T, vec *GaugeVec) {
vec.Reset() // Actually test Reset now!
var pair [2]string
// Keep track of metrics.
expected := map[[2]string]int{}
for i := 0; i < 1000; i++ {
pair[0], pair[1] = fmt.Sprint(i%4), fmt.Sprint(i%5) // Varying combinations via different moduli.
expected[pair]++
vec.WithLabelValues(pair[0], pair[1]).Inc()
expected[[2]string{"v1", "v2"}]++
vec.WithLabelValues("v1", "v2").(Gauge).Inc()
}
var total int
for _, metrics := range vec.children {
for _, metric := range metrics {
total++
copy(pair[:], metric.values)
var metricOut dto.Metric
if err := metric.metric.Write(&metricOut); err != nil {
t.Fatal(err)
}
actual := *metricOut.Gauge.Value
var actualPair [2]string
for i, label := range metricOut.Label {
actualPair[i] = *label.Value
}
// Test output pair against metric.values to ensure we've selected
// the right one. We check this to ensure the below check means
// anything at all.
if actualPair != pair {
t.Fatalf("unexpected pair association in metric map: %v != %v", actualPair, pair)
}
if actual != float64(expected[pair]) {
t.Fatalf("incorrect counter value for %v: %v != %v", pair, actual, expected[pair])
}
}
}
if total != len(expected) {
t.Fatalf("unexpected number of metrics: %v != %v", total, len(expected))
}
vec.Reset()
if len(vec.children) > 0 {
t.Fatalf("reset failed")
}
}
func TestCounterVecEndToEndWithCollision(t *testing.T) {
vec := NewCounterVec(
CounterOpts{
Name: "test",
Help: "helpless",
},
[]string{"labelname"},
)
vec.WithLabelValues("77kepQFQ8Kl").Inc()
vec.WithLabelValues("!0IC=VloaY").Add(2)
m := &dto.Metric{}
if err := vec.WithLabelValues("77kepQFQ8Kl").Write(m); err != nil {
t.Fatal(err)
}
if got, want := m.GetLabel()[0].GetValue(), "77kepQFQ8Kl"; got != want {
t.Errorf("got label value %q, want %q", got, want)
}
if got, want := m.GetCounter().GetValue(), 1.; got != want {
t.Errorf("got value %f, want %f", got, want)
}
m.Reset()
if err := vec.WithLabelValues("!0IC=VloaY").Write(m); err != nil {
t.Fatal(err)
}
if got, want := m.GetLabel()[0].GetValue(), "!0IC=VloaY"; got != want {
t.Errorf("got label value %q, want %q", got, want)
}
if got, want := m.GetCounter().GetValue(), 2.; got != want {
t.Errorf("got value %f, want %f", got, want)
}
}
func BenchmarkMetricVecWithLabelValuesBasic(b *testing.B) {
benchmarkMetricVecWithLabelValues(b, map[string][]string{
"l1": {"onevalue"},
"l2": {"twovalue"},
})
}
func BenchmarkMetricVecWithLabelValues2Keys10ValueCardinality(b *testing.B) {
benchmarkMetricVecWithLabelValuesCardinality(b, 2, 10)
}
func BenchmarkMetricVecWithLabelValues4Keys10ValueCardinality(b *testing.B) {
benchmarkMetricVecWithLabelValuesCardinality(b, 4, 10)
}
func BenchmarkMetricVecWithLabelValues2Keys100ValueCardinality(b *testing.B) {
benchmarkMetricVecWithLabelValuesCardinality(b, 2, 100)
}
func BenchmarkMetricVecWithLabelValues10Keys100ValueCardinality(b *testing.B) {
benchmarkMetricVecWithLabelValuesCardinality(b, 10, 100)
}
func BenchmarkMetricVecWithLabelValues10Keys1000ValueCardinality(b *testing.B) {
benchmarkMetricVecWithLabelValuesCardinality(b, 10, 1000)
}
func benchmarkMetricVecWithLabelValuesCardinality(b *testing.B, nkeys, nvalues int) {
labels := map[string][]string{}
for i := 0; i < nkeys; i++ {
var (
k = fmt.Sprintf("key-%v", i)
vs = make([]string, 0, nvalues)
)
for j := 0; j < nvalues; j++ {
vs = append(vs, fmt.Sprintf("value-%v", j))
}
labels[k] = vs
}
benchmarkMetricVecWithLabelValues(b, labels)
}
func benchmarkMetricVecWithLabelValues(b *testing.B, labels map[string][]string) {
var keys []string
for k := range labels { // Map iteration order is random, but that doesn't matter here.
keys = append(keys, k)
}
values := make([]string, len(labels)) // Value cache for permutations.
vec := NewGaugeVec(
GaugeOpts{
Name: "test",
Help: "helpless",
},
keys,
)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
// Vary the input across the provided map entries on each iteration.
for j, k := range keys {
candidates := labels[k]
values[j] = candidates[i%len(candidates)]
}
vec.WithLabelValues(values...)
}
}