// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package promhttp provides tooling around HTTP servers and clients.
//
// First, the package allows the creation of http.Handler instances to expose
// Prometheus metrics via HTTP. promhttp.Handler acts on the
// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a
// custom registry or anything that implements the Gatherer interface. It also
// allows the creation of handlers that act differently on errors or that log
// errors.
//
// Second, the package provides tooling to instrument instances of http.Handler
// via middleware. Middleware wrappers follow the naming scheme
// InstrumentHandlerX, where X describes the intended use of the middleware.
// See each function's doc comment for specific details.
//
// Finally, the package allows for an http.RoundTripper to be instrumented via
// middleware. Middleware wrappers follow the naming scheme
// InstrumentRoundTripperX, where X describes the intended use of the
// middleware. See each function's doc comment for specific details.
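//
// As a rough sketch of the middleware style (the metric name and the wrapped
// handler below are made up for illustration), an application handler might be
// instrumented like this:
//
//	reqCnt := prometheus.NewCounterVec(
//		prometheus.CounterOpts{
//			Name: "example_app_requests_total",
//			Help: "Total HTTP requests handled, by status code.",
//		},
//		[]string{"code"},
//	)
//	prometheus.MustRegister(reqCnt)
//	appHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//		w.Write([]byte("hello"))
//	})
//	http.Handle("/", promhttp.InstrumentHandlerCounter(reqCnt, appHandler))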
package promhttp

import (
	"compress/gzip"
	"fmt"
	"io"
	"net/http"
	"strings"
	"sync"
	"time"

	"github.com/prometheus/common/expfmt"

	"github.com/prometheus/client_golang/prometheus"
)

const (
	contentTypeHeader     = "Content-Type"
	contentLengthHeader   = "Content-Length"
	contentEncodingHeader = "Content-Encoding"
	acceptEncodingHeader  = "Accept-Encoding"
)

var gzipPool = sync.Pool{
	New: func() interface{} {
		return gzip.NewWriter(nil)
	},
}

// Handler returns an http.Handler for the prometheus.DefaultGatherer, using
// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has
// no error logging, and it applies compression if requested by the client.
//
// The returned http.Handler is already instrumented using the
// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you
// create multiple http.Handlers by separate calls of the Handler function, the
// metrics used for instrumentation will be shared between them, providing
// global scrape counts.
//
// This function is meant to cover the bulk of basic use cases. If you are doing
// anything that requires more customization (including using a non-default
// Gatherer, different instrumentation, and non-default HandlerOpts), use the
// HandlerFor function. See there for details.
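//
// A minimal usage sketch (the listen address is arbitrary):
//
//	http.Handle("/metrics", promhttp.Handler())
//	log.Fatal(http.ListenAndServe(":8080", nil))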
func Handler() http.Handler {
	return InstrumentMetricHandler(
		prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}),
	)
}

// HandlerFor returns an uninstrumented http.Handler for the provided
// Gatherer. The behavior of the Handler is defined by the provided
// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom
// Gatherers, with non-default HandlerOpts, and/or with custom (or no)
// instrumentation. Use the InstrumentMetricHandler function to apply the same
// kind of instrumentation as is used by the Handler function.
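//
// A sketch of serving metrics from a custom registry with non-default options
// (the collector choice and option values are arbitrary examples):
//
//	reg := prometheus.NewRegistry()
//	reg.MustRegister(prometheus.NewGoCollector())
//	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
//		MaxRequestsInFlight: 3,
//		Timeout:             10 * time.Second,
//	}))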
func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
	var inFlightSem chan struct{}
	if opts.MaxRequestsInFlight > 0 {
		inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
	}

	h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
		if inFlightSem != nil {
			select {
			case inFlightSem <- struct{}{}: // All good, carry on.
				defer func() { <-inFlightSem }()
			default:
				http.Error(rsp, fmt.Sprintf(
					"Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight,
				), http.StatusServiceUnavailable)
				return
			}
		}
		mfs, err := reg.Gather()
		if err != nil {
			if opts.ErrorLog != nil {
				opts.ErrorLog.Println("error gathering metrics:", err)
			}
			switch opts.ErrorHandling {
			case PanicOnError:
				panic(err)
			case ContinueOnError:
				if len(mfs) == 0 {
					// Still report the error if no metrics have been gathered.
					httpError(rsp, err)
					return
				}
			case HTTPErrorOnError:
				httpError(rsp, err)
				return
			}
		}

		contentType := expfmt.Negotiate(req.Header)
		header := rsp.Header()
		header.Set(contentTypeHeader, string(contentType))

		w := io.Writer(rsp)
		if !opts.DisableCompression && gzipAccepted(req.Header) {
			header.Set(contentEncodingHeader, "gzip")
			gz := gzipPool.Get().(*gzip.Writer)
			defer gzipPool.Put(gz)

			gz.Reset(w)
			defer gz.Close()

			w = gz
		}

		enc := expfmt.NewEncoder(w, contentType)

		var lastErr error
		for _, mf := range mfs {
			if err := enc.Encode(mf); err != nil {
				lastErr = err
				if opts.ErrorLog != nil {
					opts.ErrorLog.Println("error encoding and sending metric family:", err)
				}
				switch opts.ErrorHandling {
				case PanicOnError:
					panic(err)
				case ContinueOnError:
					// Handled later.
				case HTTPErrorOnError:
					httpError(rsp, err)
					return
				}
			}
		}

		if lastErr != nil {
			httpError(rsp, lastErr)
		}
	})

	if opts.Timeout <= 0 {
		return h
	}
	return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf(
		"Exceeded configured timeout of %v.\n",
		opts.Timeout,
	))
}

// InstrumentMetricHandler is usually used with an http.Handler returned by the
// HandlerFor function. It instruments the provided http.Handler with two
// metrics: A counter vector "promhttp_metric_handler_requests_total" to count
// scrapes partitioned by HTTP status code, and a gauge
// "promhttp_metric_handler_requests_in_flight" to track the number of
// simultaneous scrapes. This function idempotently registers collectors for
// both metrics with the provided Registerer. It panics if the registration
// fails. The provided metrics are useful to see how many scrapes hit the
// monitored target (which could be from different Prometheus servers or other
// scrapers), and how often they overlap (which would result in more than one
// scrape in flight at the same time). Note that the scrapes-in-flight gauge
// will include the scrape by which it is exposed, while the scrape counter
// will only be incremented once the scrape is complete (as only then is the
// status code known). For tracking scrape durations, use the
// "scrape_duration_seconds" gauge created by the Prometheus server upon each
// scrape.
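//
// A sketch of applying the same instrumentation to a handler for a custom
// (non-default) registry, so scrape counts are kept separate from the default
// registry's:
//
//	reg := prometheus.NewRegistry()
//	handler := promhttp.InstrumentMetricHandler(
//		reg, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}),
//	)
//	http.Handle("/metrics", handler)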
func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler {
	cnt := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "promhttp_metric_handler_requests_total",
			Help: "Total number of scrapes by HTTP status code.",
		},
		[]string{"code"},
	)
	// Initialize the most likely HTTP status codes.
	cnt.WithLabelValues("200")
	cnt.WithLabelValues("500")
	cnt.WithLabelValues("503")
	if err := reg.Register(cnt); err != nil {
		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
			cnt = are.ExistingCollector.(*prometheus.CounterVec)
		} else {
			panic(err)
		}
	}

	gge := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "promhttp_metric_handler_requests_in_flight",
		Help: "Current number of scrapes being served.",
	})
	if err := reg.Register(gge); err != nil {
		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
			gge = are.ExistingCollector.(prometheus.Gauge)
		} else {
			panic(err)
		}
	}

	return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler))
}

// HandlerErrorHandling defines how a Handler serving metrics will handle
// errors.
type HandlerErrorHandling int

// These constants cause handlers serving metrics to behave as described if
// errors are encountered.
const (
	// Serve an HTTP status code 500 upon the first error
	// encountered. Report the error message in the body.
	HTTPErrorOnError HandlerErrorHandling = iota
	// Ignore errors and try to serve as many metrics as possible. However,
	// if no metrics can be served, serve an HTTP status code 500 and the
	// last error message in the body. Only use this in deliberate "best
	// effort" metrics collection scenarios. It is recommended to at least
	// log errors (by providing an ErrorLog in HandlerOpts) to not mask
	// errors completely.
	ContinueOnError
	// Panic upon the first error encountered (useful for "crash only" apps).
	PanicOnError
)

// Logger is the minimal interface HandlerOpts needs for logging. Note that
// log.Logger from the standard library implements this interface, and it is
// easy for custom loggers to implement, if they don't do so already.
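//
// For example, a *log.Logger can be used directly as the ErrorLog of
// HandlerOpts (the prefix is arbitrary):
//
//	opts := promhttp.HandlerOpts{
//		ErrorLog: log.New(os.Stderr, "promhttp: ", log.LstdFlags),
//	}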
type Logger interface {
	Println(v ...interface{})
}

// HandlerOpts specifies options for how to serve metrics via an http.Handler.
// The zero value of HandlerOpts is a reasonable default.
type HandlerOpts struct {
	// ErrorLog specifies an optional logger for errors encountered while
	// collecting and serving metrics. If nil, errors are not logged at all.
	ErrorLog Logger
	// ErrorHandling defines how errors are handled. Note that errors are
	// logged regardless of the configured ErrorHandling, provided ErrorLog
	// is not nil.
	ErrorHandling HandlerErrorHandling
	// If DisableCompression is true, the handler will never compress the
	// response, even if requested by the client.
	DisableCompression bool
	// The number of concurrent HTTP requests is limited to
	// MaxRequestsInFlight. Additional requests are responded to with 503
	// Service Unavailable and a suitable message in the body. If
	// MaxRequestsInFlight is 0 or negative, no limit is applied.
	MaxRequestsInFlight int
	// If handling a request takes longer than Timeout, it is responded to
	// with 503 Service Unavailable and a suitable message. No timeout is
	// applied if Timeout is 0 or negative. Note that with the current
	// implementation, reaching the timeout simply ends the HTTP request as
	// described above (and even that only if sending of the body hasn't
	// started yet), while the bulk of the work of gathering all the metrics
	// keeps running in the background (with the eventual result to be
	// thrown away). Until the implementation is improved, it is recommended
	// to implement a separate timeout in potentially slow Collectors.
	Timeout time.Duration
}

// gzipAccepted returns whether the client will accept gzip-encoded content.
func gzipAccepted(header http.Header) bool {
	a := header.Get(acceptEncodingHeader)
	parts := strings.Split(a, ",")
	for _, part := range parts {
		part = strings.TrimSpace(part)
		if part == "gzip" || strings.HasPrefix(part, "gzip;") {
			return true
		}
	}
	return false
}

// httpError removes any content-encoding header and then calls http.Error with
// the provided error and http.StatusInternalServerError. The error contents
// are supposed to be uncompressed plain text. However, as with a plain
// http.Error, any header settings will be void if the header has already been
// sent. The error message will still be written to the writer, but it will
// probably be of limited use.
func httpError(rsp http.ResponseWriter, err error) {
	rsp.Header().Del(contentEncodingHeader)
	http.Error(
		rsp,
		"An error has occurred while serving metrics:\n\n"+err.Error(),
		http.StatusInternalServerError,
	)
}