Update godeps

This commit is contained in:
Manuel de Brito Fontes 2016-11-28 22:39:32 -03:00
parent 86dbf979cb
commit f7011d22f8
108 changed files with 7093 additions and 4947 deletions

825
Godeps/Godeps.json generated

File diff suppressed because it is too large

View file

@ -90,7 +90,7 @@ type LoadBalancerController struct {
func NewLoadBalancerController(kubeClient client.Interface, clusterManager *ClusterManager, resyncPeriod time.Duration, namespace string) (*LoadBalancerController, error) {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(unversionedcore.EventSinkImpl{
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{
Interface: kubeClient.Core().Events(""),
})
lbc := LoadBalancerController{

View file

@ -0,0 +1,4 @@
.*.sw?
process-exporter
.tarballs
process-exporter-*.tar.gz

View file

@ -0,0 +1,35 @@
repository:
path: github.com/ncabatoff/process-exporter
build:
binaries:
- name: process-exporter
path: ./cmd/process-exporter
flags: -a -tags netgo
tarball:
files:
- LICENSE
crossbuild:
platforms:
- linux/amd64
- linux/386
- darwin/amd64
- darwin/386
- freebsd/amd64
- freebsd/386
- openbsd/amd64
- openbsd/386
- netbsd/amd64
- netbsd/386
- dragonfly/amd64
- linux/arm
- linux/arm64
- freebsd/arm
# Temporarily deactivated as golang.org/x/sys does not have syscalls
# implemented for that os/platform combination.
#- openbsd/arm
#- linux/mips64
#- linux/mips64le
- netbsd/arm
- linux/ppc64
- linux/ppc64le

View file

@ -0,0 +1,17 @@
# Start from a Debian image with the latest version of Go installed
# and a workspace (GOPATH) configured at /go.
FROM golang
# Copy the local package files to the container's workspace.
ADD . /go/src/github.com/ncabatoff/process-exporter
# Build the process-exporter command inside the container.
RUN make -C /go/src/github.com/ncabatoff/process-exporter
USER root
# Run the process-exporter command by default when the container starts.
ENTRYPOINT ["/go/src/github.com/ncabatoff/process-exporter/process-exporter"]
# Document that the service listens on port 9256.
EXPOSE 9256

21
vendor/github.com/ncabatoff/process-exporter/LICENSE generated vendored Normal file
View file

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2016 ncabatoff
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

71
vendor/github.com/ncabatoff/process-exporter/Makefile generated vendored Normal file
View file

@ -0,0 +1,71 @@
# Copyright 2015 The Prometheus Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
GO := GO15VENDOREXPERIMENT=1 go
FIRST_GOPATH := $(firstword $(subst :, ,$(GOPATH)))
PROMU := $(FIRST_GOPATH)/bin/promu
pkgs = $(shell $(GO) list ./... | grep -v /vendor/)
PREFIX ?= $(shell pwd)
BIN_DIR ?= $(shell pwd)
DOCKER_IMAGE_NAME ?= process-exporter
DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
ifdef DEBUG
bindata_flags = -debug
endif
all: format build test
style:
@echo ">> checking code style"
@! gofmt -d $(shell find . -path ./vendor -prune -o -name '*.go' -print) | grep '^'
test:
@echo ">> running short tests"
@$(GO) test -short $(pkgs)
format:
@echo ">> formatting code"
@$(GO) fmt $(pkgs)
vet:
@echo ">> vetting code"
@$(GO) vet $(pkgs)
build: promu
@echo ">> building binaries"
@$(PROMU) build --prefix $(PREFIX)
tarball: promu
@echo ">> building release tarball"
@$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
crossbuild: promu
@echo ">> cross-building"
@$(PROMU) crossbuild
@$(PROMU) crossbuild tarballs
docker:
@echo ">> building docker image"
@docker build -t "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" .
promu:
@echo ">> fetching promu"
@GOOS=$(shell uname -s | tr A-Z a-z) \
GOARCH=$(subst x86_64,amd64,$(patsubst i%86,386,$(shell uname -m))) \
$(GO) get -u github.com/prometheus/promu
.PHONY: all style format build test vet tarball crossbuild docker promu

160
vendor/github.com/ncabatoff/process-exporter/README.md generated vendored Normal file
View file

@ -0,0 +1,160 @@
# process-exporter
Prometheus exporter that mines /proc to report on selected processes.
The premise for this exporter is that sometimes you have apps that are
impractical to instrument directly, either because you don't control the code
or they're written in a language that isn't easy to instrument with Prometheus.
A fair bit of information can be gleaned from /proc, especially for
long-running programs.
For most systems it won't be beneficial to create metrics for every process by
name: there are just too many of them and most don't do enough to merit it.
Various command-line options are provided to control how processes are grouped
and the groups are named. Run "process-exporter -man" to see a help page
giving details.
Metrics available currently include CPU usage, bytes written and read, and
number of processes in each group.
Bytes read and written come from /proc/[pid]/io in recent enough kernels.
These correspond to the fields `read_bytes` and `write_bytes` respectively.
These IO stats come with plenty of caveats, see either the Linux kernel
documentation or man 5 proc.
CPU usage comes from /proc/[pid]/stat fields utime (user time) and stime (system
time). It has been translated into fractional seconds of CPU consumed. Since
it is a counter, using rate() will tell you how many fractional cores were running
code from this process during the given interval.
An example Grafana dashboard to view the metrics is available at https://grafana.net/dashboards/249
## Instrumentation cost
process-exporter will consume CPU in proportion to the number of processes in
the system and the rate at which new ones are created. The most expensive
parts - applying regexps and executing templates - are only applied once per
process seen. If you have mostly long-running processes, process-exporter
should be lightweight: each scrape amounts to parsing /proc/$pid/stat and
/proc/$pid/cmdline for every monitored process and adding up a few numbers.
## Config
To select and group the processes to monitor, either provide command-line
arguments or use a YAML configuration file.
To avoid confusion with the cmdline YAML element, we'll refer to the
null-delimited contents of `/proc/<pid>/cmdline` as the array `argv[]`.
Each item in `process_names` gives a recipe for identifying and naming
processes. The optional `name` tag defines a template to use to name
matching processes; if not specified, `name` defaults to `{{.ExeBase}}`.
Template variables available:
- `{{.ExeBase}}` contains the basename of the executable
- `{{.ExeFull}}` contains the fully qualified path of the executable
- `{{.Matches}}` map contains all the matches resulting from applying cmdline regexps
Each item in `process_names` must contain one or more selectors (`comm`, `exe`
or `cmdline`); if more than one selector is present, they must all match. Each
selector is a list of strings to match against a process's `comm`, `argv[0]`,
or in the case of `cmdline`, a regexp to apply to the command line.
For `comm` and `exe`, the list of strings is an OR, meaning any process
matching any of the strings will be added to the item's group.
For `cmdline`, the list of regexes is an AND, meaning they all must match. Any
capturing groups in a regexp must use the `?P<name>` option to assign a name to
the capture, which is used to populate `.Matches`.
A process may only belong to one group: even if multiple items would match, the
first one listed in the file wins.
One more performance tip: give an exe or comm clause in addition to any cmdline
clause, so that the regexp is executed only when the executable name
matches.
```
process_names:
# comm is the second field of /proc/<pid>/stat minus parens.
# It is the base executable name, truncated at 15 chars.
# It cannot be modified by the program, unlike exe.
- comm:
- bash
# exe is argv[0]. If no slashes, only basename of argv[0] need match.
# If exe contains slashes, argv[0] must match exactly.
- exe:
- postgres
- /usr/local/bin/prometheus
# cmdline is a list of regexps applied to argv.
# Each must match, and any captures are added to the .Matches map.
- name: "{{.ExeFull}}:{{.Matches.Cfgfile}}"
exe:
- /usr/local/bin/process-exporter
cmdline:
- -config.path\\s+(?P<Cfgfile>\\S+)
```
Here's the config I use on my home machine:
```
process_names:
- comm:
- chromium-browse
- bash
- prometheus
- gvim
- exe:
- /sbin/upstart
cmdline:
- --user
name: upstart:-user
```
## Docker
A docker image can be created with
```
make docker
```
Then run the Docker container, e.g.
```
docker run --privileged --name pexporter -d -v /proc:/host/proc -p 127.0.0.1:9256:9256 process-exporter:master -procfs /host/proc -procnames chromium-browse,bash,prometheus,gvim,upstart:-user -namemapping "upstart,(-user)"
```
This will expose metrics on http://localhost:9256/metrics. Leave off the
`127.0.0.1:` to publish on all interfaces. Leave off the `--privileged` and
add the `--user` docker run argument if you only need to monitor processes
belonging to a single user.
## History
An earlier version of this exporter had options to enable auto-discovery of
which processes were consuming resources. This functionality has been removed.
These options were based on a percentage of resource usage, e.g. if an
untracked process consumed X% of CPU during a scrape, start tracking processes
with that name. However, during any given scrape it's likely that most
processes are idle, so we could add a process that consumes minimal resources
but which happened to be active during the interval preceding the current
scrape. Over time this means that a great many processes wind up being
scraped, which becomes unmanageable to visualize. This could be mitigated by
looking at resource usage over longer intervals, but ultimately I didn't feel
this feature was important enough to invest more time in at this point. It may
reappear at some point in the future, but no promises.
Another lost feature: the "other" group was used to count usage by non-tracked
procs. This was useful for getting an idea of what wasn't being monitored. But it
came at a high cost: if you know which processes you care about, you're wasting
a lot of CPU to compute the usage of everything else that you don't care about.
The new approach is to minimize resources expended on non-tracked processes and
to require the user to whitelist the processes to track.

1
vendor/github.com/ncabatoff/process-exporter/VERSION generated vendored Normal file
View file

@ -0,0 +1 @@
0.1.0

14
vendor/github.com/ncabatoff/process-exporter/common.go generated vendored Normal file
View file

@ -0,0 +1,14 @@
package common
type (
NameAndCmdline struct {
Name string
Cmdline []string
}
MatchNamer interface {
// MatchAndName returns false if the match failed, otherwise
// true and the resulting name.
MatchAndName(NameAndCmdline) (bool, string)
}
)
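
To make the interface concrete, here is a minimal sketch (not part of this commit) of a MatchNamer that selects processes by their comm name and names the group after it; `commMatcher` and its field names are hypothetical:

```go
package main

import (
	"fmt"

	common "github.com/ncabatoff/process-exporter"
)

// commMatcher is a hypothetical MatchNamer that matches processes whose
// comm name appears in a fixed set and names the group after the comm.
type commMatcher struct {
	comms map[string]struct{}
}

// MatchAndName reports whether the process matches and, if so, its group name.
func (m commMatcher) MatchAndName(nacl common.NameAndCmdline) (bool, string) {
	if _, ok := m.comms[nacl.Name]; ok {
		return true, nacl.Name
	}
	return false, ""
}

func main() {
	var namer common.MatchNamer = commMatcher{comms: map[string]struct{}{"bash": {}}}
	fmt.Println(namer.MatchAndName(common.NameAndCmdline{Name: "bash"})) // true bash
}
```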

View file

@ -0,0 +1,166 @@
package proc
import (
common "github.com/ncabatoff/process-exporter"
"time"
)
type (
Grouper struct {
namer common.MatchNamer
trackChildren bool
// track how much was seen last time so we can report the delta
GroupStats map[string]Counts
tracker *Tracker
}
GroupCountMap map[string]GroupCounts
GroupCounts struct {
Counts
Procs int
Memresident uint64
Memvirtual uint64
OldestStartTime time.Time
}
)
func NewGrouper(trackChildren bool, namer common.MatchNamer) *Grouper {
g := Grouper{
trackChildren: trackChildren,
namer: namer,
GroupStats: make(map[string]Counts),
tracker: NewTracker(),
}
return &g
}
func (g *Grouper) checkAncestry(idinfo ProcIdInfo, newprocs map[ProcId]ProcIdInfo) string {
ppid := idinfo.ParentPid
pProcId := g.tracker.ProcIds[ppid]
if pProcId.Pid < 1 {
// Reached root of process tree without finding a tracked parent.
g.tracker.Ignore(idinfo.ProcId)
return ""
}
// Is the parent already known to the tracker?
if ptproc, ok := g.tracker.Tracked[pProcId]; ok {
if ptproc != nil {
// We've found a tracked parent.
g.tracker.Track(ptproc.GroupName, idinfo)
return ptproc.GroupName
} else {
// We've found an untracked parent.
g.tracker.Ignore(idinfo.ProcId)
return ""
}
}
// Is the parent another new process?
if pinfoid, ok := newprocs[pProcId]; ok {
if name := g.checkAncestry(pinfoid, newprocs); name != "" {
// We've found a tracked parent, which implies this entire lineage should be tracked.
g.tracker.Track(name, idinfo)
return name
}
}
// Parent is dead, i.e. we never saw it, or there's no tracked proc in our ancestry.
g.tracker.Ignore(idinfo.ProcId)
return ""
}
// Update tracks any new procs that should be tracked according to policy, and updates
// the metrics for already tracked procs. Permission errors are returned as a
// count, and will not affect the error return value.
func (g *Grouper) Update(iter ProcIter) (int, error) {
newProcs, permErrs, err := g.tracker.Update(iter)
if err != nil {
return permErrs, err
}
// Step 1: track any new proc that should be tracked based on its name and cmdline.
untracked := make(map[ProcId]ProcIdInfo)
for _, idinfo := range newProcs {
wanted, gname := g.namer.MatchAndName(common.NameAndCmdline{idinfo.Name, idinfo.Cmdline})
if !wanted {
untracked[idinfo.ProcId] = idinfo
continue
}
g.tracker.Track(gname, idinfo)
}
// Step 2: track any untracked new proc that should be tracked because its parent is tracked.
if !g.trackChildren {
return permErrs, nil
}
for _, idinfo := range untracked {
if _, ok := g.tracker.Tracked[idinfo.ProcId]; ok {
// Already tracked or ignored
continue
}
g.checkAncestry(idinfo, untracked)
}
return permErrs, nil
}
// groups returns the aggregate metrics for all groups tracked. This reflects
// solely what's currently running.
func (g *Grouper) groups() GroupCountMap {
gcounts := make(GroupCountMap)
var zeroTime time.Time
for _, tinfo := range g.tracker.Tracked {
if tinfo == nil {
continue
}
cur := gcounts[tinfo.GroupName]
cur.Procs++
_, counts, mem, start := tinfo.GetStats()
cur.Memresident += mem.Resident
cur.Memvirtual += mem.Virtual
cur.Counts.Cpu += counts.Cpu
cur.Counts.ReadBytes += counts.ReadBytes
cur.Counts.WriteBytes += counts.WriteBytes
if cur.OldestStartTime == zeroTime || start.Before(cur.OldestStartTime) {
cur.OldestStartTime = start
}
gcounts[tinfo.GroupName] = cur
}
return gcounts
}
// Groups returns GroupCounts with Counts that never decrease in value from one
// call to the next. Even if processes exit, their CPU and IO contributions up
// to that point are included in the results. Even if no processes remain
// in a group it will still be included in the results.
func (g *Grouper) Groups() GroupCountMap {
groups := g.groups()
// First add any accumulated counts to what was just observed,
// and update the accumulators.
for gname, group := range groups {
if oldcounts, ok := g.GroupStats[gname]; ok {
group.Counts.Cpu += oldcounts.Cpu
group.Counts.ReadBytes += oldcounts.ReadBytes
group.Counts.WriteBytes += oldcounts.WriteBytes
}
g.GroupStats[gname] = group.Counts
groups[gname] = group
}
// Now add any groups that were observed in the past but aren't running now.
for gname, gcounts := range g.GroupStats {
if _, ok := groups[gname]; !ok {
groups[gname] = GroupCounts{Counts: gcounts}
}
}
return groups
}
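
Putting the pieces together, a caller might drive the Grouper roughly as follows. This is a hedged usage sketch, not code from the commit; `namer` stands in for any common.MatchNamer (such as the commMatcher sketched earlier):

```go
// Hypothetical sketch of one Grouper scrape cycle.
fs, err := proc.NewFS("/proc")
if err != nil {
	log.Fatal(err)
}
g := proc.NewGrouper(true, namer) // namer is some common.MatchNamer
// One scrape: track new matching procs and accumulate deltas.
permErrs, err := g.Update(fs.AllProcs())
if err != nil {
	log.Fatal(err)
}
log.Printf("permission errors: %d", permErrs)
// Groups() never decreases: exited procs keep their accumulated CPU/IO.
for name, counts := range g.Groups() {
	log.Printf("%s: procs=%d cpu=%.2fs", name, counts.Procs, counts.Cpu)
}
```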

View file

@ -0,0 +1,306 @@
package proc
import (
"fmt"
"github.com/prometheus/procfs"
"time"
)
func newProcIdStatic(pid, ppid int, startTime uint64, name string, cmdline []string) ProcIdStatic {
return ProcIdStatic{ProcId{pid, startTime}, ProcStatic{name, cmdline, ppid, time.Time{}}}
}
type (
// ProcId uniquely identifies a process.
ProcId struct {
// UNIX process id
Pid int
// The time the process started after system boot, the value is expressed
// in clock ticks.
StartTimeRel uint64
}
// ProcStatic contains data read from /proc/pid/*
ProcStatic struct {
Name string
Cmdline []string
ParentPid int
StartTime time.Time
}
// ProcMetrics contains data read from /proc/pid/*
ProcMetrics struct {
CpuTime float64
ReadBytes uint64
WriteBytes uint64
ResidentBytes uint64
VirtualBytes uint64
}
ProcIdStatic struct {
ProcId
ProcStatic
}
ProcInfo struct {
ProcStatic
ProcMetrics
}
ProcIdInfo struct {
ProcId
ProcStatic
ProcMetrics
}
// Proc wraps the details of the underlying procfs-reading library.
Proc interface {
// GetPid() returns the POSIX PID (process id). They may be reused over time.
GetPid() int
// GetProcId() returns (pid,starttime), which can be considered a unique process id.
// It may fail if the caller doesn't have permission to read /proc/<pid>/stat, or if
// the process has disappeared.
GetProcId() (ProcId, error)
// GetStatic() returns various details read from files under /proc/<pid>/. Technically
// name may not be static, but we'll pretend it is.
// It may fail if the caller doesn't have permission to read those files, or if
// the process has disappeared.
GetStatic() (ProcStatic, error)
// GetMetrics() returns various metrics read from files under /proc/<pid>/.
// It may fail if the caller doesn't have permission to read those files, or if
// the process has disappeared.
GetMetrics() (ProcMetrics, error)
}
// proc is a wrapper for procfs.Proc that caches results of some reads and implements Proc.
proc struct {
procfs.Proc
procid *ProcId
stat *procfs.ProcStat
cmdline []string
io *procfs.ProcIO
bootTime int64
}
procs interface {
get(int) Proc
length() int
}
procfsprocs struct {
Procs []procfs.Proc
bootTime int64
}
// ProcIter is an iterator over a sequence of procs.
ProcIter interface {
// Next returns true if the iterator is not exhausted.
Next() bool
// Close releases any resources the iterator uses.
Close() error
// The iterator satisfies the Proc interface.
Proc
}
// procIterator implements the ProcIter interface using procfs.
procIterator struct {
// procs is the list of Proc we're iterating over.
procs
// idx is the current iteration, i.e. it's an index into procs.
idx int
// err is set with an error when Next() fails. It is not affected by failures accessing
// the current iteration variable, e.g. with GetProcId.
err error
// Proc is the current iteration variable, or nil if Next() has never been called or the
// iterator is exhausted.
Proc
}
procIdInfos []ProcIdInfo
)
func procInfoIter(ps ...ProcIdInfo) ProcIter {
return &procIterator{procs: procIdInfos(ps), idx: -1}
}
func Info(p Proc) (ProcIdInfo, error) {
id, err := p.GetProcId()
if err != nil {
return ProcIdInfo{}, err
}
static, err := p.GetStatic()
if err != nil {
return ProcIdInfo{}, err
}
metrics, err := p.GetMetrics()
if err != nil {
return ProcIdInfo{}, err
}
return ProcIdInfo{id, static, metrics}, nil
}
func (p procIdInfos) get(i int) Proc {
return &p[i]
}
func (p procIdInfos) length() int {
return len(p)
}
func (p ProcIdInfo) GetPid() int {
return p.ProcId.Pid
}
func (p ProcIdInfo) GetProcId() (ProcId, error) {
return p.ProcId, nil
}
func (p ProcIdInfo) GetStatic() (ProcStatic, error) {
return p.ProcStatic, nil
}
func (p ProcIdInfo) GetMetrics() (ProcMetrics, error) {
return p.ProcMetrics, nil
}
func (p procfsprocs) get(i int) Proc {
return &proc{Proc: p.Procs[i], bootTime: p.bootTime}
}
func (p procfsprocs) length() int {
return len(p.Procs)
}
func (p *proc) GetPid() int {
return p.Proc.PID
}
func (p *proc) GetStat() (procfs.ProcStat, error) {
if p.stat == nil {
stat, err := p.Proc.NewStat()
if err != nil {
return procfs.ProcStat{}, err
}
p.stat = &stat
}
return *p.stat, nil
}
func (p *proc) GetProcId() (ProcId, error) {
if p.procid == nil {
stat, err := p.GetStat()
if err != nil {
return ProcId{}, err
}
p.procid = &ProcId{Pid: p.GetPid(), StartTimeRel: stat.Starttime}
}
return *p.procid, nil
}
func (p *proc) GetCmdLine() ([]string, error) {
if p.cmdline == nil {
cmdline, err := p.Proc.CmdLine()
if err != nil {
return nil, err
}
p.cmdline = cmdline
}
return p.cmdline, nil
}
func (p *proc) GetIo() (procfs.ProcIO, error) {
if p.io == nil {
io, err := p.Proc.NewIO()
if err != nil {
return procfs.ProcIO{}, err
}
p.io = &io
}
return *p.io, nil
}
func (p proc) GetStatic() (ProcStatic, error) {
cmdline, err := p.GetCmdLine()
if err != nil {
return ProcStatic{}, err
}
stat, err := p.GetStat()
if err != nil {
return ProcStatic{}, err
}
startTime := time.Unix(p.bootTime, 0)
startTime = startTime.Add(time.Second / userHZ * time.Duration(stat.Starttime))
return ProcStatic{
Name: stat.Comm,
Cmdline: cmdline,
ParentPid: stat.PPID,
StartTime: startTime,
}, nil
}
func (p proc) GetMetrics() (ProcMetrics, error) {
io, err := p.GetIo()
if err != nil {
return ProcMetrics{}, err
}
stat, err := p.GetStat()
if err != nil {
return ProcMetrics{}, err
}
return ProcMetrics{
CpuTime: stat.CPUTime(),
ReadBytes: io.ReadBytes,
WriteBytes: io.WriteBytes,
ResidentBytes: uint64(stat.ResidentMemory()),
VirtualBytes: uint64(stat.VirtualMemory()),
}, nil
}
type FS struct {
procfs.FS
BootTime int64
}
// See https://github.com/prometheus/procfs/blob/master/proc_stat.go for details on userHZ.
const userHZ = 100
// NewFS returns a new FS mounted under the given mountPoint. It will error
// if the mount point can't be read.
func NewFS(mountPoint string) (*FS, error) {
fs, err := procfs.NewFS(mountPoint)
if err != nil {
return nil, err
}
stat, err := fs.NewStat()
if err != nil {
return nil, err
}
return &FS{fs, stat.BootTime}, nil
}
func (fs *FS) AllProcs() ProcIter {
procs, err := fs.FS.AllProcs()
if err != nil {
err = fmt.Errorf("Error reading procs: %v", err)
}
return &procIterator{procs: procfsprocs{procs, fs.BootTime}, err: err, idx: -1}
}
func (pi *procIterator) Next() bool {
pi.idx++
if pi.idx < pi.procs.length() {
pi.Proc = pi.procs.get(pi.idx)
} else {
pi.Proc = nil
}
return pi.idx < pi.procs.length()
}
func (pi *procIterator) Close() error {
pi.Next()
pi.procs = nil
pi.Proc = nil
return pi.err
}
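
As a sketch of how the iterator is meant to be consumed (assumed usage, not from this commit):

```go
package main

import (
	"fmt"
	"log"

	"github.com/ncabatoff/process-exporter/proc"
)

func main() {
	fs, err := proc.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	it := fs.AllProcs()
	for it.Next() {
		info, err := proc.Info(it) // id, static data and metrics in one call
		if err != nil {
			continue // the process may have exited or be unreadable
		}
		fmt.Printf("pid=%d name=%s cpu=%.2fs\n", info.Pid, info.Name, info.CpuTime)
	}
	// Close reports any error encountered while listing procs.
	if err := it.Close(); err != nil {
		log.Fatal(err)
	}
}
```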

View file

@ -0,0 +1,160 @@
package proc
import (
"fmt"
"os"
"time"
)
type (
Counts struct {
Cpu float64
ReadBytes uint64
WriteBytes uint64
}
Memory struct {
Resident uint64
Virtual uint64
}
// Tracker tracks processes and records metrics.
Tracker struct {
// Tracked holds the processes that are being monitored. Processes
// may be blacklisted such that they no longer get tracked by
// setting their value in the Tracked map to nil.
Tracked map[ProcId]*TrackedProc
// ProcIds is a map from pid to ProcId. This is a convenience
// to allow finding the Tracked entry of a parent process.
ProcIds map[int]ProcId
}
// TrackedProc accumulates metrics for a process, as well as
// remembering an optional GroupName tag associated with it.
TrackedProc struct {
// lastUpdate is used internally during the update cycle to find which procs have exited
lastUpdate time.Time
// info is the most recently obtained info for this proc
info ProcInfo
// accum is the total CPU and IO accrued since we started tracking this proc
accum Counts
// lastaccum is the CPU and IO accrued in the last Update()
lastaccum Counts
// GroupName is an optional tag for this proc.
GroupName string
}
)
func (tp *TrackedProc) GetName() string {
return tp.info.Name
}
func (tp *TrackedProc) GetCmdLine() []string {
return tp.info.Cmdline
}
func (tp *TrackedProc) GetStats() (aggregate, latest Counts, mem Memory, start time.Time) {
return tp.accum, tp.lastaccum, Memory{Resident: tp.info.ResidentBytes, Virtual: tp.info.VirtualBytes}, tp.info.StartTime
}
func NewTracker() *Tracker {
return &Tracker{Tracked: make(map[ProcId]*TrackedProc), ProcIds: make(map[int]ProcId)}
}
func (t *Tracker) Track(groupName string, idinfo ProcIdInfo) {
info := ProcInfo{idinfo.ProcStatic, idinfo.ProcMetrics}
t.Tracked[idinfo.ProcId] = &TrackedProc{GroupName: groupName, info: info}
}
func (t *Tracker) Ignore(id ProcId) {
t.Tracked[id] = nil
}
// Scan procs and update metrics for those which are tracked. Processes that have gone
// away get removed from the Tracked map. New processes are returned, along with the count
// of permission errors.
func (t *Tracker) Update(procs ProcIter) ([]ProcIdInfo, int, error) {
now := time.Now()
var newProcs []ProcIdInfo
var permissionErrors int
for procs.Next() {
procId, err := procs.GetProcId()
if err != nil {
continue
}
last, known := t.Tracked[procId]
// Are we ignoring this proc?
if known && last == nil {
continue
}
// TODO if just the io file is unreadable, should we still return the other metrics?
metrics, err := procs.GetMetrics()
if err != nil {
if os.IsPermission(err) {
permissionErrors++
t.Ignore(procId)
}
continue
}
if known {
var newaccum, lastaccum Counts
dcpu := metrics.CpuTime - last.info.CpuTime
drbytes := metrics.ReadBytes - last.info.ReadBytes
dwbytes := metrics.WriteBytes - last.info.WriteBytes
lastaccum = Counts{Cpu: dcpu, ReadBytes: drbytes, WriteBytes: dwbytes}
newaccum = Counts{
Cpu: last.accum.Cpu + lastaccum.Cpu,
ReadBytes: last.accum.ReadBytes + lastaccum.ReadBytes,
WriteBytes: last.accum.WriteBytes + lastaccum.WriteBytes,
}
last.info.ProcMetrics = metrics
last.lastUpdate = now
last.accum = newaccum
last.lastaccum = lastaccum
} else {
static, err := procs.GetStatic()
if err != nil {
continue
}
newProcs = append(newProcs, ProcIdInfo{procId, static, metrics})
// Is this a new process with the same pid as one we already know?
if oldProcId, ok := t.ProcIds[procId.Pid]; ok {
// Delete it from known, otherwise the cleanup below will remove the
// ProcIds entry we're about to create
delete(t.Tracked, oldProcId)
}
t.ProcIds[procId.Pid] = procId
}
}
err := procs.Close()
if err != nil {
return nil, permissionErrors, fmt.Errorf("Error reading procs: %v", err)
}
// Rather than allocating a new map each time to detect procs that have
// disappeared, we bump the last update time on those that are still
// present. Then as a second pass we traverse the map looking for
// stale procs and removing them.
for procId, pinfo := range t.Tracked {
if pinfo == nil {
// TODO is this a bug? we're not tracking the proc so we don't see it go away so ProcIds
// and Tracked are leaking?
continue
}
if pinfo.lastUpdate != now {
delete(t.Tracked, procId)
delete(t.ProcIds, procId.Pid)
}
}
return newProcs, permissionErrors, nil
}
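
For orientation, a sketch of the Track/Ignore/Update cycle as the Grouper above uses it; the group names are hypothetical and `fs` is reused from the earlier sketch:

```go
// Hypothetical Tracker lifecycle: scan once, classify new procs, scan again.
t := proc.NewTracker()
newProcs, _, err := t.Update(fs.AllProcs())
if err != nil {
	log.Fatal(err)
}
for _, p := range newProcs {
	if p.Name == "bash" {
		t.Track("shells", p) // accumulate this proc's CPU/IO under "shells"
	} else {
		t.Ignore(p.ProcId) // blacklist: skip metric reads for it from now on
	}
}
// A later Update accumulates deltas for tracked procs and drops exited ones.
_, _, _ = t.Update(fs.AllProcs())
```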

View file

@ -7,11 +7,6 @@ SoundCloud Ltd. (http://soundcloud.com/).
The following components are included in this product:
goautoneg
http://bitbucket.org/ww/goautoneg
Copyright 2011, Open Knowledge Foundation Ltd.
See README.txt for license details.
perks - a fork of https://github.com/bmizerany/perks
https://github.com/beorn7/perks
Copyright 2013-2015 Blake Mizerany, Björn Rabenstein

View file

@ -1,53 +1 @@
# Overview
This is the [Prometheus](http://www.prometheus.io) telemetric
instrumentation client [Go](http://golang.org) client library. It
enables authors to define process-space metrics for their servers and
expose them through a web service interface for extraction,
aggregation, and a whole slew of other post processing techniques.
# Installing
$ go get github.com/prometheus/client_golang/prometheus
# Example
```go
package main
import (
"net/http"
"github.com/prometheus/client_golang/prometheus"
)
var (
indexed = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: "my_company",
Subsystem: "indexer",
Name: "documents_indexed",
Help: "The number of documents indexed.",
})
size = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: "my_company",
Subsystem: "storage",
Name: "documents_total_size_bytes",
Help: "The total size of all documents in the storage.",
})
)
func main() {
http.Handle("/metrics", prometheus.Handler())
indexed.Inc()
size.Set(5)
http.ListenAndServe(":8080", nil)
}
func init() {
prometheus.MustRegister(indexed)
prometheus.MustRegister(size)
}
```
# Documentation
[![GoDoc](https://godoc.org/github.com/prometheus/client_golang?status.png)](https://godoc.org/github.com/prometheus/client_golang)
See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus).

View file

@ -15,15 +15,15 @@ package prometheus
// Collector is the interface implemented by anything that can be used by
// Prometheus to collect metrics. A Collector has to be registered for
// collection. See Register, MustRegister, RegisterOrGet, and MustRegisterOrGet.
// collection. See Registerer.Register.
//
// The stock metrics provided by this package (like Gauge, Counter, Summary) are
// also Collectors (which only ever collect one metric, namely itself). An
// implementer of Collector may, however, collect multiple metrics in a
// coordinated fashion and/or create metrics on the fly. Examples for collectors
// already implemented in this library are the metric vectors (i.e. collection
// of multiple instances of the same Metric but with different label values)
// like GaugeVec or SummaryVec, and the ExpvarCollector.
// The stock metrics provided by this package (Gauge, Counter, Summary,
// Histogram, Untyped) are also Collectors (which only ever collect one metric,
// namely itself). An implementer of Collector may, however, collect multiple
// metrics in a coordinated fashion and/or create metrics on the fly. Examples
// for collectors already implemented in this library are the metric vectors
// (i.e. collection of multiple instances of the same Metric but with different
// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
type Collector interface {
// Describe sends the super-set of all possible descriptors of metrics
// collected by this Collector to the provided channel and returns once
@ -37,39 +37,39 @@ type Collector interface {
// executing this method, it must send an invalid descriptor (created
// with NewInvalidDesc) to signal the error to the registry.
Describe(chan<- *Desc)
// Collect is called by Prometheus when collecting metrics. The
// implementation sends each collected metric via the provided channel
// and returns once the last metric has been sent. The descriptor of
// each sent metric is one of those returned by Describe. Returned
// metrics that share the same descriptor must differ in their variable
// label values. This method may be called concurrently and must
// therefore be implemented in a concurrency safe way. Blocking occurs
// at the expense of total performance of rendering all registered
// metrics. Ideally, Collector implementations support concurrent
// readers.
// Collect is called by the Prometheus registry when collecting
// metrics. The implementation sends each collected metric via the
// provided channel and returns once the last metric has been sent. The
// descriptor of each sent metric is one of those returned by
// Describe. Returned metrics that share the same descriptor must differ
// in their variable label values. This method may be called
// concurrently and must therefore be implemented in a concurrency safe
// way. Blocking occurs at the expense of total performance of rendering
// all registered metrics. Ideally, Collector implementations support
// concurrent readers.
Collect(chan<- Metric)
}
// SelfCollector implements Collector for a single Metric so that that the
// Metric collects itself. Add it as an anonymous field to a struct that
// implements Metric, and call Init with the Metric itself as an argument.
type SelfCollector struct {
// selfCollector implements Collector for a single Metric so that the Metric
// collects itself. Add it as an anonymous field to a struct that implements
// Metric, and call init with the Metric itself as an argument.
type selfCollector struct {
self Metric
}
// Init provides the SelfCollector with a reference to the metric it is supposed
// init provides the selfCollector with a reference to the metric it is supposed
// to collect. It is usually called within the factory function to create a
// metric. See example.
func (c *SelfCollector) Init(self Metric) {
func (c *selfCollector) init(self Metric) {
c.self = self
}
// Describe implements Collector.
func (c *SelfCollector) Describe(ch chan<- *Desc) {
func (c *selfCollector) Describe(ch chan<- *Desc) {
ch <- c.self.Desc()
}
// Collect implements Collector.
func (c *SelfCollector) Collect(ch chan<- Metric) {
func (c *selfCollector) Collect(ch chan<- Metric) {
ch <- c.self
}

View file

@ -15,7 +15,6 @@ package prometheus
import (
"errors"
"hash/fnv"
)
// Counter is a Metric that represents a single numerical value that only ever
@ -31,13 +30,8 @@ type Counter interface {
Metric
Collector
// Set is used to set the Counter to an arbitrary value. It is only used
// if you have to transfer a value from an external counter into this
// Prometheus metric. Do not use it for regular handling of a
// Prometheus counter (as it can be used to break the contract of
// monotonically increasing values).
Set(float64)
// Inc increments the counter by 1.
// Inc increments the counter by 1. Use Add to increment it by arbitrary
// non-negative values.
Inc()
// Add adds the given value to the counter. It panics if the value is <
// 0.
@ -56,7 +50,7 @@ func NewCounter(opts CounterOpts) Counter {
opts.ConstLabels,
)
result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
result.Init(result) // Init self-collection.
result.init(result) // Init self-collection.
return result
}
@ -80,7 +74,7 @@ func (c *counter) Add(v float64) {
// CounterVec embeds MetricVec. See there for a full list of methods with
// detailed documentation.
type CounterVec struct {
MetricVec
*MetricVec
}
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
@ -94,20 +88,15 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
opts.ConstLabels,
)
return &CounterVec{
MetricVec: MetricVec{
children: map[uint64]Metric{},
desc: desc,
hash: fnv.New64a(),
newMetric: func(lvs ...string) Metric {
result := &counter{value: value{
desc: desc,
valType: CounterValue,
labelPairs: makeLabelPairs(desc, lvs),
}}
result.Init(result) // Init self-collection.
return result
},
},
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
result := &counter{value: value{
desc: desc,
valType: CounterValue,
labelPairs: makeLabelPairs(desc, lvs),
}}
result.init(result) // Init self-collection.
return result
}),
}
}
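
The public constructor is unchanged by the newMetricVec refactor; typical usage still looks like this (a usage sketch, metric names are illustrative):

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	httpReqs := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "HTTP requests partitioned by status code and method.",
		},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(httpReqs)
	// Each distinct label combination is a separate counter child.
	httpReqs.WithLabelValues("200", "GET").Inc()
}
```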

View file

@ -1,24 +1,30 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"bytes"
"errors"
"fmt"
"hash/fnv"
"regexp"
"sort"
"strings"
"github.com/golang/protobuf/proto"
"github.com/prometheus/common/model"
dto "github.com/prometheus/client_model/go"
)
var (
metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
)
// reservedLabelPrefix is a prefix which is not legal in user-supplied
// label names.
const reservedLabelPrefix = "__"
@ -67,7 +73,7 @@ type Desc struct {
// Help string. Each Desc with the same fqName must have the same
// dimHash.
dimHash uint64
// err is an error that occured during construction. It is reported on
// err is an error that occurred during construction. It is reported on
// registration time.
err error
}
@ -92,7 +98,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
d.err = errors.New("empty help string")
return d
}
if !metricNameRE.MatchString(fqName) {
if !model.IsValidMetricName(model.LabelValue(fqName)) {
d.err = fmt.Errorf("%q is not a valid metric name", fqName)
return d
}
@ -131,31 +137,24 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
d.err = errors.New("duplicate label names")
return d
}
h := fnv.New64a()
var b bytes.Buffer // To copy string contents into, avoiding []byte allocations.
vh := hashNew()
for _, val := range labelValues {
b.Reset()
b.WriteString(val)
b.WriteByte(separatorByte)
h.Write(b.Bytes())
vh = hashAdd(vh, val)
vh = hashAddByte(vh, separatorByte)
}
d.id = h.Sum64()
d.id = vh
// Sort labelNames so that order doesn't matter for the hash.
sort.Strings(labelNames)
// Now hash together (in this order) the help string and the sorted
// label names.
h.Reset()
b.Reset()
b.WriteString(help)
b.WriteByte(separatorByte)
h.Write(b.Bytes())
lh := hashNew()
lh = hashAdd(lh, help)
lh = hashAddByte(lh, separatorByte)
for _, labelName := range labelNames {
b.Reset()
b.WriteString(labelName)
b.WriteByte(separatorByte)
h.Write(b.Bytes())
lh = hashAdd(lh, labelName)
lh = hashAddByte(lh, separatorByte)
}
d.dimHash = h.Sum64()
d.dimHash = lh
d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
for n, v := range constLabels {
@ -196,6 +195,6 @@ func (d *Desc) String() string {
}
func checkLabelName(l string) bool {
return labelNameRE.MatchString(l) &&
return model.LabelName(l).IsValid() &&
!strings.HasPrefix(l, reservedLabelPrefix)
}
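
For reference, a sketch of constructing a Desc against the new validation path (names are illustrative):

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	desc := prometheus.NewDesc(
		prometheus.BuildFQName("myapp", "queue", "depth"), // must pass model.IsValidMetricName
		"Current number of items in the work queue.",
		[]string{"queue"},               // variable labels, hashed per child
		prometheus.Labels{"shard": "0"}, // const labels
	)
	_ = desc
}
```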

View file

@ -11,18 +11,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package prometheus provides embeddable metric primitives for servers and
// standardized exposition of telemetry through a web services interface.
// Package prometheus provides metrics primitives to instrument code for
// monitoring. It also offers a registry for metrics. Sub-packages allow
// exposing the registered metrics via HTTP (package promhttp) or pushing them to a
// Pushgateway (package push).
//
// All exported functions and methods are safe to be used concurrently unless
// specified otherwise.
//specified otherwise.
//
// To expose metrics registered with the Prometheus registry, an HTTP server
// needs to know about the Prometheus handler. The usual endpoint is "/metrics".
// A Basic Example
//
// http.Handle("/metrics", prometheus.Handler())
//
// As a starting point a very basic usage example:
// As a starting point, a very basic usage example:
//
// package main
//
@ -30,6 +29,7 @@
// "net/http"
//
// "github.com/prometheus/client_golang/prometheus"
// "github.com/prometheus/client_golang/prometheus/promhttp"
// )
//
// var (
@ -37,73 +37,145 @@
// Name: "cpu_temperature_celsius",
// Help: "Current temperature of the CPU.",
// })
// hdFailures = prometheus.NewCounter(prometheus.CounterOpts{
// Name: "hd_errors_total",
// Help: "Number of hard-disk errors.",
// })
// hdFailures = prometheus.NewCounterVec(
// prometheus.CounterOpts{
// Name: "hd_errors_total",
// Help: "Number of hard-disk errors.",
// },
// []string{"device"},
// )
// )
//
// func init() {
// // Metrics have to be registered to be exposed:
// prometheus.MustRegister(cpuTemp)
// prometheus.MustRegister(hdFailures)
// }
//
// func main() {
// cpuTemp.Set(65.3)
// hdFailures.Inc()
// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
//
// http.Handle("/metrics", prometheus.Handler())
// http.ListenAndServe(":8080", nil)
// // The Handler function provides a default handler to expose metrics
// // via an HTTP server. "/metrics" is the usual endpoint for that.
// http.Handle("/metrics", promhttp.Handler())
// log.Fatal(http.ListenAndServe(":8080", nil))
// }
//
//
// This is a complete program that exports two metrics, a Gauge and a Counter.
// It also exports some stats about the HTTP usage of the /metrics
// endpoint. (See the Handler function for more detail.)
// This is a complete program that exports two metrics, a Gauge and a Counter,
// the latter with a label attached to turn it into a (one-dimensional) vector.
//
// Two more advanced metric types are the Summary and Histogram.
// Metrics
//
// In addition to the fundamental metric types Gauge, Counter, Summary, and
// Histogram, a very important part of the Prometheus data model is the
// partitioning of samples along dimensions called labels, which results in
// The number of exported identifiers in this package might appear a bit
// overwhelming. However, in addition to the basic plumbing shown in the example
// above, you only need to understand the different metric types and their
// vector versions for basic usage.
//
// Above, you have already touched the Counter and the Gauge. There are two more
// advanced metric types: the Summary and Histogram. A more thorough description
// of those four metric types can be found in the Prometheus docs:
// https://prometheus.io/docs/concepts/metric_types/
//
// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
// Prometheus server not to assume anything about its type.
//
// In addition to the fundamental metric types Gauge, Counter, Summary,
// Histogram, and Untyped, a very important part of the Prometheus data model is
// the partitioning of samples along dimensions called labels, which results in
// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
// and HistogramVec.
// HistogramVec, and UntypedVec.
//
// Those are all the parts needed for basic usage. Detailed documentation and
// examples are provided below.
// While only the fundamental metric types implement the Metric interface, both
// the metrics and their vector versions implement the Collector interface. A
// Collector manages the collection of a number of Metrics, but for convenience,
// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
// SummaryVec, HistogramVec, and UntypedVec are not.
//
// Everything else this package offers is essentially for "power users" only. A
// few pointers to "power user features":
// To create instances of Metrics and their vector versions, you need a suitable
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts,
// HistogramOpts, or UntypedOpts.
//
// All the various ...Opts structs have a ConstLabels field for labels that
// never change their value (which is only useful under special circumstances,
// see documentation of the Opts type).
// Custom Collectors and constant Metrics
//
// The Untyped metric behaves like a Gauge, but signals the Prometheus server
// not to assume anything about its type.
// While you could create your own implementations of Metric, most likely you
// will only ever implement the Collector interface on your own. At a first
// glance, a custom Collector seems handy to bundle Metrics for common
// registration (with the prime example of the different metric vectors above,
// which bundle all the metrics of the same name but with different labels).
//
// Functions to fine-tune how the metric registry works: EnableCollectChecks,
// PanicOnCollectError, Register, Unregister, SetMetricFamilyInjectionHook.
// There is a more involved use case, too: If you already have metrics
// available, created outside of the Prometheus context, you don't need the
// interface of the various Metric types. You essentially want to mirror the
// existing numbers into Prometheus Metrics during collection. An own
// implementation of the Collector interface is perfect for that. You can create
// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
// NewConstSummary (and their respective Must… versions). That will happen in
// the Collect method. The Describe method has to return separate Desc
// instances, representative of the “throw-away” metrics to be created
// later. NewDesc comes in handy to create those Desc instances.
//
// For custom metric collection, there are two entry points: Custom Metric
// implementations and custom Collector implementations. A Metric is the
// fundamental unit in the Prometheus data model: a sample at a point in time
// together with its meta-data (like its fully-qualified name and any number of
// pairs of label name and label value) that knows how to marshal itself into a
// data transfer object (aka DTO, implemented as a protocol buffer). A Collector
// gets registered with the Prometheus registry and manages the collection of
// one or more Metrics. Many parts of this package are building blocks for
// Metrics and Collectors. Desc is the metric descriptor, actually used by all
// metrics under the hood, and by Collectors to describe the Metrics to be
// collected, but only to be dealt with by users if they implement their own
// Metrics or Collectors. To create a Desc, the BuildFQName function will come
// in handy. Other useful components for Metric and Collector implementation
// include: LabelPairSorter to sort the DTO version of label pairs,
// NewConstMetric and MustNewConstMetric to create "throw away" Metrics at
// collection time, MetricVec to bundle custom Metrics into a metric vector
// Collector, SelfCollector to make a custom Metric collect itself.
// The Collector example illustrates the use case. You can also look at the
// source code of the processCollector (mirroring process metrics), the
// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
// metrics) as examples that are used in this package itself.
//
// A good example for a custom Collector is the ExpVarCollector included in this
// package, which exports variables exported via the "expvar" package as
// Prometheus metrics.
// If you just need to call a function to get a single float value to collect as
// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
// shortcuts.
//
// Advanced Uses of the Registry
//
// While MustRegister is the by far most common way of registering a Collector,
// sometimes you might want to handle the errors the registration might
// cause. As suggested by the name, MustRegister panics if an error occurs. With
// the Register function, the error is returned and can be handled.
//
// An error is returned if the registered Collector is incompatible or
// inconsistent with already registered metrics. The registry aims for
// consistency of the collected metrics according to the Prometheus data
// model. Inconsistencies are ideally detected at registration time, not at
// collect time. The former will usually be detected at start-up time of a
// program, while the latter will only happen at scrape time, possibly not even
// on the first scrape if the inconsistency only becomes relevant later. That is
// the main reason why a Collector and a Metric have to describe themselves to
// the registry.
//
// So far, everything we did operated on the so-called default registry, as it
// can be found in the global DefaultRegistry variable. With NewRegistry, you
// can create a custom registry, or you can even implement the Registerer or
// Gatherer interfaces yourself. The methods Register and Unregister work in
// the same way on a custom registry as the global functions Register and
// Unregister on the default registry.
//
// There are a number of uses for custom registries: You can use registries
// with special properties, see NewPedanticRegistry. You can avoid global state,
// as it is imposed by the DefaultRegistry. You can use multiple registries at
// the same time to expose different metrics in different ways. You can use
// separate registries for testing purposes.
//
// Also note that the DefaultRegistry comes registered with a Collector for Go
// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
// NewProcessCollector). With a custom registry, you are in control and decide
// yourself about the Collectors to register.
//
// HTTP Exposition
//
// The Registry implements the Gatherer interface. The caller of the Gather
// method can then expose the gathered metrics in some way. Usually, the metrics
// are served via HTTP on the /metrics endpoint. That's happening in the example
// above. The tools to expose metrics via HTTP are in the promhttp
// sub-package. (The top-level functions in the prometheus package are
// deprecated.)
//
// Pushing to the Pushgateway
//
// Functions for pushing to the Pushgateway can be found in the push sub-package.
//
// Other Means of Exposition
//
// More ways of exposing metrics can easily be added. Sending metrics to
// Graphite would be an example that will soon be implemented.
package prometheus
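
The "mirror existing numbers" use case described above might look like this minimal Collector sketch; `queueCollector` and `readQueueDepth` are hypothetical stand-ins, not part of the package:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

// readQueueDepth is an assumed stand-in for however the application
// tracks its queue depth outside of Prometheus.
func readQueueDepth() float64 { return 42 }

type queueCollector struct {
	depth *prometheus.Desc
}

// Describe sends the one Desc this collector will ever use.
func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.depth
}

// Collect creates a throw-away const metric from an externally kept number.
func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.depth, prometheus.GaugeValue, readQueueDepth())
}

func main() {
	prometheus.MustRegister(&queueCollector{
		depth: prometheus.NewDesc("queue_depth", "Items currently queued.", nil, nil),
	})
}
```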

View file

@ -18,21 +18,21 @@ import (
"expvar"
)
// ExpvarCollector collects metrics from the expvar interface. It provides a
// quick way to expose numeric values that are already exported via expvar as
// Prometheus metrics. Note that the data models of expvar and Prometheus are
// fundamentally different, and that the ExpvarCollector is inherently
// slow. Thus, the ExpvarCollector is probably great for experiments and
// prototying, but you should seriously consider a more direct implementation of
// Prometheus metrics for monitoring production systems.
//
// Use NewExpvarCollector to create new instances.
type ExpvarCollector struct {
type expvarCollector struct {
exports map[string]*Desc
}
// NewExpvarCollector returns a newly allocated ExpvarCollector that still has
// to be registered with the Prometheus registry.
// NewExpvarCollector returns a newly allocated expvar Collector that still has
// to be registered with a Prometheus registry.
//
// An expvar Collector collects metrics from the expvar interface. It provides a
// quick way to expose numeric values that are already exported via expvar as
// Prometheus metrics. Note that the data models of expvar and Prometheus are
// fundamentally different, and that the expvar Collector is inherently slower
// than native Prometheus metrics. Thus, the expvar Collector is probably great
// for experiments and prototyping, but you should seriously consider a more
// direct implementation of Prometheus metrics for monitoring production
// systems.
//
// The exports map has the following meaning:
//
@ -59,21 +59,21 @@ type ExpvarCollector struct {
// sample values.
//
// Anything that does not fit into the scheme above is silently ignored.
func NewExpvarCollector(exports map[string]*Desc) *ExpvarCollector {
return &ExpvarCollector{
func NewExpvarCollector(exports map[string]*Desc) Collector {
return &expvarCollector{
exports: exports,
}
}
// Describe implements Collector.
func (e *ExpvarCollector) Describe(ch chan<- *Desc) {
func (e *expvarCollector) Describe(ch chan<- *Desc) {
for _, desc := range e.exports {
ch <- desc
}
}
// Collect implements Collector.
func (e *ExpvarCollector) Collect(ch chan<- Metric) {
func (e *expvarCollector) Collect(ch chan<- Metric) {
for name, desc := range e.exports {
var m Metric
expVar := expvar.Get(name)
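
A usage sketch for the expvar bridge (metric and key names are illustrative):

```go
package main

import (
	"expvar"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Expose an existing expvar counter as an untyped Prometheus metric.
	requests := expvar.NewInt("http_requests")
	requests.Add(1)
	prometheus.MustRegister(prometheus.NewExpvarCollector(map[string]*prometheus.Desc{
		"http_requests": prometheus.NewDesc(
			"expvar_http_requests_total",
			"HTTP requests as counted via expvar.",
			nil, nil, // no variable labels: the expvar value must be a plain number
		),
	}))
}
```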

View file

@ -0,0 +1,29 @@
package prometheus
// Inline and byte-free variant of hash/fnv's fnv64a.
const (
offset64 = 14695981039346656037
prime64 = 1099511628211
)
// hashNew initializes a new fnv64a hash value.
func hashNew() uint64 {
return offset64
}
// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
func hashAdd(h uint64, s string) uint64 {
for i := 0; i < len(s); i++ {
h ^= uint64(s[i])
h *= prime64
}
return h
}
// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
func hashAddByte(h uint64, b byte) uint64 {
h ^= uint64(b)
h *= prime64
return h
}
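
Since this replaces hash/fnv in Desc, a quick sanity check that the inlined variant produces identical values (a standalone sketch, reusing the constants above):

```go
package main

import (
	"fmt"
	"hash/fnv"
)

const (
	offset64 = 14695981039346656037
	prime64  = 1099511628211
)

// hashAdd mirrors the inlined fnv64a from above.
func hashAdd(h uint64, s string) uint64 {
	for i := 0; i < len(s); i++ {
		h ^= uint64(s[i])
		h *= prime64
	}
	return h
}

func main() {
	std := fnv.New64a()
	std.Write([]byte("some help string"))
	fmt.Println(hashAdd(offset64, "some help string") == std.Sum64()) // true
}
```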

View file

@ -13,8 +13,6 @@
package prometheus
import "hash/fnv"
// Gauge is a Metric that represents a single numerical value that can
// arbitrarily go up and down.
//
@ -29,16 +27,21 @@ type Gauge interface {
// Set sets the Gauge to an arbitrary value.
Set(float64)
// Inc increments the Gauge by 1.
// Inc increments the Gauge by 1. Use Add to increment it by arbitrary
// values.
Inc()
// Dec decrements the Gauge by 1.
// Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
// values.
Dec()
// Add adds the given value to the Gauge. (The value can be
// negative, resulting in a decrease of the Gauge.)
// Add adds the given value to the Gauge. (The value can be negative,
// resulting in a decrease of the Gauge.)
Add(float64)
// Sub subtracts the given value from the Gauge. (The value can be
// negative, resulting in an increase of the Gauge.)
Sub(float64)
// SetToCurrentTime sets the Gauge to the current Unix time in seconds.
SetToCurrentTime()
}
// GaugeOpts is an alias for Opts. See there for doc comments.
@ -60,7 +63,7 @@ func NewGauge(opts GaugeOpts) Gauge {
// (e.g. number of operations queued, partitioned by user and operation
// type). Create instances with NewGaugeVec.
type GaugeVec struct {
MetricVec
*MetricVec
}
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
@ -74,14 +77,9 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
opts.ConstLabels,
)
return &GaugeVec{
MetricVec: MetricVec{
children: map[uint64]Metric{},
desc: desc,
hash: fnv.New64a(),
newMetric: func(lvs ...string) Metric {
return newValue(desc, GaugeValue, 0, lvs...)
},
},
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
return newValue(desc, GaugeValue, 0, lvs...)
}),
}
}
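
SetToCurrentTime is new in this interface; a small sketch of the intended use (metric name illustrative):

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	lastSync := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "last_sync_timestamp_seconds",
		Help: "Unix time of the last successful sync.",
	})
	prometheus.MustRegister(lastSync)
	// After a successful sync, record when it happened.
	lastSync.SetToCurrentTime()
}
```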

View file

@ -17,7 +17,7 @@ type goCollector struct {
// NewGoCollector returns a collector which exports metrics about the current
// go process.
func NewGoCollector() *goCollector {
func NewGoCollector() Collector {
return &goCollector{
goroutines: NewGauge(GaugeOpts{
Namespace: "go",
@ -48,7 +48,7 @@ func NewGoCollector() *goCollector {
}, {
desc: NewDesc(
memstatNamespace("sys_bytes"),
"Number of bytes obtained by system. Sum of all system allocations.",
"Number of bytes obtained from system.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
@ -111,12 +111,12 @@ func NewGoCollector() *goCollector {
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_released_bytes_total"),
"Total number of heap bytes released to OS.",
memstatNamespace("heap_released_bytes"),
"Number of heap bytes released to OS.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
valType: CounterValue,
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_objects"),
@ -211,7 +211,7 @@ func NewGoCollector() *goCollector {
"Number of seconds since 1970 of last garbage collection.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC*10 ^ 9) },
eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
valType: GaugeValue,
},
},

View file

@ -15,7 +15,6 @@ package prometheus
import (
"fmt"
"hash/fnv"
"math"
"sort"
"sync/atomic"
@ -52,11 +51,11 @@ type Histogram interface {
// bucket of a histogram ("le" -> "less or equal").
const bucketLabel = "le"
// DefBuckets are the default Histogram buckets. The default buckets are
// tailored to broadly measure the response time (in seconds) of a network
// service. Most likely, however, you will be required to define buckets
// customized to your use case.
var (
// DefBuckets are the default Histogram buckets. The default buckets are
// tailored to broadly measure the response time (in seconds) of a
// network service. Most likely, however, you will be required to define
// buckets customized to your use case.
DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
errBucketLabelNotAllowed = fmt.Errorf(
@ -211,7 +210,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
// Finally we know the final length of h.upperBounds and can make counts.
h.counts = make([]uint64, len(h.upperBounds))
h.Init(h) // Init self-collection.
h.init(h) // Init self-collection.
return h
}
@ -223,7 +222,7 @@ type histogram struct {
sumBits uint64
count uint64
SelfCollector
selfCollector
// Note that there is no mutex required.
desc *Desc
@ -288,7 +287,7 @@ func (h *histogram) Write(out *dto.Metric) error {
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewHistogramVec.
type HistogramVec struct {
MetricVec
*MetricVec
}
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
@@ -302,14 +301,9 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
opts.ConstLabels,
)
return &HistogramVec{
MetricVec: MetricVec{
children: map[uint64]Metric{},
desc: desc,
hash: fnv.New64a(),
newMetric: func(lvs ...string) Metric {
return newHistogram(desc, opts, lvs...)
},
},
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
return newHistogram(desc, opts, lvs...)
}),
}
}
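
The same pattern applies to the histogram vector; a sketch assuming the default buckets (names are illustrative):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	latency := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "http_request_duration_seconds",
		Help:    "Request latency in seconds, partitioned by status code and method.",
		Buckets: prometheus.DefBuckets,
	}, []string{"code", "method"})
	prometheus.MustRegister(latency)

	latency.WithLabelValues("200", "GET").Observe(0.042)
}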

View file

@@ -15,14 +15,114 @@ package prometheus
import (
"bufio"
"bytes"
"compress/gzip"
"fmt"
"io"
"net"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/prometheus/common/expfmt"
)
// TODO(beorn7): Remove this whole file. It is a partial mirror of
// promhttp/http.go (to avoid circular import chains) where everything HTTP
// related should live. The functions here are just for avoiding
// breakage. Everything is deprecated.
const (
contentTypeHeader = "Content-Type"
contentLengthHeader = "Content-Length"
contentEncodingHeader = "Content-Encoding"
acceptEncodingHeader = "Accept-Encoding"
)
var bufPool sync.Pool
func getBuf() *bytes.Buffer {
buf := bufPool.Get()
if buf == nil {
return &bytes.Buffer{}
}
return buf.(*bytes.Buffer)
}
func giveBuf(buf *bytes.Buffer) {
buf.Reset()
bufPool.Put(buf)
}
// Handler returns an HTTP handler for the DefaultGatherer. It is
// already instrumented with InstrumentHandler (using "prometheus" as handler
// name).
//
// Deprecated: Please note the issues described in the doc comment of
// InstrumentHandler. You might want to consider using promhttp.Handler instead
// (which is not instrumented).
func Handler() http.Handler {
return InstrumentHandler("prometheus", UninstrumentedHandler())
}
// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
//
// Deprecated: Use promhttp.Handler instead. See there for further documentation.
func UninstrumentedHandler() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
mfs, err := DefaultGatherer.Gather()
if err != nil {
http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError)
return
}
contentType := expfmt.Negotiate(req.Header)
buf := getBuf()
defer giveBuf(buf)
writer, encoding := decorateWriter(req, buf)
enc := expfmt.NewEncoder(writer, contentType)
var lastErr error
for _, mf := range mfs {
if err := enc.Encode(mf); err != nil {
lastErr = err
http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
return
}
}
if closer, ok := writer.(io.Closer); ok {
closer.Close()
}
if lastErr != nil && buf.Len() == 0 {
http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
return
}
header := w.Header()
header.Set(contentTypeHeader, string(contentType))
header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
if encoding != "" {
header.Set(contentEncodingHeader, encoding)
}
w.Write(buf.Bytes())
})
}
// decorateWriter wraps a writer to handle gzip compression if requested. It
// returns the decorated writer and the appropriate "Content-Encoding" header
// (which is empty if no compression is enabled).
func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
header := request.Header.Get(acceptEncodingHeader)
parts := strings.Split(header, ",")
for _, part := range parts {
part := strings.TrimSpace(part)
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
return gzip.NewWriter(writer), "gzip"
}
}
return writer, ""
}
var instLabels = []string{"method", "code"}
type nower interface {
@@ -57,29 +157,52 @@ func nowSeries(t ...time.Time) nower {
// has a constant label named "handler" with the provided handlerName as
// value. http_requests_total is a metric vector partitioned by HTTP method
// (label name "method") and HTTP status code (label name "code").
//
// Deprecated: InstrumentHandler has several issues:
//
// - It uses Summaries rather than Histograms. Summaries are not useful if
// aggregation across multiple instances is required.
//
// - It uses microseconds as unit, which is deprecated and should be replaced by
// seconds.
//
// - The size of the request is calculated in a separate goroutine. Since this
// calculator requires access to the request header, it creates a race with
// any writes to the header performed during request handling.
// httputil.ReverseProxy is a prominent example for a handler
// performing such writes.
//
// Upcoming versions of this package will provide ways of instrumenting HTTP
// handlers that are more flexible and have fewer issues. Please prefer direct
// instrumentation in the meantime.
func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
}
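
The deprecation note above recommends direct instrumentation instead. A hedged sketch of what that can look like, using seconds and a Histogram rather than microsecond Summaries (all names here are invented):

package main

import (
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var (
	reqCount = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "app_http_requests_total",
		Help: "HTTP requests, partitioned by handler.",
	}, []string{"handler"})
	reqDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "app_http_request_duration_seconds",
		Help: "HTTP request latency in seconds.",
	})
)

// instrument wraps next, counting requests and observing their duration.
func instrument(name string, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		next.ServeHTTP(w, r)
		reqCount.WithLabelValues(name).Inc()
		reqDuration.Observe(time.Since(start).Seconds())
	})
}

func main() {
	prometheus.MustRegister(reqCount, reqDuration)
	http.Handle("/", instrument("root", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})))
	http.ListenAndServe(":8080", nil)
}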
// InstrumentHandlerFunc wraps the given function for instrumentation. It
// otherwise works in the same way as InstrumentHandler.
// otherwise works in the same way as InstrumentHandler (and shares the same
// issues).
//
// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
// InstrumentHandler is.
func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
return InstrumentHandlerFuncWithOpts(
SummaryOpts{
Subsystem: "http",
ConstLabels: Labels{"handler": handlerName},
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
},
handlerFunc,
)
}
// InstrumentHandlerWithOpts works like InstrumentHandler but provides more
// flexibility (at the cost of a more complex call syntax). As
// InstrumentHandler, this function registers four metric collectors, but it
// uses the provided SummaryOpts to create them. However, the fields "Name" and
// "Help" in the SummaryOpts are ignored. "Name" is replaced by
// "requests_total", "request_duration_microseconds", "request_size_bytes", and
// "response_size_bytes", respectively. "Help" is replaced by an appropriate
// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same
// issues) but provides more flexibility (at the cost of a more complex call
// syntax). As InstrumentHandler, this function registers four metric
// collectors, but it uses the provided SummaryOpts to create them. However, the
// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced
// by "requests_total", "request_duration_microseconds", "request_size_bytes",
// and "response_size_bytes", respectively. "Help" is replaced by an appropriate
// help string. The names of the variable labels of the http_requests_total
// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
//
@@ -98,13 +221,20 @@ func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWri
// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
// and all its fields are set to the equally named fields in the provided
// SummaryOpts.
//
// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
// InstrumentHandler is.
func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
}
// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc but provides
// more flexibility (at the cost of a more complex call syntax). See
// InstrumentHandlerWithOpts for details how the provided SummaryOpts are used.
// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares
// the same issues) but provides more flexibility (at the cost of a more complex
// call syntax). See InstrumentHandlerWithOpts for details on how the provided
// SummaryOpts are used.
//
// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
// as InstrumentHandler is.
func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
reqCnt := NewCounterVec(
CounterOpts{
@@ -116,34 +246,52 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo
},
instLabels,
)
if err := Register(reqCnt); err != nil {
if are, ok := err.(AlreadyRegisteredError); ok {
reqCnt = are.ExistingCollector.(*CounterVec)
} else {
panic(err)
}
}
opts.Name = "request_duration_microseconds"
opts.Help = "The HTTP request latencies in microseconds."
reqDur := NewSummary(opts)
if err := Register(reqDur); err != nil {
if are, ok := err.(AlreadyRegisteredError); ok {
reqDur = are.ExistingCollector.(Summary)
} else {
panic(err)
}
}
opts.Name = "request_size_bytes"
opts.Help = "The HTTP request sizes in bytes."
reqSz := NewSummary(opts)
if err := Register(reqSz); err != nil {
if are, ok := err.(AlreadyRegisteredError); ok {
reqSz = are.ExistingCollector.(Summary)
} else {
panic(err)
}
}
opts.Name = "response_size_bytes"
opts.Help = "The HTTP response sizes in bytes."
resSz := NewSummary(opts)
regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec)
regReqDur := MustRegisterOrGet(reqDur).(Summary)
regReqSz := MustRegisterOrGet(reqSz).(Summary)
regResSz := MustRegisterOrGet(resSz).(Summary)
if err := Register(resSz); err != nil {
if are, ok := err.(AlreadyRegisteredError); ok {
resSz = are.ExistingCollector.(Summary)
} else {
panic(err)
}
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
delegate := &responseWriterDelegator{ResponseWriter: w}
out := make(chan int)
urlLen := 0
if r.URL != nil {
urlLen = len(r.URL.String())
}
go computeApproximateRequestSize(r, out, urlLen)
out := computeApproximateRequestSize(r)
_, cn := w.(http.CloseNotifier)
_, fl := w.(http.Flusher)
@@ -161,30 +309,44 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo
method := sanitizeMethod(r.Method)
code := sanitizeCode(delegate.status)
regReqCnt.WithLabelValues(method, code).Inc()
regReqDur.Observe(elapsed)
regResSz.Observe(float64(delegate.written))
regReqSz.Observe(float64(<-out))
reqCnt.WithLabelValues(method, code).Inc()
reqDur.Observe(elapsed)
resSz.Observe(float64(delegate.written))
reqSz.Observe(float64(<-out))
})
}
func computeApproximateRequestSize(r *http.Request, out chan int, s int) {
s += len(r.Method)
s += len(r.Proto)
for name, values := range r.Header {
s += len(name)
for _, value := range values {
s += len(value)
func computeApproximateRequestSize(r *http.Request) <-chan int {
// Get the URL length in the current goroutine to avoid a race condition:
// a HandlerFunc running in parallel may modify the URL.
s := 0
if r.URL != nil {
s += len(r.URL.String())
}
out := make(chan int, 1)
go func() {
s += len(r.Method)
s += len(r.Proto)
for name, values := range r.Header {
s += len(name)
for _, value := range values {
s += len(value)
}
}
}
s += len(r.Host)
s += len(r.Host)
// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
if r.ContentLength != -1 {
s += int(r.ContentLength)
}
out <- s
if r.ContentLength != -1 {
s += int(r.ContentLength)
}
out <- s
close(out)
}()
return out
}
type responseWriterDelegator struct {

View file

@@ -22,10 +22,8 @@ import (
const separatorByte byte = 255
// A Metric models a single sample value with its meta data being exported to
// Prometheus. Implementers of Metric in this package inclued Gauge, Counter,
// Untyped, and Summary. Users can implement their own Metric types, but that
// should be rarely needed. See the example for SelfCollector, which is also an
// example for a user-implemented Metric.
// Prometheus. Implementations of Metric in this package are Gauge, Counter,
// Histogram, Summary, and Untyped.
type Metric interface {
// Desc returns the descriptor for the Metric. This method idempotently
// returns the same descriptor throughout the lifetime of the
@@ -36,21 +34,23 @@ type Metric interface {
// Write encodes the Metric into a "Metric" Protocol Buffer data
// transmission object.
//
// Implementers of custom Metric types must observe concurrency safety
// as reads of this metric may occur at any time, and any blocking
// occurs at the expense of total performance of rendering all
// registered metrics. Ideally Metric implementations should support
// concurrent readers.
// Metric implementations must observe concurrency safety as reads of
// this metric may occur at any time, and any blocking occurs at the
// expense of total performance of rendering all registered
// metrics. Ideally, Metric implementations should support concurrent
// readers.
//
// The Prometheus client library attempts to minimize memory allocations
// and will provide a pre-existing reset dto.Metric pointer. Prometheus
// may recycle the dto.Metric proto message, so Metric implementations
// should just populate the provided dto.Metric and then should not keep
// any reference to it.
//
// While populating dto.Metric, labels must be sorted lexicographically.
// (Implementers may find LabelPairSorter useful for that.)
// While populating dto.Metric, it is the responsibility of the
// implementation to ensure validity of the Metric protobuf (like valid
// UTF-8 strings or syntactically valid metric and label names). It is
// recommended to sort labels lexicographically. (Implementers may find
// LabelPairSorter useful for that.) Callers of Write should still make
// sure of sorting if they depend on it.
Write(*dto.Metric) error
// TODO(beorn7): The original rationale of passing in a pre-allocated
// dto.Metric protobuf to save allocations has disappeared. The
// signature of this method should be changed to "Write() (*dto.Metric,
// error)".
}
// Opts bundles the options for creating most Metric types. Each metric

View file

@@ -19,16 +19,16 @@ type processCollector struct {
pid int
collectFn func(chan<- Metric)
pidFn func() (int, error)
cpuTotal Counter
openFDs, maxFDs Gauge
vsize, rss Gauge
startTime Gauge
cpuTotal *Desc
openFDs, maxFDs *Desc
vsize, rss *Desc
startTime *Desc
}
// NewProcessCollector returns a collector that exports the current state of
// process metrics, including CPU, memory, and file descriptor usage, as well
// as the process start time, for the given process ID under the given namespace.
func NewProcessCollector(pid int, namespace string) *processCollector {
func NewProcessCollector(pid int, namespace string) Collector {
return NewProcessCollectorPIDFn(
func() (int, error) { return pid, nil },
namespace,
@@ -43,41 +43,46 @@ func NewProcessCollector(pid int, namespace string) *processCollector {
func NewProcessCollectorPIDFn(
pidFn func() (int, error),
namespace string,
) *processCollector {
) Collector {
ns := ""
if len(namespace) > 0 {
ns = namespace + "_"
}
c := processCollector{
pidFn: pidFn,
collectFn: func(chan<- Metric) {},
cpuTotal: NewCounter(CounterOpts{
Namespace: namespace,
Name: "process_cpu_seconds_total",
Help: "Total user and system CPU time spent in seconds.",
}),
openFDs: NewGauge(GaugeOpts{
Namespace: namespace,
Name: "process_open_fds",
Help: "Number of open file descriptors.",
}),
maxFDs: NewGauge(GaugeOpts{
Namespace: namespace,
Name: "process_max_fds",
Help: "Maximum number of open file descriptors.",
}),
vsize: NewGauge(GaugeOpts{
Namespace: namespace,
Name: "process_virtual_memory_bytes",
Help: "Virtual memory size in bytes.",
}),
rss: NewGauge(GaugeOpts{
Namespace: namespace,
Name: "process_resident_memory_bytes",
Help: "Resident memory size in bytes.",
}),
startTime: NewGauge(GaugeOpts{
Namespace: namespace,
Name: "process_start_time_seconds",
Help: "Start time of the process since unix epoch in seconds.",
}),
cpuTotal: NewDesc(
ns+"process_cpu_seconds_total",
"Total user and system CPU time spent in seconds.",
nil, nil,
),
openFDs: NewDesc(
ns+"process_open_fds",
"Number of open file descriptors.",
nil, nil,
),
maxFDs: NewDesc(
ns+"process_max_fds",
"Maximum number of open file descriptors.",
nil, nil,
),
vsize: NewDesc(
ns+"process_virtual_memory_bytes",
"Virtual memory size in bytes.",
nil, nil,
),
rss: NewDesc(
ns+"process_resident_memory_bytes",
"Resident memory size in bytes.",
nil, nil,
),
startTime: NewDesc(
ns+"process_start_time_seconds",
"Start time of the process since unix epoch in seconds.",
nil, nil,
),
}
// Set up process metric collection if supported by the runtime.
@@ -90,12 +95,12 @@ func NewProcessCollectorPIDFn(
// Describe returns all descriptions of the collector.
func (c *processCollector) Describe(ch chan<- *Desc) {
ch <- c.cpuTotal.Desc()
ch <- c.openFDs.Desc()
ch <- c.maxFDs.Desc()
ch <- c.vsize.Desc()
ch <- c.rss.Desc()
ch <- c.startTime.Desc()
ch <- c.cpuTotal
ch <- c.openFDs
ch <- c.maxFDs
ch <- c.vsize
ch <- c.rss
ch <- c.startTime
}
// Collect returns the current state of all metrics of the collector.
@@ -117,26 +122,19 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
}
if stat, err := p.NewStat(); err == nil {
c.cpuTotal.Set(stat.CPUTime())
ch <- c.cpuTotal
c.vsize.Set(float64(stat.VirtualMemory()))
ch <- c.vsize
c.rss.Set(float64(stat.ResidentMemory()))
ch <- c.rss
ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
if startTime, err := stat.StartTime(); err == nil {
c.startTime.Set(startTime)
ch <- c.startTime
ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
}
}
if fds, err := p.FileDescriptorsLen(); err == nil {
c.openFDs.Set(float64(fds))
ch <- c.openFDs
ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
}
if limits, err := p.NewLimits(); err == nil {
c.maxFDs.Set(float64(limits.OpenFiles))
ch <- c.maxFDs
ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
}
}
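
Since both constructors now return the Collector interface, callers just register the result. A sketch using a fresh registry (the default registry already carries both collectors, so registering them there again would fail):

package main

import (
	"os"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(
		prometheus.NewProcessCollector(os.Getpid(), ""),
		prometheus.NewGoCollector(),
	)
}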

View file

@@ -0,0 +1,201 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Copyright (c) 2013, The Prometheus Authors
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
// Package promhttp contains functions to create http.Handler instances to
// expose Prometheus metrics via HTTP. In later versions of this package, it
// will also contain tooling to instrument instances of http.Handler and
// http.RoundTripper.
//
// promhttp.Handler acts on the prometheus.DefaultGatherer. With HandlerFor,
// you can create a handler for a custom registry or anything that implements
// the Gatherer interface. It also allows creating handlers that act
// differently on errors or that log errors.
package promhttp
import (
"bytes"
"compress/gzip"
"fmt"
"io"
"net/http"
"strings"
"sync"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/client_golang/prometheus"
)
const (
contentTypeHeader = "Content-Type"
contentLengthHeader = "Content-Length"
contentEncodingHeader = "Content-Encoding"
acceptEncodingHeader = "Accept-Encoding"
)
var bufPool sync.Pool
func getBuf() *bytes.Buffer {
buf := bufPool.Get()
if buf == nil {
return &bytes.Buffer{}
}
return buf.(*bytes.Buffer)
}
func giveBuf(buf *bytes.Buffer) {
buf.Reset()
bufPool.Put(buf)
}
// Handler returns an HTTP handler for the prometheus.DefaultGatherer. The
// Handler uses the default HandlerOpts, i.e. it reports the first error as an
// HTTP error, performs no error logging, and compresses the response if
// requested by the client.
//
// If you want to create a Handler for the DefaultGatherer with different
// HandlerOpts, create it with HandlerFor with prometheus.DefaultGatherer and
// your desired HandlerOpts.
func Handler() http.Handler {
return HandlerFor(prometheus.DefaultGatherer, HandlerOpts{})
}
// HandlerFor returns an http.Handler for the provided Gatherer. The behavior
// of the Handler is defined by the provided HandlerOpts.
func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
mfs, err := reg.Gather()
if err != nil {
if opts.ErrorLog != nil {
opts.ErrorLog.Println("error gathering metrics:", err)
}
switch opts.ErrorHandling {
case PanicOnError:
panic(err)
case ContinueOnError:
if len(mfs) == 0 {
http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError)
return
}
case HTTPErrorOnError:
http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError)
return
}
}
contentType := expfmt.Negotiate(req.Header)
buf := getBuf()
defer giveBuf(buf)
writer, encoding := decorateWriter(req, buf, opts.DisableCompression)
enc := expfmt.NewEncoder(writer, contentType)
var lastErr error
for _, mf := range mfs {
if err := enc.Encode(mf); err != nil {
lastErr = err
if opts.ErrorLog != nil {
opts.ErrorLog.Println("error encoding metric family:", err)
}
switch opts.ErrorHandling {
case PanicOnError:
panic(err)
case ContinueOnError:
// Handled later.
case HTTPErrorOnError:
http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
return
}
}
}
if closer, ok := writer.(io.Closer); ok {
closer.Close()
}
if lastErr != nil && buf.Len() == 0 {
http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
return
}
header := w.Header()
header.Set(contentTypeHeader, string(contentType))
header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
if encoding != "" {
header.Set(contentEncodingHeader, encoding)
}
w.Write(buf.Bytes())
// TODO(beorn7): Consider streaming serving of metrics.
})
}
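
A sketch of HandlerFor serving a custom registry, with errors logged and best-effort serving (the registry contents and the address are illustrative):

package main

import (
	"log"
	"net/http"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector())

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		ErrorLog:      log.New(os.Stderr, "promhttp: ", log.LstdFlags),
		ErrorHandling: promhttp.ContinueOnError,
	}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}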
// HandlerErrorHandling defines how a Handler serving metrics will handle
// errors.
type HandlerErrorHandling int
// These constants cause handlers serving metrics to behave as described if
// errors are encountered.
const (
// Serve an HTTP status code 500 upon the first error
// encountered. Report the error message in the body.
HTTPErrorOnError HandlerErrorHandling = iota
// Ignore errors and try to serve as many metrics as possible. However,
// if no metrics can be served, serve an HTTP status code 500 and the
// last error message in the body. Only use this in deliberate "best
// effort" metrics collection scenarios. It is recommended to at least
// log errors (by providing an ErrorLog in HandlerOpts) to not mask
// errors completely.
ContinueOnError
// Panic upon the first error encountered (useful for "crash only" apps).
PanicOnError
)
// Logger is the minimal interface HandlerOpts needs for logging. Note that
// log.Logger from the standard library implements this interface, and it is
// easy to implement by custom loggers, if they don't do so already anyway.
type Logger interface {
Println(v ...interface{})
}
// HandlerOpts specifies options how to serve metrics via an http.Handler. The
// zero value of HandlerOpts is a reasonable default.
type HandlerOpts struct {
// ErrorLog specifies an optional logger for errors collecting and
// serving metrics. If nil, errors are not logged at all.
ErrorLog Logger
// ErrorHandling defines how errors are handled. Note that errors are
// logged regardless of the configured ErrorHandling, provided ErrorLog
// is not nil.
ErrorHandling HandlerErrorHandling
// If DisableCompression is true, the handler will never compress the
// response, even if requested by the client.
DisableCompression bool
}
// decorateWriter wraps a writer to handle gzip compression if requested. It
// returns the decorated writer and the appropriate "Content-Encoding" header
// (which is empty if no compression is enabled).
func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) {
if compressionDisabled {
return writer, ""
}
header := request.Header.Get(acceptEncodingHeader)
parts := strings.Split(header, ",")
for _, part := range parts {
part := strings.TrimSpace(part)
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
return gzip.NewWriter(writer), "gzip"
}
}
return writer, ""
}

View file

@@ -1,65 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Copyright (c) 2013, The Prometheus Authors
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
package prometheus
// Push triggers a metric collection by the default registry and pushes all
// collected metrics to the Pushgateway specified by addr. See the Pushgateway
// documentation for detailed implications of the job and instance
// parameter. instance can be left empty. You can use just host:port or ip:port
// as url, in which case 'http://' is added automatically. You can also include
// the schema in the URL. However, do not include the '/metrics/jobs/...' part.
//
// Note that all previously pushed metrics with the same job and instance will
// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT'
// to push to the Pushgateway.)
func Push(job, instance, url string) error {
return defRegistry.Push(job, instance, url, "PUT")
}
// PushAdd works like Push, but only previously pushed metrics with the same
// name (and the same job and instance) will be replaced. (It uses HTTP method
// 'POST' to push to the Pushgateway.)
func PushAdd(job, instance, url string) error {
return defRegistry.Push(job, instance, url, "POST")
}
// PushCollectors works like Push, but it does not collect from the default
// registry. Instead, it collects from the provided collectors. It is a
// convenient way to push only a few metrics.
func PushCollectors(job, instance, url string, collectors ...Collector) error {
return pushCollectors(job, instance, url, "PUT", collectors...)
}
// PushAddCollectors works like PushAdd, but it does not collect from the
// default registry. Instead, it collects from the provided collectors. It is a
// convenient way to push only a few metrics.
func PushAddCollectors(job, instance, url string, collectors ...Collector) error {
return pushCollectors(job, instance, url, "POST", collectors...)
}
func pushCollectors(job, instance, url, method string, collectors ...Collector) error {
r := newRegistry()
for _, collector := range collectors {
if _, err := r.Register(collector); err != nil {
return err
}
}
return r.Push(job, instance, url, method)
}

File diff suppressed because it is too large Load diff

View file

@@ -15,7 +15,6 @@ package prometheus
import (
"fmt"
"hash/fnv"
"math"
"sort"
"sync"
@@ -54,8 +53,11 @@ type Summary interface {
Observe(float64)
}
// DefObjectives are the default Summary quantile values.
//
// Deprecated: DefObjectives will not be used as the default objectives in
// v0.10 of the library. The default Summary will have no quantiles then.
var (
// DefObjectives are the default Summary quantile values.
DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
errQuantileLabelNotAllowed = fmt.Errorf(
@@ -114,9 +116,15 @@ type SummaryOpts struct {
ConstLabels Labels
// Objectives defines the quantile rank estimates with their respective
// absolute error. If Objectives[q] = e, then the value reported
// for q will be the φ-quantile value for some φ between q-e and q+e.
// The default value is DefObjectives.
// absolute error. If Objectives[q] = e, then the value reported for q
// will be the φ-quantile value for some φ between q-e and q+e. The
// default value is DefObjectives. It is used if Objectives is left at
// its zero value (i.e. nil). To create a Summary without Objectives,
// set it to an empty map (i.e. map[float64]float64{}).
//
// Deprecated: Note that the current value of DefObjectives is
// deprecated. It will be replaced by an empty map in v0.10 of the
// library. Please explicitly set Objectives to the desired value.
Objectives map[float64]float64
// MaxAge defines the duration for which an observation stays relevant
@@ -140,11 +148,11 @@ type SummaryOpts struct {
BufCap uint32
}
// TODO: Great fuck-up with the sliding-window decay algorithm... The Merge
// method of perk/quantile is actually not working as advertised - and it might
// be unfixable, as the underlying algorithm is apparently not capable of
// merging summaries in the first place. To avoid using Merge, we are currently
// adding observations to _each_ age bucket, i.e. the effort to add a sample is
// Great fuck-up with the sliding-window decay algorithm... The Merge method of
// perk/quantile is actually not working as advertised - and it might be
// unfixable, as the underlying algorithm is apparently not capable of merging
// summaries in the first place. To avoid using Merge, we are currently adding
// observations to _each_ age bucket, i.e. the effort to add a sample is
// essentially multiplied by the number of age buckets. When rotating age
// buckets, we empty the previous head stream. On scrape time, we simply take
// the quantiles from the head stream (no merging required). Result: More effort
@@ -184,7 +192,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
}
}
if len(opts.Objectives) == 0 {
if opts.Objectives == nil {
opts.Objectives = DefObjectives
}
@@ -228,12 +236,12 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
}
sort.Float64s(s.sortedObjectives)
s.Init(s) // Init self-collection.
s.init(s) // Init self-collection.
return s
}
type summary struct {
SelfCollector
selfCollector
bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
mtx sync.Mutex // Protects every other moving part.
@@ -391,7 +399,7 @@ func (s quantSort) Less(i, j int) bool {
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewSummaryVec.
type SummaryVec struct {
MetricVec
*MetricVec
}
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
@@ -405,14 +413,9 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
opts.ConstLabels,
)
return &SummaryVec{
MetricVec: MetricVec{
children: map[uint64]Metric{},
desc: desc,
hash: fnv.New64a(),
newMetric: func(lvs ...string) Metric {
return newSummary(desc, opts, lvs...)
},
},
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
return newSummary(desc, opts, lvs...)
}),
}
}
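
Given the deprecation of the DefObjectives fallback, new code should set Objectives explicitly. A sketch (metric names invented):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// Explicit objectives; leaving the field nil still falls back to the
	// deprecated DefObjectives.
	latency := prometheus.NewSummary(prometheus.SummaryOpts{
		Name:       "rpc_duration_seconds",
		Help:       "RPC latency distribution.",
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	})
	prometheus.MustRegister(latency)
	latency.Observe(0.37)

	// An empty (non-nil) map produces a Summary without quantiles.
	noQuantiles := prometheus.NewSummary(prometheus.SummaryOpts{
		Name:       "rpc_payload_bytes",
		Help:       "RPC payload sizes.",
		Objectives: map[float64]float64{},
	})
	prometheus.MustRegister(noQuantiles)
}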

View file

@@ -0,0 +1,74 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import "time"
// Observer is the interface that wraps the Observe method, which is used by
// Histogram and Summary to add observations.
type Observer interface {
Observe(float64)
}
// The ObserverFunc type is an adapter to allow the use of ordinary
// functions as Observers. If f is a function with the appropriate
// signature, ObserverFunc(f) is an Observer that calls f.
//
// This adapter is usually used in connection with the Timer type, and there are
// two general use cases:
//
// The most common one is to use a Gauge as the Observer for a Timer.
// See the "Gauge" Timer example.
//
// The more advanced use case is to create a function that dynamically decides
// which Observer to use for observing the duration. See the "Complex" Timer
// example.
type ObserverFunc func(float64)
// Observe calls f(value). It implements Observer.
func (f ObserverFunc) Observe(value float64) {
f(value)
}
// Timer is a helper type to time functions. Use NewTimer to create new
// instances.
type Timer struct {
begin time.Time
observer Observer
}
// NewTimer creates a new Timer. The provided Observer is used to observe a
// duration in seconds. Timer is usually used to time a function call in the
// following way:
// func TimeMe() {
// timer := NewTimer(myHistogram)
// defer timer.ObserveDuration()
// // Do actual work.
// }
func NewTimer(o Observer) *Timer {
return &Timer{
begin: time.Now(),
observer: o,
}
}
// ObserveDuration records the duration passed since the Timer was created with
// NewTimer. It calls the Observe method of the Observer provided during
// construction with the duration in seconds as an argument. ObserveDuration is
// usually called with a defer statement.
func (t *Timer) ObserveDuration() {
if t.observer != nil {
t.observer.Observe(time.Since(t.begin).Seconds())
}
}
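
Putting Observer, ObserverFunc, and Timer together; a sketch in which a Histogram times every run and a Gauge, adapted via ObserverFunc, records only the most recent duration (names invented):

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var (
	taskDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "task_duration_seconds",
		Help: "Duration of the timed task.",
	})
	lastRunDuration = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "task_last_duration_seconds",
		Help: "Duration of the most recent run.",
	})
)

func timedTask() {
	// A Histogram is itself an Observer; a Gauge can act as one via ObserverFunc.
	histTimer := prometheus.NewTimer(taskDuration)
	gaugeTimer := prometheus.NewTimer(prometheus.ObserverFunc(lastRunDuration.Set))
	defer histTimer.ObserveDuration()
	defer gaugeTimer.ObserveDuration()

	time.Sleep(10 * time.Millisecond) // stand-in for real work
}

func main() {
	prometheus.MustRegister(taskDuration, lastRunDuration)
	timedTask()
}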

View file

@@ -13,8 +13,6 @@
package prometheus
import "hash/fnv"
// Untyped is a Metric that represents a single numerical value that can
// arbitrarily go up and down.
//
@@ -22,6 +20,11 @@ import "hash/fnv"
// no type information is implied.
//
// To create Untyped instances, use NewUntyped.
//
// Deprecated: The Untyped type is deprecated because it doesn't make sense in
// direct instrumentation. If you need to mirror an external metric of unknown
// type (usually while writing exporters), use MustNewConstMetric to create an
// untyped metric instance on the fly.
type Untyped interface {
Metric
Collector
@@ -58,7 +61,7 @@ func NewUntyped(opts UntypedOpts) Untyped {
// labels. This is used if you want to count the same thing partitioned by
// various dimensions. Create instances with NewUntypedVec.
type UntypedVec struct {
MetricVec
*MetricVec
}
// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
@@ -72,14 +75,9 @@ func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
opts.ConstLabels,
)
return &UntypedVec{
MetricVec: MetricVec{
children: map[uint64]Metric{},
desc: desc,
hash: fnv.New64a(),
newMetric: func(lvs ...string) Metric {
return newValue(desc, UntypedValue, 0, lvs...)
},
},
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
return newValue(desc, UntypedValue, 0, lvs...)
}),
}
}
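
A sketch of the exporter pattern the deprecation note points to: mirroring an external value of unknown type as an on-the-fly const metric (the descriptor and the constant value are placeholders):

package main

import "github.com/prometheus/client_golang/prometheus"

// mirrorCollector emits an externally obtained value as an untyped
// const metric each time it is collected.
type mirrorCollector struct {
	desc *prometheus.Desc
}

func (c mirrorCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

func (c mirrorCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.UntypedValue, 42)
}

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(mirrorCollector{
		desc: prometheus.NewDesc("external_value", "Mirrored external metric.", nil, nil),
	})
}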

View file

@@ -19,6 +19,7 @@ import (
"math"
"sort"
"sync/atomic"
"time"
dto "github.com/prometheus/client_model/go"
@@ -48,7 +49,7 @@ type value struct {
// operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
valBits uint64
SelfCollector
selfCollector
desc *Desc
valType ValueType
@@ -68,7 +69,7 @@ func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...strin
valBits: math.Float64bits(val),
labelPairs: makeLabelPairs(desc, labelValues),
}
result.Init(result)
result.init(result)
return result
}
@@ -80,6 +81,10 @@ func (v *value) Set(val float64) {
atomic.StoreUint64(&v.valBits, math.Float64bits(val))
}
func (v *value) SetToCurrentTime() {
v.Set(float64(time.Now().UnixNano()) / 1e9)
}
func (v *value) Inc() {
v.Add(1)
}
@@ -113,7 +118,7 @@ func (v *value) Write(out *dto.Metric) error {
// library to back the implementations of CounterFunc, GaugeFunc, and
// UntypedFunc.
type valueFunc struct {
SelfCollector
selfCollector
desc *Desc
valType ValueType
@@ -134,7 +139,7 @@ func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *val
function: function,
labelPairs: makeLabelPairs(desc, nil),
}
result.Init(result)
result.init(result)
return result
}

View file

@@ -14,10 +14,10 @@
package prometheus
import (
"bytes"
"fmt"
"hash"
"sync"
"github.com/prometheus/common/model"
)
// MetricVec is a Collector to bundle metrics of the same name that
@@ -26,17 +26,32 @@ import (
// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already
// provided in this package.
type MetricVec struct {
mtx sync.RWMutex // Protects not only children, but also hash and buf.
children map[uint64]Metric
mtx sync.RWMutex // Protects the children.
children map[uint64][]metricWithLabelValues
desc *Desc
// hash is our own hash instance to avoid repeated allocations.
hash hash.Hash64
// buf is used to copy string contents into it for hashing,
// again to avoid allocations.
buf bytes.Buffer
newMetric func(labelValues ...string) Metric
hashAdd func(h uint64, s string) uint64 // replace hash function for testing collision handling
hashAddByte func(h uint64, b byte) uint64
}
newMetric func(labelValues ...string) Metric
// newMetricVec returns an initialized MetricVec. The concrete value is
// returned for embedding into another struct.
func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
return &MetricVec{
children: map[uint64][]metricWithLabelValues{},
desc: desc,
newMetric: newMetric,
hashAdd: hashAdd,
hashAddByte: hashAddByte,
}
}
// metricWithLabelValues provides the metric and its label values for
// disambiguation on hash collision.
type metricWithLabelValues struct {
values []string
metric Metric
}
// Describe implements Collector. The length of the returned slice
@@ -50,8 +65,10 @@ func (m *MetricVec) Collect(ch chan<- Metric) {
m.mtx.RLock()
defer m.mtx.RUnlock()
for _, metric := range m.children {
ch <- metric
for _, metrics := range m.children {
for _, metric := range metrics {
ch <- metric.metric
}
}
}
@@ -80,14 +97,12 @@ func (m *MetricVec) Collect(ch chan<- Metric) {
// with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example.
func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
m.mtx.Lock()
defer m.mtx.Unlock()
h, err := m.hashLabelValues(lvs)
if err != nil {
return nil, err
}
return m.getOrCreateMetric(h, lvs...), nil
return m.getOrCreateMetricWithLabelValues(h, lvs), nil
}
// GetMetricWith returns the Metric for the given Labels map (the label names
@@ -103,18 +118,12 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
m.mtx.Lock()
defer m.mtx.Unlock()
h, err := m.hashLabels(labels)
if err != nil {
return nil, err
}
lvs := make([]string, len(labels))
for i, label := range m.desc.variableLabels {
lvs[i] = labels[label]
}
return m.getOrCreateMetric(h, lvs...), nil
return m.getOrCreateMetricWithLabels(h, labels), nil
}
// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
@@ -162,11 +171,7 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
if err != nil {
return false
}
if _, has := m.children[h]; !has {
return false
}
delete(m.children, h)
return true
return m.deleteByHashWithLabelValues(h, lvs)
}
// Delete deletes the metric where the variable labels are the same as those
@@ -187,10 +192,50 @@ func (m *MetricVec) Delete(labels Labels) bool {
if err != nil {
return false
}
if _, has := m.children[h]; !has {
return m.deleteByHashWithLabels(h, labels)
}
// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
// there are multiple matches in the bucket, use lvs to select a metric and
// remove only that metric.
func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
metrics, ok := m.children[h]
if !ok {
return false
}
delete(m.children, h)
i := m.findMetricWithLabelValues(metrics, lvs)
if i >= len(metrics) {
return false
}
if len(metrics) > 1 {
m.children[h] = append(metrics[:i], metrics[i+1:]...)
} else {
delete(m.children, h)
}
return true
}
// deleteByHashWithLabels removes the metric from the hash bucket h. If there
// are multiple matches in the bucket, use labels to select a metric and
// remove only that metric.
func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool {
metrics, ok := m.children[h]
if !ok {
return false
}
i := m.findMetricWithLabels(metrics, labels)
if i >= len(metrics) {
return false
}
if len(metrics) > 1 {
m.children[h] = append(metrics[:i], metrics[i+1:]...)
} else {
delete(m.children, h)
}
return true
}
@@ -208,40 +253,152 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
if len(vals) != len(m.desc.variableLabels) {
return 0, errInconsistentCardinality
}
m.hash.Reset()
h := hashNew()
for _, val := range vals {
m.buf.Reset()
m.buf.WriteString(val)
m.hash.Write(m.buf.Bytes())
h = m.hashAdd(h, val)
h = m.hashAddByte(h, model.SeparatorByte)
}
return m.hash.Sum64(), nil
return h, nil
}
func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
if len(labels) != len(m.desc.variableLabels) {
return 0, errInconsistentCardinality
}
m.hash.Reset()
h := hashNew()
for _, label := range m.desc.variableLabels {
val, ok := labels[label]
if !ok {
return 0, fmt.Errorf("label name %q missing in label map", label)
}
m.buf.Reset()
m.buf.WriteString(val)
m.hash.Write(m.buf.Bytes())
h = m.hashAdd(h, val)
h = m.hashAddByte(h, model.SeparatorByte)
}
return m.hash.Sum64(), nil
return h, nil
}
func (m *MetricVec) getOrCreateMetric(hash uint64, labelValues ...string) Metric {
metric, ok := m.children[hash]
// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
// or creates it and returns the new one.
//
// This function holds the mutex.
func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric {
m.mtx.RLock()
metric, ok := m.getMetricWithLabelValues(hash, lvs)
m.mtx.RUnlock()
if ok {
return metric
}
m.mtx.Lock()
defer m.mtx.Unlock()
metric, ok = m.getMetricWithLabelValues(hash, lvs)
if !ok {
// Copy labelValues. Otherwise, they would be allocated even if we don't go
// down this code path.
copiedLabelValues := append(make([]string, 0, len(labelValues)), labelValues...)
metric = m.newMetric(copiedLabelValues...)
m.children[hash] = metric
// Copy to avoid allocation in case we don't go down this code path.
copiedLVs := make([]string, len(lvs))
copy(copiedLVs, lvs)
metric = m.newMetric(copiedLVs...)
m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric})
}
return metric
}
// getOrCreateMetricWithLabels retrieves the metric by hash and labels or
// creates it and returns the new one.
//
// This function holds the mutex.
func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric {
m.mtx.RLock()
metric, ok := m.getMetricWithLabels(hash, labels)
m.mtx.RUnlock()
if ok {
return metric
}
m.mtx.Lock()
defer m.mtx.Unlock()
metric, ok = m.getMetricWithLabels(hash, labels)
if !ok {
lvs := m.extractLabelValues(labels)
metric = m.newMetric(lvs...)
m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric})
}
return metric
}
// getMetricWithLabelValues gets a metric while handling possible collisions in
// the hash space. Must be called while holding the read mutex.
func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) {
metrics, ok := m.children[h]
if ok {
if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) {
return metrics[i].metric, true
}
}
return nil, false
}
// getMetricWithLabels gets a metric while handling possible collisions in
// the hash space. Must be called while holding the read mutex.
func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) {
metrics, ok := m.children[h]
if ok {
if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) {
return metrics[i].metric, true
}
}
return nil, false
}
// findMetricWithLabelValues returns the index of the matching metric or
// len(metrics) if not found.
func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int {
for i, metric := range metrics {
if m.matchLabelValues(metric.values, lvs) {
return i
}
}
return len(metrics)
}
// findMetricWithLabels returns the index of the matching metric or len(metrics)
// if not found.
func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int {
for i, metric := range metrics {
if m.matchLabels(metric.values, labels) {
return i
}
}
return len(metrics)
}
func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool {
if len(values) != len(lvs) {
return false
}
for i, v := range values {
if v != lvs[i] {
return false
}
}
return true
}
func (m *MetricVec) matchLabels(values []string, labels Labels) bool {
if len(labels) != len(values) {
return false
}
for i, k := range m.desc.variableLabels {
if values[i] != labels[k] {
return false
}
}
return true
}
func (m *MetricVec) extractLabelValues(labels Labels) []string {
labelValues := make([]string, len(labels))
for i, k := range m.desc.variableLabels {
labelValues[i] = labels[k]
}
return labelValues
}
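
Externally, the collision-aware storage changes nothing for vector users; lookup and deletion still key off the label values. A brief sketch with a CounterVec, which embeds *MetricVec (names invented):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	events := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "events_total",
		Help: "Events, partitioned by kind.",
	}, []string{"kind"})

	events.WithLabelValues("create").Inc()

	// Deletion walks the (possibly colliding) hash bucket and removes only
	// the child whose label values match exactly.
	_ = events.DeleteLabelValues("create")                  // true: child existed
	_ = events.Delete(prometheus.Labels{"kind": "create"}) // false: already gone
}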

View file

@@ -46,10 +46,7 @@ func ResponseFormat(h http.Header) Format {
return FmtUnknown
}
const (
textType = "text/plain"
jsonType = "application/json"
)
const textType = "text/plain"
switch mediatype {
case ProtoType:
@@ -66,22 +63,6 @@ func ResponseFormat(h http.Header) Format {
return FmtUnknown
}
return FmtText
case jsonType:
var prometheusAPIVersion string
if params["schema"] == "prometheus/telemetry" && params["version"] != "" {
prometheusAPIVersion = params["version"]
} else {
prometheusAPIVersion = h.Get("X-Prometheus-API-Version")
}
switch prometheusAPIVersion {
case "0.0.2", "":
return fmtJSON2
default:
return FmtUnknown
}
}
return FmtUnknown
@@ -93,8 +74,6 @@ func NewDecoder(r io.Reader, format Format) Decoder {
switch format {
case FmtProtoDelim:
return &protoDecoder{r: r}
case fmtJSON2:
return newJSON2Decoder(r)
}
return &textDecoder{r: r}
}
@@ -107,10 +86,32 @@ type protoDecoder struct {
// Decode implements the Decoder interface.
func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
_, err := pbutil.ReadDelimited(d.r, v)
return err
if err != nil {
return err
}
if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
return fmt.Errorf("invalid metric name %q", v.GetName())
}
for _, m := range v.GetMetric() {
if m == nil {
continue
}
for _, l := range m.GetLabel() {
if l == nil {
continue
}
if !model.LabelValue(l.GetValue()).IsValid() {
return fmt.Errorf("invalid label value %q", l.GetValue())
}
if !model.LabelName(l.GetName()).IsValid() {
return fmt.Errorf("invalid label name %q", l.GetName())
}
}
}
return nil
}
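
A sketch of driving a Decoder over a text-format stream; the sample metric is invented:

package main

import (
	"fmt"
	"io"
	"strings"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	in := strings.NewReader("# TYPE requests_total counter\nrequests_total{path=\"/\"} 42\n")
	dec := expfmt.NewDecoder(in, expfmt.FmtText)
	for {
		var mf dto.MetricFamily
		if err := dec.Decode(&mf); err == io.EOF {
			break
		} else if err != nil {
			fmt.Println("decode error:", err)
			break
		}
		fmt.Println(mf.GetName())
	}
}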
// textDecoder implements the Decoder interface for the text protcol.
// textDecoder implements the Decoder interface for the text protocol.
type textDecoder struct {
r io.Reader
p TextParser

View file

@@ -18,9 +18,9 @@ import (
"io"
"net/http"
"bitbucket.org/ww/goautoneg"
"github.com/golang/protobuf/proto"
"github.com/matttproud/golang_protobuf_extensions/pbutil"
"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
dto "github.com/prometheus/client_model/go"
)

View file

@@ -29,9 +29,6 @@ const (
FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
FmtProtoText Format = ProtoFmt + ` encoding=text`
FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
// fmtJSON2 is hidden as it is deprecated.
fmtJSON2 Format = `application/json; version=0.0.2`
)
const (

View file

@@ -20,8 +20,8 @@ import "bytes"
// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
//
// go-fuzz-build github.com/prometheus/client_golang/text
// go-fuzz -bin text-fuzz.zip -workdir fuzz
// go-fuzz-build github.com/prometheus/common/expfmt
// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
//
// Further input samples should go in the folder fuzz/corpus.
func Fuzz(in []byte) int {

View file

@@ -1,162 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package expfmt
import (
"encoding/json"
"fmt"
"io"
"sort"
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/model"
)
type json2Decoder struct {
dec *json.Decoder
fams []*dto.MetricFamily
}
func newJSON2Decoder(r io.Reader) Decoder {
return &json2Decoder{
dec: json.NewDecoder(r),
}
}
type histogram002 struct {
Labels model.LabelSet `json:"labels"`
Values map[string]float64 `json:"value"`
}
type counter002 struct {
Labels model.LabelSet `json:"labels"`
Value float64 `json:"value"`
}
func protoLabelSet(base, ext model.LabelSet) []*dto.LabelPair {
labels := base.Clone().Merge(ext)
delete(labels, model.MetricNameLabel)
names := make([]string, 0, len(labels))
for ln := range labels {
names = append(names, string(ln))
}
sort.Strings(names)
pairs := make([]*dto.LabelPair, 0, len(labels))
for _, ln := range names {
lv := labels[model.LabelName(ln)]
pairs = append(pairs, &dto.LabelPair{
Name: proto.String(ln),
Value: proto.String(string(lv)),
})
}
return pairs
}
func (d *json2Decoder) more() error {
var entities []struct {
BaseLabels model.LabelSet `json:"baseLabels"`
Docstring string `json:"docstring"`
Metric struct {
Type string `json:"type"`
Values json.RawMessage `json:"value"`
} `json:"metric"`
}
if err := d.dec.Decode(&entities); err != nil {
return err
}
for _, e := range entities {
f := &dto.MetricFamily{
Name: proto.String(string(e.BaseLabels[model.MetricNameLabel])),
Help: proto.String(e.Docstring),
Type: dto.MetricType_UNTYPED.Enum(),
Metric: []*dto.Metric{},
}
d.fams = append(d.fams, f)
switch e.Metric.Type {
case "counter", "gauge":
var values []counter002
if err := json.Unmarshal(e.Metric.Values, &values); err != nil {
return fmt.Errorf("could not extract %s value: %s", e.Metric.Type, err)
}
for _, ctr := range values {
f.Metric = append(f.Metric, &dto.Metric{
Label: protoLabelSet(e.BaseLabels, ctr.Labels),
Untyped: &dto.Untyped{
Value: proto.Float64(ctr.Value),
},
})
}
case "histogram":
var values []histogram002
if err := json.Unmarshal(e.Metric.Values, &values); err != nil {
return fmt.Errorf("could not extract %s value: %s", e.Metric.Type, err)
}
for _, hist := range values {
quants := make([]string, 0, len(values))
for q := range hist.Values {
quants = append(quants, q)
}
sort.Strings(quants)
for _, q := range quants {
value := hist.Values[q]
// The correct label is "quantile" but to not break old expressions
// this remains "percentile"
hist.Labels["percentile"] = model.LabelValue(q)
f.Metric = append(f.Metric, &dto.Metric{
Label: protoLabelSet(e.BaseLabels, hist.Labels),
Untyped: &dto.Untyped{
Value: proto.Float64(value),
},
})
}
}
default:
return fmt.Errorf("unknown metric type %q", e.Metric.Type)
}
}
return nil
}
// Decode implements the Decoder interface.
func (d *json2Decoder) Decode(v *dto.MetricFamily) error {
if len(d.fams) == 0 {
if err := d.more(); err != nil {
return err
}
}
*v = *d.fams[0]
d.fams = d.fams[1:]
return nil
}

View file

@@ -14,7 +14,6 @@
package expfmt
import (
"bytes"
"fmt"
"io"
"math"
@@ -26,9 +25,12 @@ import (
// MetricFamilyToText converts a MetricFamily proto message into text format and
// writes the resulting lines to 'out'. It returns the number of bytes written
// and any error encountered. This function does not perform checks on the
// content of the metric and label names, i.e. invalid metric or label names
// and any error encountered. The output will have the same order as the input,
// no further sorting is performed. Furthermore, this function assumes the input
// is already sanitized and does not perform any sanity checks. If the input
// contains duplicate metrics or invalid metric or label names, the conversion
// will result in invalid text format output.
//
// This method fulfills the type 'prometheus.encoder'.
func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
var written int
@@ -285,21 +287,17 @@ func labelPairsToText(
return written, nil
}
var (
escape = strings.NewReplacer("\\", `\\`, "\n", `\n`)
escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
)
// escapeString replaces '\' by '\\', new line character by '\n', and - if
// includeDoubleQuote is true - '"' by '\"'.
func escapeString(v string, includeDoubleQuote bool) string {
result := bytes.NewBuffer(make([]byte, 0, len(v)))
for _, c := range v {
switch {
case c == '\\':
result.WriteString(`\\`)
case includeDoubleQuote && c == '"':
result.WriteString(`\"`)
case c == '\n':
result.WriteString(`\n`)
default:
result.WriteRune(c)
}
if includeDoubleQuote {
return escapeWithDoubleQuote.Replace(v)
}
return result.String()
return escape.Replace(v)
}

View file

@@ -47,7 +47,7 @@ func (e ParseError) Error() string {
}
// TextParser is used to parse the simple and flat text-based exchange format. Its
// nil value is ready to use.
// zero value is ready to use.
type TextParser struct {
metricFamiliesByName map[string]*dto.MetricFamily
buf *bufio.Reader // Where the parsed input is read through.
@@ -108,6 +108,13 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF
delete(p.metricFamiliesByName, k)
}
}
// If p.err is io.EOF now, we have run into a premature end of the input
// stream. Turn this error into something nicer and more
// meaningful. (io.EOF is often used as a signal for the legitimate end
// of an input stream.)
if p.err == io.EOF {
p.parseError("unexpected end of input stream")
}
return p.metricFamiliesByName, p.err
}
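
And the parser side; with the change above, input that ends prematurely now surfaces as a parse error rather than a bare io.EOF. A sketch:

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	var parser expfmt.TextParser // the zero value is ready to use

	families, err := parser.TextToMetricFamilies(strings.NewReader("up 1\n"))
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	for name := range families {
		fmt.Println("parsed metric family:", name)
	}
}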

View file

@@ -0,0 +1,67 @@
PACKAGE
package goautoneg
import "bitbucket.org/ww/goautoneg"
HTTP Content-Type Autonegotiation.
The functions in this package implement the behaviour specified in
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
Copyright (c) 2011, Open Knowledge Foundation Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
Neither the name of the Open Knowledge Foundation Ltd. nor the
names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FUNCTIONS
func Negotiate(header string, alternatives []string) (content_type string)
Negotiate the most appropriate content_type given the accept header
and a list of alternatives.
func ParseAccept(header string) (accept []Accept)
Parse an Accept Header string returning a sorted list
of clauses
TYPES
type Accept struct {
Type, SubType string
Q float32
Params map[string]string
}
Structure to represent a clause in an HTTP Accept Header
SUBDIRECTORIES
.hg

View file

@@ -0,0 +1,162 @@
/*
HTTP Content-Type Autonegotiation.
The functions in this package implement the behaviour specified in
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
Copyright (c) 2011, Open Knowledge Foundation Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
Neither the name of the Open Knowledge Foundation Ltd. nor the
names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package goautoneg
import (
"sort"
"strconv"
"strings"
)
// Structure to represent a clause in an HTTP Accept Header
type Accept struct {
Type, SubType string
Q float64
Params map[string]string
}
// For internal use, so that we can use the sort interface
type accept_slice []Accept
func (accept accept_slice) Len() int {
slice := []Accept(accept)
return len(slice)
}
func (accept accept_slice) Less(i, j int) bool {
slice := []Accept(accept)
ai, aj := slice[i], slice[j]
if ai.Q > aj.Q {
return true
}
if ai.Type != "*" && aj.Type == "*" {
return true
}
if ai.SubType != "*" && aj.SubType == "*" {
return true
}
return false
}
func (accept accept_slice) Swap(i, j int) {
slice := []Accept(accept)
slice[i], slice[j] = slice[j], slice[i]
}
// Parse an Accept Header string returning a sorted list
// of clauses
func ParseAccept(header string) (accept []Accept) {
parts := strings.Split(header, ",")
accept = make([]Accept, 0, len(parts))
for _, part := range parts {
part := strings.Trim(part, " ")
a := Accept{}
a.Params = make(map[string]string)
a.Q = 1.0
mrp := strings.Split(part, ";")
media_range := mrp[0]
sp := strings.Split(media_range, "/")
a.Type = strings.Trim(sp[0], " ")
switch {
case len(sp) == 1 && a.Type == "*":
a.SubType = "*"
case len(sp) == 2:
a.SubType = strings.Trim(sp[1], " ")
default:
continue
}
if len(mrp) == 1 {
accept = append(accept, a)
continue
}
for _, param := range mrp[1:] {
sp := strings.SplitN(param, "=", 2)
if len(sp) != 2 {
continue
}
token := strings.Trim(sp[0], " ")
if token == "q" {
a.Q, _ = strconv.ParseFloat(sp[1], 32)
} else {
a.Params[token] = strings.Trim(sp[1], " ")
}
}
accept = append(accept, a)
}
slice := accept_slice(accept)
sort.Sort(slice)
return
}
// Negotiate the most appropriate content_type given the accept header
// and a list of alternatives.
func Negotiate(header string, alternatives []string) (content_type string) {
asp := make([][]string, 0, len(alternatives))
for _, ctype := range alternatives {
asp = append(asp, strings.SplitN(ctype, "/", 2))
}
for _, clause := range ParseAccept(header) {
for i, ctsp := range asp {
if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
content_type = alternatives[i]
return
}
if clause.Type == ctsp[0] && clause.SubType == "*" {
content_type = alternatives[i]
return
}
if clause.Type == "*" && clause.SubType == "*" {
content_type = alternatives[i]
return
}
}
}
return
}
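
For orientation, a minimal sketch of driving this package; the import path follows the package documentation above, and the header value is illustrative:

package main

import (
	"fmt"

	"bitbucket.org/ww/goautoneg"
)

func main() {
	header := "text/html;q=0.9, application/json;q=0.8, */*;q=0.1"
	// Negotiate returns the alternative matched by the highest-q clause.
	ct := goautoneg.Negotiate(header, []string{"application/json", "text/html"})
	fmt.Println(ct) // text/html
}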

View file

@ -35,8 +35,9 @@ type Alert struct {
Annotations LabelSet `json:"annotations"`
// The known time range for this alert. Both ends are optional.
StartsAt time.Time `json:"startsAt,omitempty"`
EndsAt time.Time `json:"endsAt,omitempty"`
StartsAt time.Time `json:"startsAt,omitempty"`
EndsAt time.Time `json:"endsAt,omitempty"`
GeneratorURL string `json:"generatorURL"`
}
// Name returns the name of the alert. It is equivalent to the "alertname" label.
@ -60,10 +61,16 @@ func (a *Alert) String() string {
// Resolved returns true iff the activity interval ended in the past.
func (a *Alert) Resolved() bool {
return a.ResolvedAt(time.Now())
}
// ResolvedAt returns true iff the activity interval ended before
// the given timestamp.
func (a *Alert) ResolvedAt(ts time.Time) bool {
if a.EndsAt.IsZero() {
return false
}
return !a.EndsAt.After(time.Now())
return !a.EndsAt.After(ts)
}
// Status returns the status of the alert.
@ -74,6 +81,26 @@ func (a *Alert) Status() AlertStatus {
return AlertFiring
}
// Validate checks whether the alert data is consistent, returning an error if it is not.
func (a *Alert) Validate() error {
if a.StartsAt.IsZero() {
return fmt.Errorf("start time missing")
}
if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
return fmt.Errorf("start time must be before end time")
}
if err := a.Labels.Validate(); err != nil {
return fmt.Errorf("invalid label set: %s", err)
}
if len(a.Labels) == 0 {
return fmt.Errorf("at least one label pair required")
}
if err := a.Annotations.Validate(); err != nil {
return fmt.Errorf("invalid annotations: %s", err)
}
return nil
}
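
A short usage sketch for Validate, assuming the canonical import path github.com/prometheus/common/model; the field values are illustrative:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	a := &model.Alert{
		Labels:   model.LabelSet{"alertname": "HighErrorRate"},
		StartsAt: time.Now(),
	}
	// Valid: a non-empty, well-formed label set and a start time are present.
	fmt.Println(a.Validate()) // <nil>

	a.StartsAt = time.Time{}
	fmt.Println(a.Validate()) // start time missing
}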
// Alerts is a list of alerts that can be sorted in chronological order.
type Alerts []*Alert

42
vendor/github.com/prometheus/common/model/fnv.go generated vendored Normal file
View file

@ -0,0 +1,42 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
// Inline and byte-free variant of hash/fnv's fnv64a.
const (
offset64 = 14695981039346656037
prime64 = 1099511628211
)
// hashNew initializes a new fnv64a hash value.
func hashNew() uint64 {
return offset64
}
// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
func hashAdd(h uint64, s string) uint64 {
for i := 0; i < len(s); i++ {
h ^= uint64(s[i])
h *= prime64
}
return h
}
// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
func hashAddByte(h uint64, b byte) uint64 {
h ^= uint64(b)
h *= prime64
return h
}
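
To see why these helpers can stand in for hash/fnv, a self-contained check that the byte-free variant reproduces the stdlib fnv64a sum (constants copied from above):

package main

import (
	"fmt"
	"hash/fnv"
)

const (
	offset64 = 14695981039346656037
	prime64  = 1099511628211
)

// hashAdd mirrors the unexported helper above: FNV-1a over a string,
// one byte at a time, without allocating.
func hashAdd(h uint64, s string) uint64 {
	for i := 0; i < len(s); i++ {
		h ^= uint64(s[i])
		h *= prime64
	}
	return h
}

func main() {
	h := fnv.New64a()
	h.Write([]byte("up"))
	fmt.Println(h.Sum64() == hashAdd(offset64, "up")) // true
}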

View file

@ -17,8 +17,8 @@ import (
"encoding/json"
"fmt"
"regexp"
"sort"
"strings"
"unicode/utf8"
)
const (
@ -80,20 +80,37 @@ const (
QuantileLabel = "quantile"
)
// LabelNameRE is a regular expression matching valid label names.
// LabelNameRE is a regular expression matching valid label names. Note that the
// IsValid method of LabelName performs the same check but faster than a match
// with this regular expression.
var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
// A LabelName is a key for a LabelSet or Metric. It has a value associated
// therewith.
type LabelName string
// IsValid is true iff the label name matches the pattern of LabelNameRE. This
// method, however, does not use LabelNameRE for the check but a much faster
// hardcoded implementation.
func (ln LabelName) IsValid() bool {
if len(ln) == 0 {
return false
}
for i, b := range ln {
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
return false
}
}
return true
}
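
A hedged sketch showing that the fast path and LabelNameRE agree (import path assumed to be github.com/prometheus/common/model):

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	for _, name := range []string{"http_requests_total", "2xx_rate", ""} {
		// Both checks accept or reject the same names; IsValid merely
		// avoids the regexp engine.
		fmt.Println(name,
			model.LabelName(name).IsValid(),
			model.LabelNameRE.MatchString(name))
	}
}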
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
var s string
if err := unmarshal(&s); err != nil {
return err
}
if !LabelNameRE.MatchString(s) {
if !LabelName(s).IsValid() {
return fmt.Errorf("%q is not a valid label name", s)
}
*ln = LabelName(s)
@ -106,7 +123,7 @@ func (ln *LabelName) UnmarshalJSON(b []byte) error {
if err := json.Unmarshal(b, &s); err != nil {
return err
}
if !LabelNameRE.MatchString(s) {
if !LabelName(s).IsValid() {
return fmt.Errorf("%q is not a valid label name", s)
}
*ln = LabelName(s)
@ -139,6 +156,11 @@ func (l LabelNames) String() string {
// A LabelValue is an associated value for a LabelName.
type LabelValue string
// IsValid returns true iff the string is valid UTF-8.
func (lv LabelValue) IsValid() bool {
return utf8.ValidString(string(lv))
}
// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
type LabelValues []LabelValue
@ -147,7 +169,7 @@ func (l LabelValues) Len() int {
}
func (l LabelValues) Less(i, j int) bool {
return sort.StringsAreSorted([]string{string(l[i]), string(l[j])})
return string(l[i]) < string(l[j])
}
func (l LabelValues) Swap(i, j int) {

View file

@ -27,6 +27,21 @@ import (
// match.
type LabelSet map[LabelName]LabelValue
// Validate checks whether all names and values in the label set
// are valid.
func (ls LabelSet) Validate() error {
for ln, lv := range ls {
if !ln.IsValid() {
return fmt.Errorf("invalid name %q", ln)
}
if !lv.IsValid() {
return fmt.Errorf("invalid value %q", lv)
}
}
return nil
}
// Equal returns true iff both label sets have exactly the same key/value pairs.
func (ls LabelSet) Equal(o LabelSet) bool {
if len(ls) != len(o) {
return false
@ -90,6 +105,7 @@ func (ls LabelSet) Before(o LabelSet) bool {
return false
}
// Clone returns a copy of the label set.
func (ls LabelSet) Clone() LabelSet {
lsn := make(LabelSet, len(ls))
for ln, lv := range ls {
@ -144,7 +160,7 @@ func (l *LabelSet) UnmarshalJSON(b []byte) error {
// LabelName as a string and does not call its UnmarshalJSON method.
// Thus, we have to replicate the behavior here.
for ln := range m {
if !LabelNameRE.MatchString(string(ln)) {
if !ln.IsValid() {
return fmt.Errorf("%q is not a valid label name", ln)
}
}

View file

@ -15,11 +15,18 @@ package model
import (
"fmt"
"regexp"
"sort"
"strings"
)
var separator = []byte{0}
var (
separator = []byte{0}
// MetricNameRE is a regular expression matching valid metric
// names. Note that the IsValidMetricName function performs the same
// check but faster than a match with this regular expression.
MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
)
// A Metric is similar to a LabelSet, but the key difference is that a Metric is
// a singleton and refers to one and only one stream of samples.
@ -79,3 +86,18 @@ func (m Metric) Fingerprint() Fingerprint {
func (m Metric) FastFingerprint() Fingerprint {
return LabelSet(m).FastFingerprint()
}
// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
// This function, however, does not use MetricNameRE for the check but a much
// faster hardcoded implementation.
func IsValidMetricName(n LabelValue) bool {
if len(n) == 0 {
return false
}
for i, b := range n {
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) {
return false
}
}
return true
}
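
Illustratively (the metric names here are chosen for the example, not taken from the diff):

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// ':' is allowed in metric names (recording-rule convention),
	// unlike in label names; a leading digit is rejected in both.
	fmt.Println(model.IsValidMetricName("job:latency_seconds:mean5m")) // true
	fmt.Println(model.IsValidMetricName("5xx_total"))                  // false
}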

View file

@ -12,5 +12,5 @@
// limitations under the License.
// Package model contains common data structures that are shared across
// Prometheus componenets and libraries.
// Prometheus components and libraries.
package model

View file

@ -14,11 +14,7 @@
package model
import (
"bytes"
"hash"
"hash/fnv"
"sort"
"sync"
)
// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
@ -28,30 +24,9 @@ const SeparatorByte byte = 255
var (
// cache the signature of an empty label set.
emptyLabelSignature = fnv.New64a().Sum64()
hashAndBufPool sync.Pool
emptyLabelSignature = hashNew()
)
type hashAndBuf struct {
h hash.Hash64
b bytes.Buffer
}
func getHashAndBuf() *hashAndBuf {
hb := hashAndBufPool.Get()
if hb == nil {
return &hashAndBuf{h: fnv.New64a()}
}
return hb.(*hashAndBuf)
}
func putHashAndBuf(hb *hashAndBuf) {
hb.h.Reset()
hb.b.Reset()
hashAndBufPool.Put(hb)
}
// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
// given label set. (Collisions are possible but unlikely if the number of label
// sets the function is applied to is small.)
@ -66,18 +41,14 @@ func LabelsToSignature(labels map[string]string) uint64 {
}
sort.Strings(labelNames)
hb := getHashAndBuf()
defer putHashAndBuf(hb)
sum := hashNew()
for _, labelName := range labelNames {
hb.b.WriteString(labelName)
hb.b.WriteByte(SeparatorByte)
hb.b.WriteString(labels[labelName])
hb.b.WriteByte(SeparatorByte)
hb.h.Write(hb.b.Bytes())
hb.b.Reset()
sum = hashAdd(sum, labelName)
sum = hashAddByte(sum, SeparatorByte)
sum = hashAdd(sum, labels[labelName])
sum = hashAddByte(sum, SeparatorByte)
}
return hb.h.Sum64()
return sum
}
// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
@ -93,18 +64,14 @@ func labelSetToFingerprint(ls LabelSet) Fingerprint {
}
sort.Sort(labelNames)
hb := getHashAndBuf()
defer putHashAndBuf(hb)
sum := hashNew()
for _, labelName := range labelNames {
hb.b.WriteString(string(labelName))
hb.b.WriteByte(SeparatorByte)
hb.b.WriteString(string(ls[labelName]))
hb.b.WriteByte(SeparatorByte)
hb.h.Write(hb.b.Bytes())
hb.b.Reset()
sum = hashAdd(sum, string(labelName))
sum = hashAddByte(sum, SeparatorByte)
sum = hashAdd(sum, string(ls[labelName]))
sum = hashAddByte(sum, SeparatorByte)
}
return Fingerprint(hb.h.Sum64())
return Fingerprint(sum)
}
// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a
@ -116,17 +83,12 @@ func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
}
var result uint64
hb := getHashAndBuf()
defer putHashAndBuf(hb)
for labelName, labelValue := range ls {
hb.b.WriteString(string(labelName))
hb.b.WriteByte(SeparatorByte)
hb.b.WriteString(string(labelValue))
hb.h.Write(hb.b.Bytes())
result ^= hb.h.Sum64()
hb.h.Reset()
hb.b.Reset()
sum := hashNew()
sum = hashAdd(sum, string(labelName))
sum = hashAddByte(sum, SeparatorByte)
sum = hashAdd(sum, string(labelValue))
result ^= sum
}
return Fingerprint(result)
}
@ -136,24 +98,20 @@ func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
// specified LabelNames into the signature calculation. The labels passed in
// will be sorted by this function.
func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
if len(m) == 0 || len(labels) == 0 {
if len(labels) == 0 {
return emptyLabelSignature
}
sort.Sort(LabelNames(labels))
hb := getHashAndBuf()
defer putHashAndBuf(hb)
sum := hashNew()
for _, label := range labels {
hb.b.WriteString(string(label))
hb.b.WriteByte(SeparatorByte)
hb.b.WriteString(string(m[label]))
hb.b.WriteByte(SeparatorByte)
hb.h.Write(hb.b.Bytes())
hb.b.Reset()
sum = hashAdd(sum, string(label))
sum = hashAddByte(sum, SeparatorByte)
sum = hashAdd(sum, string(m[label]))
sum = hashAddByte(sum, SeparatorByte)
}
return hb.h.Sum64()
return sum
}
// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
@ -175,16 +133,12 @@ func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
}
sort.Sort(labelNames)
hb := getHashAndBuf()
defer putHashAndBuf(hb)
sum := hashNew()
for _, labelName := range labelNames {
hb.b.WriteString(string(labelName))
hb.b.WriteByte(SeparatorByte)
hb.b.WriteString(string(m[labelName]))
hb.b.WriteByte(SeparatorByte)
hb.h.Write(hb.b.Bytes())
hb.b.Reset()
sum = hashAdd(sum, string(labelName))
sum = hashAddByte(sum, SeparatorByte)
sum = hashAdd(sum, string(m[labelName]))
sum = hashAddByte(sum, SeparatorByte)
}
return hb.h.Sum64()
return sum
}
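
A small sketch of the property the rewritten hashing preserves, assuming the canonical import path github.com/prometheus/common/model:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	a := map[string]string{"job": "api", "instance": "host:9090"}
	b := map[string]string{"instance": "host:9090", "job": "api"}
	// Label names are sorted before hashing, so map insertion and
	// iteration order never affect the signature.
	fmt.Println(model.LabelsToSignature(a) == model.LabelsToSignature(b)) // true
}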

View file

@ -44,6 +44,21 @@ func (m *Matcher) UnmarshalJSON(b []byte) error {
return nil
}
// Validate returns nil iff all fields of the matcher have valid values.
func (m *Matcher) Validate() error {
if !m.Name.IsValid() {
return fmt.Errorf("invalid name %q", m.Name)
}
if m.IsRegex {
if _, err := regexp.Compile(m.Value); err != nil {
return fmt.Errorf("invalid regular expression %q", m.Value)
}
} else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
return fmt.Errorf("invalid value %q", m.Value)
}
return nil
}
// Silence defines the representation of a silence definition
// in the Prometheus eco-system.
type Silence struct {
@ -58,3 +73,34 @@ type Silence struct {
CreatedBy string `json:"createdBy"`
Comment string `json:"comment,omitempty"`
}
// Validate returns nil iff all fields of the silence have valid values.
func (s *Silence) Validate() error {
if len(s.Matchers) == 0 {
return fmt.Errorf("at least one matcher required")
}
for _, m := range s.Matchers {
if err := m.Validate(); err != nil {
return fmt.Errorf("invalid matcher: %s", err)
}
}
if s.StartsAt.IsZero() {
return fmt.Errorf("start time missing")
}
if s.EndsAt.IsZero() {
return fmt.Errorf("end time missing")
}
if s.EndsAt.Before(s.StartsAt) {
return fmt.Errorf("start time must be before end time")
}
if s.CreatedBy == "" {
return fmt.Errorf("creator information missing")
}
if s.Comment == "" {
return fmt.Errorf("comment missing")
}
if s.CreatedAt.IsZero() {
return fmt.Errorf("creation timestamp missing")
}
return nil
}

View file

@ -163,51 +163,70 @@ func (t *Time) UnmarshalJSON(b []byte) error {
// This type should not propagate beyond the scope of input/output processing.
type Duration time.Duration
var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
// StringToDuration parses a string into a time.Duration, assuming that a year
// a day always has 24h.
// always has 365d, a week always has 7d, and a day always has 24h.
func ParseDuration(durationStr string) (Duration, error) {
matches := durationRE.FindStringSubmatch(durationStr)
if len(matches) != 3 {
return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
}
durSeconds, _ := strconv.Atoi(matches[1])
dur := time.Duration(durSeconds) * time.Second
unit := matches[2]
switch unit {
var (
n, _ = strconv.Atoi(matches[1])
dur = time.Duration(n) * time.Millisecond
)
switch unit := matches[2]; unit {
case "y":
dur *= 1000 * 60 * 60 * 24 * 365
case "w":
dur *= 1000 * 60 * 60 * 24 * 7
case "d":
dur *= 60 * 60 * 24
dur *= 1000 * 60 * 60 * 24
case "h":
dur *= 60 * 60
dur *= 1000 * 60 * 60
case "m":
dur *= 60
dur *= 1000 * 60
case "s":
dur *= 1
dur *= 1000
case "ms":
// Value already correct
default:
return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
}
return Duration(dur), nil
}
var durationRE = regexp.MustCompile("^([0-9]+)([ywdhms]+)$")
func (d Duration) String() string {
seconds := int64(time.Duration(d) / time.Second)
var (
ms = int64(time.Duration(d) / time.Millisecond)
unit = "ms"
)
factors := map[string]int64{
"d": 60 * 60 * 24,
"h": 60 * 60,
"m": 60,
"s": 1,
"y": 1000 * 60 * 60 * 24 * 365,
"w": 1000 * 60 * 60 * 24 * 7,
"d": 1000 * 60 * 60 * 24,
"h": 1000 * 60 * 60,
"m": 1000 * 60,
"s": 1000,
"ms": 1,
}
unit := "s"
switch int64(0) {
case seconds % factors["d"]:
case ms % factors["y"]:
unit = "y"
case ms % factors["w"]:
unit = "w"
case ms % factors["d"]:
unit = "d"
case seconds % factors["h"]:
case ms % factors["h"]:
unit = "h"
case seconds % factors["m"]:
case ms % factors["m"]:
unit = "m"
case ms % factors["s"]:
unit = "s"
}
return fmt.Sprintf("%v%v", seconds/factors[unit], unit)
return fmt.Sprintf("%v%v", ms/factors[unit], unit)
}
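
A round-trip sketch of the millisecond-based parsing and formatting above (import path assumed to be github.com/prometheus/common/model):

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	d, err := model.ParseDuration("90m")
	if err != nil {
		panic(err)
	}
	// String picks the largest unit that divides the duration evenly,
	// so 90m is printed as "90m" rather than "5400s".
	fmt.Println(d) // 90m
}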
// MarshalYAML implements the yaml.Marshaler interface.

View file

@ -16,11 +16,28 @@ package model
import (
"encoding/json"
"fmt"
"math"
"sort"
"strconv"
"strings"
)
var (
// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
// non-existing sample pair. It is a SamplePair with timestamp Earliest and
// value 0.0. Note that the natural zero value of SamplePair has a timestamp
// of 0, which is possible to appear in a real SamplePair and thus not
// suitable to signal a non-existing SamplePair.
ZeroSamplePair = SamplePair{Timestamp: Earliest}
// ZeroSample is the pseudo zero-value of Sample used to signal a
// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
// and metric nil. Note that the natural zero value of Sample has a timestamp
// of 0, which is possible to appear in a real Sample and thus not suitable
// to signal a non-existing Sample.
ZeroSample = Sample{Timestamp: Earliest}
)
// A SampleValue is a representation of a value for a given sample at a given
// time.
type SampleValue float64
@ -43,8 +60,14 @@ func (v *SampleValue) UnmarshalJSON(b []byte) error {
return nil
}
// Equal returns true if the value of v and o is equal or if both are NaN. Note
// that v==o is false if both are NaN. If you want the conventional float
// behavior, use == to compare two SampleValues.
func (v SampleValue) Equal(o SampleValue) bool {
return v == o
if v == o {
return true
}
return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
}
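
The NaN behaviour in a nutshell, a sketch using only the stdlib math package alongside the model type:

package main

import (
	"fmt"
	"math"

	"github.com/prometheus/common/model"
)

func main() {
	nan := model.SampleValue(math.NaN())
	fmt.Println(nan == nan)     // false: IEEE 754 comparison semantics
	fmt.Println(nan.Equal(nan)) // true: Equal treats two NaNs as equal
}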
func (v SampleValue) String() string {
@ -77,9 +100,9 @@ func (s *SamplePair) UnmarshalJSON(b []byte) error {
}
// Equal returns true if this SamplePair and o have equal Values and equal
// Timestamps.
// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
func (s *SamplePair) Equal(o *SamplePair) bool {
return s == o || (s.Value == o.Value && s.Timestamp.Equal(o.Timestamp))
return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
}
func (s SamplePair) String() string {
@ -93,7 +116,8 @@ type Sample struct {
Timestamp Time `json:"timestamp"`
}
// Equal compares first the metrics, then the timestamp, then the value.
// Equal compares first the metrics, then the timestamp, then the value. The
// semantics of value equality is defined by SampleValue.Equal.
func (s *Sample) Equal(o *Sample) bool {
if s == o {
return true
@ -105,7 +129,7 @@ func (s *Sample) Equal(o *Sample) bool {
if !s.Timestamp.Equal(o.Timestamp) {
return false
}
if s.Value != o.Value {
if !s.Value.Equal(o.Value) {
return false
}

View file

@ -1,7 +1,5 @@
sudo: false
language: go
go:
- 1.3
- 1.4
- 1.5
- tip
- 1.6

View file

@ -13,7 +13,7 @@ The following individuals have contributed code to this repository
* David Cournapeau <cournape@gmail.com>
* Ji-Hoon, Seol <jihoon.seol@gmail.com>
* Jonas Große Sundrup <cherti@letopolis.de>
* Julius Volz <julius@soundcloud.com>
* Julius Volz <julius.volz@gmail.com>
* Matthias Rampke <mr@soundcloud.com>
* Nicky Gerritsen <nicky@streamone.nl>
* Rémi Audebert <contact@halfr.net>

View file

@ -1,5 +1,5 @@
ci:
go fmt
! gofmt -l *.go | read nothing
go vet
go test -v ./...
go get github.com/golang/lint/golint

View file

@ -27,14 +27,7 @@ func NewFS(mountPoint string) (FS, error) {
return FS(mountPoint), nil
}
func (fs FS) stat(p string) (os.FileInfo, error) {
return os.Stat(path.Join(string(fs), p))
}
func (fs FS) open(p string) (*os.File, error) {
return os.Open(path.Join(string(fs), p))
}
func (fs FS) readlink(p string) (string, error) {
return os.Readlink(path.Join(string(fs), p))
// Path returns the path of the given subsystem relative to the procfs root.
func (fs FS) Path(p ...string) string {
return path.Join(append([]string{string(fs)}, p...)...)
}
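
A usage sketch for the new Path helper; the mount point shown is the package default:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	// Callers now build paths themselves and open files directly,
	// instead of using the removed stat/open/readlink wrappers.
	fmt.Println(fs.Path("net", "ip_vs_stats")) // /proc/net/ip_vs_stats
}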

View file

@ -8,6 +8,7 @@ import (
"io"
"io/ioutil"
"net"
"os"
"strconv"
"strings"
)
@ -58,7 +59,7 @@ func NewIPVSStats() (IPVSStats, error) {
// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem.
func (fs FS) NewIPVSStats() (IPVSStats, error) {
file, err := fs.open("net/ip_vs_stats")
file, err := os.Open(fs.Path("net/ip_vs_stats"))
if err != nil {
return IPVSStats{}, err
}
@ -127,7 +128,7 @@ func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
file, err := fs.open("net/ip_vs")
file, err := os.Open(fs.Path("net/ip_vs"))
if err != nil {
return nil, err
}

View file

@ -3,7 +3,6 @@ package procfs
import (
"fmt"
"io/ioutil"
"path"
"regexp"
"strconv"
"strings"
@ -32,36 +31,22 @@ type MDStat struct {
// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos.
func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
mdStatusFilePath := path.Join(string(fs), "mdstat")
mdStatusFilePath := fs.Path("mdstat")
content, err := ioutil.ReadFile(mdStatusFilePath)
if err != nil {
return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
}
mdStatusFile := string(content)
lines := strings.Split(mdStatusFile, "\n")
var currentMD string
// Each md has at least the deviceline, statusline and one empty line afterwards
// so we will have probably something of the order len(lines)/3 devices
// so we use that for preallocation.
estimateMDs := len(lines) / 3
mdStates := make([]MDStat, 0, estimateMDs)
mdStates := []MDStat{}
lines := strings.Split(string(content), "\n")
for i, l := range lines {
if l == "" {
// Skip entirely empty lines.
continue
}
if l[0] == ' ' {
// Those lines are not the beginning of a md-section.
continue
}
if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") {
// We aren't interested in lines with general info.
continue
}
@ -69,32 +54,30 @@ func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
if len(mainLine) < 3 {
return mdStates, fmt.Errorf("error parsing mdline: %s", l)
}
currentMD = mainLine[0] // name of md-device
activityState := mainLine[2] // activity status of said md-device
mdName := mainLine[0]
activityState := mainLine[2]
if len(lines) <= i+3 {
return mdStates, fmt.Errorf("error parsing %s: entry for %s has fewer lines than expected", mdStatusFilePath, currentMD)
return mdStates, fmt.Errorf(
"error parsing %s: too few lines for md device %s",
mdStatusFilePath,
mdName,
)
}
active, total, size, err := evalStatusline(lines[i+1]) // parse statusline, always present
active, total, size, err := evalStatusline(lines[i+1])
if err != nil {
return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
}
//
// Now get the number of synced blocks.
//
// Get the line number of the syncing-line.
var j int
if strings.Contains(lines[i+2], "bitmap") { // then skip the bitmap line
// j is the line number of the syncing-line.
j := i + 2
if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
j = i + 3
} else {
j = i + 2
}
// If device is syncing at the moment, get the number of currently synced bytes,
// otherwise that number equals the size of the device.
// If device is syncing at the moment, get the number of currently
// synced bytes, otherwise that number equals the size of the device.
syncedBlocks := size
if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") {
syncedBlocks, err = evalBuildline(lines[j])
@ -103,8 +86,14 @@ func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
}
}
mdStates = append(mdStates, MDStat{currentMD, activityState, active, total, size, syncedBlocks})
mdStates = append(mdStates, MDStat{
Name: mdName,
ActivityState: activityState,
DisksActive: active,
DisksTotal: total,
BlocksTotal: size,
BlocksSynced: syncedBlocks,
})
}
return mdStates, nil
@ -112,47 +101,38 @@ func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
func evalStatusline(statusline string) (active, total, size int64, err error) {
matches := statuslineRE.FindStringSubmatch(statusline)
// +1 to make it more obvious that the whole string containing the info is also returned as matches[0].
if len(matches) != 3+1 {
return 0, 0, 0, fmt.Errorf("unexpected number matches found in statusline: %s", statusline)
if len(matches) != 4 {
return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline)
}
size, err = strconv.ParseInt(matches[1], 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("%s in statusline: %s", err, statusline)
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
}
total, err = strconv.ParseInt(matches[2], 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("%s in statusline: %s", err, statusline)
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
}
active, err = strconv.ParseInt(matches[3], 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("%s in statusline: %s", err, statusline)
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
}
return active, total, size, nil
}
// Gets the size that has already been synced out of the sync-line.
func evalBuildline(buildline string) (int64, error) {
func evalBuildline(buildline string) (syncedBlocks int64, err error) {
matches := buildlineRE.FindStringSubmatch(buildline)
// +1 to make it more obvious that the whole string containing the info is also returned as matches[0].
if len(matches) < 1+1 {
return 0, fmt.Errorf("too few matches found in buildline: %s", buildline)
if len(matches) != 2 {
return 0, fmt.Errorf("unexpected buildline: %s", buildline)
}
if len(matches) > 1+1 {
return 0, fmt.Errorf("too many matches found in buildline: %s", buildline)
}
syncedSize, err := strconv.ParseInt(matches[1], 10, 64)
syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
if err != nil {
return 0, fmt.Errorf("%s in buildline: %s", err, buildline)
}
return syncedSize, nil
return syncedBlocks, nil
}
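
A sketch of driving the parser; field names follow the MDStat struct in this file, and the code assumes a host with /proc/mdstat present:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	mdstats, err := fs.ParseMDStat()
	if err != nil {
		panic(err) // e.g. no /proc/mdstat on this machine
	}
	for _, md := range mdstats {
		fmt.Printf("%s: %s, %d/%d disks active\n",
			md.Name, md.ActivityState, md.DisksActive, md.DisksTotal)
	}
}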

View file

@ -4,7 +4,6 @@ import (
"fmt"
"io/ioutil"
"os"
"path"
"strconv"
"strings"
)
@ -42,7 +41,7 @@ func NewProc(pid int) (Proc, error) {
return fs.NewProc(pid)
}
// AllProcs returns a list of all currently avaible processes under /proc.
// AllProcs returns a list of all currently available processes under /proc.
func AllProcs() (Procs, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
@ -53,7 +52,7 @@ func AllProcs() (Procs, error) {
// Self returns a process for the current process.
func (fs FS) Self() (Proc, error) {
p, err := fs.readlink("self")
p, err := os.Readlink(fs.Path("self"))
if err != nil {
return Proc{}, err
}
@ -66,15 +65,15 @@ func (fs FS) Self() (Proc, error) {
// NewProc returns a process for the given pid.
func (fs FS) NewProc(pid int) (Proc, error) {
if _, err := fs.stat(strconv.Itoa(pid)); err != nil {
if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil {
return Proc{}, err
}
return Proc{PID: pid, fs: fs}, nil
}
// AllProcs returns a list of all currently avaible processes.
// AllProcs returns a list of all currently available processes.
func (fs FS) AllProcs() (Procs, error) {
d, err := fs.open("")
d, err := os.Open(fs.Path())
if err != nil {
return Procs{}, err
}
@ -99,7 +98,7 @@ func (fs FS) AllProcs() (Procs, error) {
// CmdLine returns the command line of a process.
func (p Proc) CmdLine() ([]string, error) {
f, err := p.open("cmdline")
f, err := os.Open(p.path("cmdline"))
if err != nil {
return nil, err
}
@ -117,10 +116,25 @@ func (p Proc) CmdLine() ([]string, error) {
return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil
}
// Comm returns the command name of a process.
func (p Proc) Comm() (string, error) {
f, err := os.Open(p.path("comm"))
if err != nil {
return "", err
}
defer f.Close()
data, err := ioutil.ReadAll(f)
if err != nil {
return "", err
}
return strings.TrimSpace(string(data)), nil
}
// Executable returns the absolute path of the executable command of a process.
func (p Proc) Executable() (string, error) {
exe, err := p.readlink("exe")
exe, err := os.Readlink(p.path("exe"))
if os.IsNotExist(err) {
return "", nil
}
@ -158,7 +172,7 @@ func (p Proc) FileDescriptorTargets() ([]string, error) {
targets := make([]string, len(names))
for i, name := range names {
target, err := p.readlink("fd/" + name)
target, err := os.Readlink(p.path("fd", name))
if err == nil {
targets[i] = target
}
@ -179,7 +193,7 @@ func (p Proc) FileDescriptorsLen() (int, error) {
}
func (p Proc) fileDescriptors() ([]string, error) {
d, err := p.open("fd")
d, err := os.Open(p.path("fd"))
if err != nil {
return nil, err
}
@ -193,10 +207,6 @@ func (p Proc) fileDescriptors() ([]string, error) {
return names, nil
}
func (p Proc) open(pa string) (*os.File, error) {
return p.fs.open(path.Join(strconv.Itoa(p.PID), pa))
}
func (p Proc) readlink(pa string) (string, error) {
return p.fs.readlink(path.Join(strconv.Itoa(p.PID), pa))
func (p Proc) path(pa ...string) string {
return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
}

View file

@ -3,6 +3,7 @@ package procfs
import (
"fmt"
"io/ioutil"
"os"
)
// ProcIO models the content of /proc/<pid>/io.
@ -29,7 +30,7 @@ type ProcIO struct {
func (p Proc) NewIO() (ProcIO, error) {
pio := ProcIO{}
f, err := p.open("io")
f, err := os.Open(p.path("io"))
if err != nil {
return pio, err
}

View file

@ -3,29 +3,56 @@ package procfs
import (
"bufio"
"fmt"
"os"
"regexp"
"strconv"
)
// ProcLimits represents the soft limits for each of the process's resource
// limits.
// limits. For more information see getrlimit(2):
// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
type ProcLimits struct {
CPUTime int
FileSize int
DataSize int
StackSize int
CoreFileSize int
ResidentSet int
Processes int
OpenFiles int
LockedMemory int
AddressSpace int
FileLocks int
PendingSignals int
MsqqueueSize int
NicePriority int
// CPU time limit in seconds.
CPUTime int
// Maximum size of files that the process may create.
FileSize int
// Maximum size of the process's data segment (initialized data,
// uninitialized data, and heap).
DataSize int
// Maximum size of the process stack in bytes.
StackSize int
// Maximum size of a core file.
CoreFileSize int
// Limit of the process's resident set in pages.
ResidentSet int
// Maximum number of processes that can be created for the real user ID of
// the calling process.
Processes int
// Value one greater than the maximum file descriptor number that can be
// opened by this process.
OpenFiles int
// Maximum number of bytes of memory that may be locked into RAM.
LockedMemory int
// Maximum size of the process's virtual memory address space in bytes.
AddressSpace int
// Limit on the combined number of flock(2) locks and fcntl(2) leases that
// this process may establish.
FileLocks int
// Limit of signals that may be queued for the real user ID of the calling
// process.
PendingSignals int
// Limit on the number of bytes that can be allocated for POSIX message
// queues for the real user ID of the calling process.
MsqqueueSize int
// Limit of the nice priority set using setpriority(2) or nice(2).
NicePriority int
// Limit of the real-time priority set using sched_setscheduler(2) or
// sched_setparam(2).
RealtimePriority int
RealtimeTimeout int
// Limit (in microseconds) on the amount of CPU time that a process
// scheduled under a real-time scheduling policy may consume without making
// a blocking system call.
RealtimeTimeout int
}
const (
@ -39,7 +66,7 @@ var (
// NewLimits returns the current soft limits of the process.
func (p Proc) NewLimits() (ProcLimits, error) {
f, err := p.open("limits")
f, err := os.Open(p.path("limits"))
if err != nil {
return ProcLimits{}, err
}
@ -60,7 +87,7 @@ func (p Proc) NewLimits() (ProcLimits, error) {
case "Max cpu time":
l.CPUTime, err = parseInt(fields[1])
case "Max file size":
l.FileLocks, err = parseInt(fields[1])
l.FileSize, err = parseInt(fields[1])
case "Max data size":
l.DataSize, err = parseInt(fields[1])
case "Max stack size":
@ -90,7 +117,6 @@ func (p Proc) NewLimits() (ProcLimits, error) {
case "Max realtime timeout":
l.RealtimeTimeout, err = parseInt(fields[1])
}
if err != nil {
return ProcLimits{}, err
}

View file

@ -7,15 +7,15 @@ import (
"os"
)
// Originally, this USER_HZ value was dynamically retrieved via a sysconf call which
// required cgo. However, that caused a lot of problems regarding
// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
// which required cgo. However, that caused a lot of problems regarding
// cross-compilation. Alternatives such as running a binary to determine the
// value, or trying to derive it in some other way were all problematic.
// After much research it was determined that USER_HZ is actually hardcoded to
// 100 on all Go-supported platforms as of the time of this writing. This is
// why we decided to hardcode it here as well. It is not impossible that there
// could be systems with exceptions, but they should be very exotic edge cases,
// and in that case, the worst outcome will be two misreported metrics.
// value, or trying to derive it in some other way were all problematic. After
// much research it was determined that USER_HZ is actually hardcoded to 100 on
// all Go-supported platforms as of the time of this writing. This is why we
// decided to hardcode it here as well. It is not impossible that there could
// be systems with exceptions, but they should be very exotic edge cases, and
// in that case, the worst outcome will be two misreported metrics.
//
// See also the following discussions:
//
@ -91,7 +91,7 @@ type ProcStat struct {
// NewStat returns the current status information of the process.
func (p Proc) NewStat() (ProcStat, error) {
f, err := p.open("stat")
f, err := os.Open(p.path("stat"))
if err != nil {
return ProcStat{}, err
}

View file

@ -3,6 +3,7 @@ package procfs
import (
"bufio"
"fmt"
"os"
"strconv"
"strings"
)
@ -25,7 +26,7 @@ func NewStat() (Stat, error) {
// NewStat returns an information about current kernel/system statistics.
func (fs FS) NewStat() (Stat, error) {
f, err := fs.open("stat")
f, err := os.Open(fs.Path("stat"))
if err != nil {
return Stat{}, err
}

View file

@ -133,7 +133,7 @@ type FederatedReplicaSetPreferences struct {
// +optional
Rebalance bool `json:"rebalance,omitempty"`
// A mapping between cluser names and preferences regarding local replicasets in these clusters.
// A mapping between cluster names and preferences regarding local ReplicaSet in these clusters.
// "*" (if provided) applies to all clusters if an explicit mapping is not provided. If there is no
// "*", clusters without explicit preferences should not have any replicas scheduled.
// +optional

View file

@ -1514,7 +1514,7 @@ message NodeSpec {
optional string providerID = 3;
// Unschedulable controls node schedulability of new pods. By default, node is schedulable.
// More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration"`
// More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration"
// +optional
optional bool unschedulable = 4;
}

View file

@ -2818,7 +2818,7 @@ type NodeSpec struct {
// +optional
ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"`
// Unschedulable controls node schedulability of new pods. By default, node is schedulable.
// More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration"`
// More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration"
// +optional
Unschedulable bool `json:"unschedulable,omitempty" protobuf:"varint,4,opt,name=unschedulable"`
}

View file

@ -901,7 +901,7 @@ var map_NodeSpec = map[string]string{
"podCIDR": "PodCIDR represents the pod IP range assigned to the node.",
"externalID": "External ID of the node assigned by some machine database (e.g. a cloud provider). Deprecated.",
"providerID": "ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>",
"unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration\"`",
"unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration\"",
}
func (NodeSpec) SwaggerDoc() map[string]string {

View file

@ -2666,7 +2666,13 @@ func ValidateServiceUpdate(service, oldService *api.Service) field.ErrorList {
}
// TODO(freehan): allow user to update loadbalancerSourceRanges
allErrs = append(allErrs, ValidateImmutableField(service.Spec.LoadBalancerSourceRanges, oldService.Spec.LoadBalancerSourceRanges, field.NewPath("spec", "loadBalancerSourceRanges"))...)
// Only allow removing LoadBalancerSourceRanges when change service type from LoadBalancer
// to non-LoadBalancer or adding LoadBalancerSourceRanges when change service type from
// non-LoadBalancer to LoadBalancer.
if service.Spec.Type != api.ServiceTypeLoadBalancer && oldService.Spec.Type != api.ServiceTypeLoadBalancer ||
service.Spec.Type == api.ServiceTypeLoadBalancer && oldService.Spec.Type == api.ServiceTypeLoadBalancer {
allErrs = append(allErrs, ValidateImmutableField(service.Spec.LoadBalancerSourceRanges, oldService.Spec.LoadBalancerSourceRanges, field.NewPath("spec", "loadBalancerSourceRanges"))...)
}
allErrs = append(allErrs, validateServiceFields(service)...)
allErrs = append(allErrs, validateServiceAnnotations(service, oldService)...)

View file

@ -50,6 +50,7 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&HorizontalPodAutoscaler{},
&HorizontalPodAutoscalerList{},
&api.ListOptions{},
&api.DeleteOptions{},
)
return nil
}

View file

@ -37,3 +37,16 @@ go_library(
"//vendor:github.com/ugorji/go/codec",
],
)
go_test(
name = "go_default_xtest",
srcs = ["defaults_test.go"],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/install:go_default_library",
"//pkg/apis/autoscaling/install:go_default_library",
"//pkg/apis/autoscaling/v1:go_default_library",
"//pkg/runtime:go_default_library",
],
)

View file

@ -52,6 +52,7 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&CronJob{},
&CronJobList{},
&api.ListOptions{},
&api.DeleteOptions{},
)
scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("ScheduledJob"), &CronJob{})
scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("ScheduledJobList"), &CronJobList{})

File diff suppressed because it is too large Load diff

View file

@ -294,7 +294,7 @@ type KubeletConfiguration struct {
// And all Burstable and BestEffort pods are brought up under their
// specific top level QoS cgroup.
// +optional
CgroupsPerQOS bool `json:"cgroupsPerQOS,omitempty"`
ExperimentalCgroupsPerQOS bool `json:"experimentalCgroupsPerQOS,omitempty"`
// driver that the kubelet uses to manipulate cgroups on the host (cgroupfs or systemd)
// +optional
CgroupDriver string `json:"cgroupDriver,omitempty"`
@ -307,7 +307,7 @@ type KubeletConfiguration struct {
// +optional
SystemCgroups string `json:"systemCgroups,omitempty"`
// CgroupRoot is the root cgroup to use for pods.
// If CgroupsPerQOS is enabled, this is the root of the QoS cgroup hierarchy.
// If ExperimentalCgroupsPerQOS is enabled, this is the root of the QoS cgroup hierarchy.
// +optional
CgroupRoot string `json:"cgroupRoot,omitempty"`
// containerRuntime is the container runtime to use.
@ -466,6 +466,10 @@ type KubeletConfiguration struct {
// TODO(#34726:1.8.0): Remove the opt-in for failing when swap is enabled.
// Tells the Kubelet to fail to start if swap is enabled on the node.
ExperimentalFailSwapOn bool `json:"experimentalFailSwapOn,omitempty"`
// This flag, if set, enables a check prior to mount operations to verify that the required components
// (binaries, etc.) to mount the volume are available on the underlying node. If the check is enabled
// and fails, the mount operation fails.
ExperimentalCheckNodeCapabilitiesBeforeMount bool `json:"ExperimentalCheckNodeCapabilitiesBeforeMount,omitempty"`
}
type KubeletAuthorizationMode string

View file

@ -204,8 +204,8 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {
if obj.CertDirectory == "" {
obj.CertDirectory = "/var/run/kubernetes"
}
if obj.CgroupsPerQOS == nil {
obj.CgroupsPerQOS = boolVar(false)
if obj.ExperimentalCgroupsPerQOS == nil {
obj.ExperimentalCgroupsPerQOS = boolVar(false)
}
if obj.ContainerRuntime == "" {
obj.ContainerRuntime = "docker"
@ -391,9 +391,9 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {
temp := int32(defaultIPTablesDropBit)
obj.IPTablesDropBit = &temp
}
if obj.CgroupsPerQOS == nil {
if obj.ExperimentalCgroupsPerQOS == nil {
temp := false
obj.CgroupsPerQOS = &temp
obj.ExperimentalCgroupsPerQOS = &temp
}
if obj.CgroupDriver == "" {
obj.CgroupDriver = "cgroupfs"
@ -401,8 +401,8 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {
// NOTE: this is for backwards compatibility with earlier releases where cgroup-root was optional.
// if cgroups per qos is not enabled, and cgroup-root is not specified, we need to default to the
// container runtime default and not default to the root cgroup.
if obj.CgroupsPerQOS != nil {
if *obj.CgroupsPerQOS {
if obj.ExperimentalCgroupsPerQOS != nil {
if *obj.ExperimentalCgroupsPerQOS {
if obj.CgroupRoot == "" {
obj.CgroupRoot = "/"
}

View file

@ -355,7 +355,7 @@ type KubeletConfiguration struct {
// And all Burstable and BestEffort pods are brought up under their
// specific top level QoS cgroup.
// +optional
CgroupsPerQOS *bool `json:"cgroupsPerQOS,omitempty"`
ExperimentalCgroupsPerQOS *bool `json:"experimentalCgroupsPerQOS,omitempty"`
// driver that the kubelet uses to manipulate cgroups on the host (cgroupfs or systemd)
// +optional
CgroupDriver string `json:"cgroupDriver,omitempty"`
@ -505,6 +505,10 @@ type KubeletConfiguration struct {
// TODO(#34726:1.8.0): Remove the opt-in for failing when swap is enabled.
// Tells the Kubelet to fail to start if swap is enabled on the node.
ExperimentalFailSwapOn bool `json:"experimentalFailSwapOn,omitempty"`
// This flag, if set, enables a check prior to mount operations to verify that the required components
// (binaries, etc.) to mount the volume are available on the underlying node. If the check is enabled
// and fails, the mount operation fails.
ExperimentalCheckNodeCapabilitiesBeforeMount bool `json:"ExperimentalCheckNodeCapabilitiesBeforeMount,omitempty"`
}
type KubeletAuthorizationMode string

View file

@ -330,7 +330,7 @@ func autoConvert_v1alpha1_KubeletConfiguration_To_componentconfig_KubeletConfigu
out.RuntimeCgroups = in.RuntimeCgroups
out.SystemCgroups = in.SystemCgroups
out.CgroupRoot = in.CgroupRoot
if err := api.Convert_Pointer_bool_To_bool(&in.CgroupsPerQOS, &out.CgroupsPerQOS, s); err != nil {
if err := api.Convert_Pointer_bool_To_bool(&in.ExperimentalCgroupsPerQOS, &out.ExperimentalCgroupsPerQOS, s); err != nil {
return err
}
out.CgroupDriver = in.CgroupDriver
@ -407,6 +407,7 @@ func autoConvert_v1alpha1_KubeletConfiguration_To_componentconfig_KubeletConfigu
out.FeatureGates = in.FeatureGates
out.EnableCRI = in.EnableCRI
out.ExperimentalFailSwapOn = in.ExperimentalFailSwapOn
out.ExperimentalCheckNodeCapabilitiesBeforeMount = in.ExperimentalCheckNodeCapabilitiesBeforeMount
return nil
}
@ -495,7 +496,7 @@ func autoConvert_componentconfig_KubeletConfiguration_To_v1alpha1_KubeletConfigu
out.CloudProvider = in.CloudProvider
out.CloudConfigFile = in.CloudConfigFile
out.KubeletCgroups = in.KubeletCgroups
if err := api.Convert_bool_To_Pointer_bool(&in.CgroupsPerQOS, &out.CgroupsPerQOS, s); err != nil {
if err := api.Convert_bool_To_Pointer_bool(&in.ExperimentalCgroupsPerQOS, &out.ExperimentalCgroupsPerQOS, s); err != nil {
return err
}
out.CgroupDriver = in.CgroupDriver
@ -575,6 +576,7 @@ func autoConvert_componentconfig_KubeletConfiguration_To_v1alpha1_KubeletConfigu
out.FeatureGates = in.FeatureGates
out.EnableCRI = in.EnableCRI
out.ExperimentalFailSwapOn = in.ExperimentalFailSwapOn
out.ExperimentalCheckNodeCapabilitiesBeforeMount = in.ExperimentalCheckNodeCapabilitiesBeforeMount
return nil
}

View file

@ -302,12 +302,12 @@ func DeepCopy_v1alpha1_KubeletConfiguration(in interface{}, out interface{}, c *
out.RuntimeCgroups = in.RuntimeCgroups
out.SystemCgroups = in.SystemCgroups
out.CgroupRoot = in.CgroupRoot
if in.CgroupsPerQOS != nil {
in, out := &in.CgroupsPerQOS, &out.CgroupsPerQOS
if in.ExperimentalCgroupsPerQOS != nil {
in, out := &in.ExperimentalCgroupsPerQOS, &out.ExperimentalCgroupsPerQOS
*out = new(bool)
**out = **in
} else {
out.CgroupsPerQOS = nil
out.ExperimentalCgroupsPerQOS = nil
}
out.CgroupDriver = in.CgroupDriver
out.ContainerRuntime = in.ContainerRuntime
@ -461,6 +461,7 @@ func DeepCopy_v1alpha1_KubeletConfiguration(in interface{}, out interface{}, c *
out.FeatureGates = in.FeatureGates
out.EnableCRI = in.EnableCRI
out.ExperimentalFailSwapOn = in.ExperimentalFailSwapOn
out.ExperimentalCheckNodeCapabilitiesBeforeMount = in.ExperimentalCheckNodeCapabilitiesBeforeMount
return nil
}
}

View file

@ -308,7 +308,7 @@ func DeepCopy_componentconfig_KubeletConfiguration(in interface{}, out interface
out.CloudProvider = in.CloudProvider
out.CloudConfigFile = in.CloudConfigFile
out.KubeletCgroups = in.KubeletCgroups
out.CgroupsPerQOS = in.CgroupsPerQOS
out.ExperimentalCgroupsPerQOS = in.ExperimentalCgroupsPerQOS
out.CgroupDriver = in.CgroupDriver
out.RuntimeCgroups = in.RuntimeCgroups
out.SystemCgroups = in.SystemCgroups
@ -392,6 +392,7 @@ func DeepCopy_componentconfig_KubeletConfiguration(in interface{}, out interface
out.FeatureGates = in.FeatureGates
out.EnableCRI = in.EnableCRI
out.ExperimentalFailSwapOn = in.ExperimentalFailSwapOn
out.ExperimentalCheckNodeCapabilitiesBeforeMount = in.ExperimentalCheckNodeCapabilitiesBeforeMount
return nil
}
}

View file

@ -124,7 +124,6 @@ type ThirdPartyResource struct {
Description string `json:"description,omitempty"`
// Versions are versions for this third party object
// +optional
Versions []APIVersion `json:"versions,omitempty"`
}
@ -143,7 +142,6 @@ type ThirdPartyResourceList struct {
// TODO: we should consider merge this struct with GroupVersion in unversioned.go
type APIVersion struct {
// Name of this version (e.g. 'v1').
// +optional
Name string `json:"name,omitempty"`
}

View file

@ -68,6 +68,9 @@ func ValidateThirdPartyResource(obj *extensions.ThirdPartyResource) field.ErrorL
allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&obj.ObjectMeta, false, ValidateThirdPartyResourceName, field.NewPath("metadata"))...)
versions := sets.String{}
if len(obj.Versions) == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("versions"), "must specify at least one version"))
}
for ix := range obj.Versions {
version := &obj.Versions[ix]
if len(version.Name) == 0 {

View file

@ -69,6 +69,7 @@ func (i *DefaultInfo) GetExtra() map[string][]string {
// well-known user and group names
const (
SystemPrivilegedGroup = "system:masters"
NodesGroup = "system:nodes"
AllUnauthenticated = "system:unauthenticated"
AllAuthenticated = "system:authenticated"

View file

@ -29,7 +29,7 @@ import (
type Config struct {
// The queue for your objects; either a FIFO or
// a DeltaFIFO. Your Process() function should accept
// the output of this Oueue's Pop() method.
// the output of this Queue's Pop() method.
Queue
// Something that can list and watch your objects.
@ -121,6 +121,11 @@ func (c *Controller) Requeue(obj interface{}) error {
// TODO: Consider doing the processing in parallel. This will require a little thought
// to make sure that we don't end up processing the same object multiple times
// concurrently.
//
// TODO: Plumb through the stopCh here (and down to the queue) so that this can
// actually exit when the controller is stopped. Or just give up on this stuff
// ever being stoppable. Converting this whole package to use Context would
// also be helpful.
func (c *Controller) processLoop() {
for {
obj, err := c.config.Queue.Pop(PopProcessFunc(c.config.Process))
@ -134,7 +139,7 @@ func (c *Controller) processLoop() {
}
// ResourceEventHandler can handle notifications for events that happen to a
// resource. The events are informational only, so you can't return an
// resource. The events are informational only, so you can't return an
// error.
// * OnAdd is called when an object is added.
// * OnUpdate is called when an object is modified. Note that oldObj is the

View file

@ -45,7 +45,7 @@ import (
// Reflector watches a specified resource and causes all changes to be reflected in the given store.
type Reflector struct {
// name identifies this reflector. By default it will be a file:line if possible.
// name identifies this reflector. By default it will be a file:line if possible.
name string
// The type of object we expect to place in the store.
@ -74,12 +74,6 @@ var (
// However, it can be modified to avoid periodic resync to break the
// TCP connection.
minWatchTimeout = 5 * time.Minute
// If we are within 'forceResyncThreshold' from the next planned resync
// and are just before issuing Watch(), resync will be forced now.
forceResyncThreshold = 3 * time.Second
// We try to set timeouts for Watch() so that we will finish about
// than 'timeoutThreshold' from next planned periodic resync.
timeoutThreshold = 1 * time.Second
)
// NewNamespaceKeyedIndexerAndReflector creates an Indexer and a Reflector
@ -114,7 +108,7 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{},
return r
}
// internalPackages are packages that are ignored when creating a default reflector name.  These packages are in the common
// internalPackages are packages that are ignored when creating a default reflector name. These packages are in the common
// call chains to NewReflector, so they'd be low entropy names for reflectors
var internalPackages = []string{"kubernetes/pkg/client/cache/", "/runtime/asm_"}

View file

@ -148,14 +148,14 @@ type EventSinkImpl struct {
Interface EventInterface
}
func (e EventSinkImpl) Create(event *api.Event) (*api.Event, error) {
func (e *EventSinkImpl) Create(event *api.Event) (*api.Event, error) {
return e.Interface.CreateWithEventNamespace(event)
}
func (e EventSinkImpl) Update(event *api.Event) (*api.Event, error) {
func (e *EventSinkImpl) Update(event *api.Event) (*api.Event, error) {
return e.Interface.UpdateWithEventNamespace(event)
}
func (e EventSinkImpl) Patch(event *api.Event, data []byte) (*api.Event, error) {
func (e *EventSinkImpl) Patch(event *api.Event, data []byte) (*api.Event, error) {
return e.Interface.PatchWithEventNamespace(event, data)
}
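
Why the receiver change matters, sketched with simplified stand-in types rather than the real client-go interfaces: with pointer receivers, only *EventSinkImpl satisfies the sink interface, so callers must pass &EventSinkImpl{...}.

package main

import "fmt"

type Event struct{ Msg string }

// A simplified stand-in for the real event-sink interface.
type EventSink interface {
	Create(*Event) (*Event, error)
}

type EventSinkImpl struct{}

// Pointer receiver: the method set of the value type EventSinkImpl no
// longer includes Create; only *EventSinkImpl implements EventSink.
func (e *EventSinkImpl) Create(ev *Event) (*Event, error) { return ev, nil }

func main() {
	var sink EventSink = &EventSinkImpl{} // a bare EventSinkImpl{} would not compile here
	ev, _ := sink.Create(&Event{Msg: "scheduled"})
	fmt.Println(ev.Msg)
}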

View file

@ -213,9 +213,11 @@ func (d *DiscoveryClient) serverPreferredResources(namespaced bool) ([]unversion
const maxRetries = 2
var failedGroups map[unversioned.GroupVersion]error
var results []unversioned.GroupVersionResource
var resources map[unversioned.GroupResource]string
RetrieveGroups:
for i := 0; i < maxRetries; i++ {
results = []unversioned.GroupVersionResource{}
resources = map[unversioned.GroupResource]string{}
failedGroups = make(map[unversioned.GroupVersion]error)
serverGroupList, err := d.ServerGroups()
if err != nil {
@ -223,25 +225,40 @@ RetrieveGroups:
}
for _, apiGroup := range serverGroupList.Groups {
preferredVersion := apiGroup.PreferredVersion
groupVersion := unversioned.GroupVersion{Group: apiGroup.Name, Version: preferredVersion.Version}
apiResourceList, err := d.ServerResourcesForGroupVersion(preferredVersion.GroupVersion)
if err != nil {
if i < maxRetries-1 {
continue RetrieveGroups
}
failedGroups[groupVersion] = err
continue
}
for _, apiResource := range apiResourceList.APIResources {
// ignore the root scoped resources if "namespaced" is true.
if namespaced && !apiResource.Namespaced {
versions := apiGroup.Versions
for _, version := range versions {
groupVersion := unversioned.GroupVersion{Group: apiGroup.Name, Version: version.Version}
apiResourceList, err := d.ServerResourcesForGroupVersion(version.GroupVersion)
if err != nil {
if i < maxRetries-1 {
continue RetrieveGroups
}
failedGroups[groupVersion] = err
continue
}
if strings.Contains(apiResource.Name, "/") {
continue
for _, apiResource := range apiResourceList.APIResources {
// ignore the root scoped resources if "namespaced" is true.
if namespaced && !apiResource.Namespaced {
continue
}
if strings.Contains(apiResource.Name, "/") {
continue
}
gvr := groupVersion.WithResource(apiResource.Name)
if _, ok := resources[gvr.GroupResource()]; ok {
if gvr.Version != apiGroup.PreferredVersion.Version {
continue
}
// remove previous entry, because it will be replaced with a preferred one
for i := range results {
if results[i].GroupResource() == gvr.GroupResource() {
results = append(results[:i], results[i+1:]...)
}
}
}
resources[gvr.GroupResource()] = gvr.Version
results = append(results, gvr)
}
results = append(results, groupVersion.WithResource(apiResource.Name))
}
}
if len(failedGroups) == 0 {

View file

@ -608,6 +608,8 @@ func (gce *GCECloud) EnsureLoadBalancer(clusterName string, apiService *api.Serv
// an IP, we assume they are managing it themselves. Otherwise, we will
// release the IP in case of early-terminating failure or upon successful
// creating of the LB.
// TODO(#36535): boil this logic down into a set of component functions
// and key the flag values off of errors returned.
isUserOwnedIP := false // if this is set, we never release the IP
isSafeToReleaseIP := false
defer func() {
@ -735,7 +737,7 @@ func (gce *GCECloud) EnsureLoadBalancer(clusterName string, apiService *api.Serv
return nil, fmt.Errorf("Error checking HTTP health check %s: %v", loadBalancerName, err)
}
if path, healthCheckNodePort := apiservice.GetServiceHealthCheckPathPort(apiService); path != "" {
glog.V(4).Infof("service %v needs health checks on :%d/%s)", apiService.Name, healthCheckNodePort, path)
glog.V(4).Infof("service %v (%v) needs health checks on :%d%s)", apiService.Name, loadBalancerName, healthCheckNodePort, path)
if err != nil {
// This logic exists to detect a transition for a pre-existing service and turn on
// the tpNeedsUpdate flag to delete/recreate fwdrule/tpool adding the health check
@ -1080,13 +1082,18 @@ func (gce *GCECloud) createTargetPool(name, serviceName, region string, hosts []
for _, host := range hosts {
instances = append(instances, makeHostURL(gce.projectID, host.Zone, host.Name))
}
// health check management is coupled with targetPools to prevent leaks. A
// target pool is the only thing that requires a health check, so we delete
// associated checks on teardown, and ensure checks on setup.
hcLinks := []string{}
if hc != nil {
var err error
if hc, err = gce.ensureHttpHealthCheck(name, hc.RequestPath, int32(hc.Port)); err != nil || hc == nil {
return fmt.Errorf("Failed to ensure health check for %v port %d path %v: %v", name, hc.Port, hc.RequestPath, err)
}
hcLinks = append(hcLinks, hc.SelfLink)
}
if len(hcLinks) > 0 {
glog.Infof("Creating targetpool %v with healthchecking", name)
}
glog.Infof("Creating targetpool %v with %d healthchecks", name, len(hcLinks))
pool := &compute.TargetPool{
Name: name,
Description: fmt.Sprintf(`{"kubernetes.io/service-name":"%s"}`, serviceName),

View file

@ -520,11 +520,11 @@ func (r RealPodControl) DeletePod(namespace string, podID string, object runtime
if err != nil {
return fmt.Errorf("object does not have ObjectMeta, %v", err)
}
glog.V(2).Infof("Controller %v deleting pod %v/%v", accessor.GetName(), namespace, podID)
if err := r.KubeClient.Core().Pods(namespace).Delete(podID, nil); err != nil {
r.Recorder.Eventf(object, api.EventTypeWarning, FailedDeletePodReason, "Error deleting: %v", err)
return fmt.Errorf("unable to delete pods: %v", err)
} else {
glog.V(4).Infof("Controller %v deleted pod %v", accessor.GetName(), podID)
r.Recorder.Eventf(object, api.EventTypeNormal, SuccessfulDeletePodReason, "Deleted pod: %v", podID)
}
return nil

View file

@ -46,7 +46,6 @@ go_library(
"sorted_resource_name_list.go",
"sorting_printer.go",
"stop.go",
"version.go",
],
tags = ["automanaged"],
deps = [
@ -69,6 +68,7 @@ go_library(
"//pkg/apis/batch/v2alpha1:go_default_library",
"//pkg/apis/certificates:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/apis/policy:go_default_library",
"//pkg/apis/rbac:go_default_library",
"//pkg/apis/storage:go_default_library",
"//pkg/apis/storage/util:go_default_library",
@ -101,7 +101,6 @@ go_library(
"//pkg/util/uuid:go_default_library",
"//pkg/util/validation:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/version:go_default_library",
"//pkg/watch:go_default_library",
"//vendor:github.com/emicklei/go-restful/swagger",
"//vendor:github.com/ghodss/yaml",
@ -155,6 +154,7 @@ go_test(
"//pkg/apimachinery/registered:go_default_library",
"//pkg/apis/batch:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/apis/policy:go_default_library",
"//pkg/apis/storage:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/clientset_generated/internalclientset/fake:go_default_library",

View file

@ -386,18 +386,6 @@ func (f *factory) UnstructuredObject() (meta.RESTMapper, runtime.ObjectTyper, er
return nil, nil, err
}
// Register unknown APIs as third party for now to make
// validation happy. TODO perhaps make a dynamic schema
// validator to avoid this.
for _, group := range groupResources {
for _, version := range group.Group.Versions {
gv := unversioned.GroupVersion{Group: group.Group.Name, Version: version.Version}
if !registered.IsRegisteredVersion(gv) {
registered.AddThirdPartyAPIGroupVersions(gv)
}
}
}
mapper := discovery.NewDeferredDiscoveryRESTMapper(discoveryClient, meta.InterfacesForUnstructured)
typer := discovery.NewUnstructuredObjectTyper(groupResources)
return NewShortcutExpander(mapper, discoveryClient), typer, nil
@ -1148,10 +1136,7 @@ func (c *clientSwaggerSchema) ValidateBytes(data []byte) error {
return err
}
if ok := registered.IsEnabledVersion(gvk.GroupVersion()); !ok {
return fmt.Errorf("API version %q isn't supported, only supports API versions %q", gvk.GroupVersion().String(), registered.EnabledVersions())
}
if registered.IsThirdPartyAPIGroupVersion(gvk.GroupVersion()) {
// Don't attempt to validate third party objects
// if we don't have this in our scheme, just skip validation because its an object we don't recognize
return nil
}

View file

@ -40,6 +40,7 @@ import (
"k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/apis/certificates"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/apis/policy"
"k8s.io/kubernetes/pkg/apis/storage"
storageutil "k8s.io/kubernetes/pkg/apis/storage/util"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
@ -120,6 +121,7 @@ func describerMap(c clientset.Interface) map[unversioned.GroupKind]Describer {
apps.Kind("StatefulSet"): &StatefulSetDescriber{c},
certificates.Kind("CertificateSigningRequest"): &CertificateSigningRequestDescriber{c},
storage.Kind("StorageClass"): &StorageClassDescriber{c},
policy.Kind("PodDisruptionBudget"): &PodDisruptionBudgetDescriber{c},
}
return m
@ -1801,6 +1803,7 @@ func (d *NodeDescriber) Describe(namespace, name string, describerSettings Descr
func describeNode(node *api.Node, nodeNonTerminatedPodsList *api.PodList, events *api.EventList, canViewPods bool) (string, error) {
return tabbedString(func(out io.Writer) error {
fmt.Fprintf(out, "Name:\t%s\n", node.Name)
fmt.Fprintf(out, "Role:\t%s\n", findNodeRole(node))
printLabelsMultiline(out, "Labels", node.Labels)
printTaintsInAnnotationMultiline(out, "Taints", node.Annotations)
fmt.Fprintf(out, "CreationTimestamp:\t%s\n", node.CreationTimestamp.Time.Format(time.RFC1123Z))
@ -2407,6 +2410,41 @@ func (s *StorageClassDescriber) Describe(namespace, name string, describerSettin
})
}
type PodDisruptionBudgetDescriber struct {
clientset.Interface
}
func (p *PodDisruptionBudgetDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {
pdb, err := p.Policy().PodDisruptionBudgets(namespace).Get(name)
if err != nil {
return "", err
}
return tabbedString(func(out io.Writer) error {
fmt.Fprintf(out, "Name:\t%s\n", pdb.Name)
fmt.Fprintf(out, "Min available:\t%s\n", pdb.Spec.MinAvailable.String())
if pdb.Spec.Selector != nil {
fmt.Fprintf(out, "Selector:\t%s\n", unversioned.FormatLabelSelector(pdb.Spec.Selector))
} else {
fmt.Fprintf(out, "Selector:\t<unset>\n")
}
fmt.Fprintf(out, "Status:\n")
fmt.Fprintf(out, " Allowed disruptions:\t%d\n", pdb.Status.PodDisruptionsAllowed)
fmt.Fprintf(out, " Current:\t%d\n", pdb.Status.CurrentHealthy)
fmt.Fprintf(out, " Desired:\t%d\n", pdb.Status.DesiredHealthy)
fmt.Fprintf(out, " Total:\t%d\n", pdb.Status.ExpectedPods)
if describerSettings.ShowEvents {
events, err := p.Core().Events(namespace).Search(pdb)
if err != nil {
return err
}
if events != nil {
DescribeEvents(events, out)
}
}
return nil
})
}
// newErrNoDescriber creates a new ErrNoDescriber with the names of the provided types.
func newErrNoDescriber(types ...reflect.Type) error {
names := make([]string, 0, len(types))
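A minimal usage sketch for the new describer; the clientset `c`, namespace, and name are hypothetical:

```go
// Sketch: describe a PodDisruptionBudget, including its events.
d := &PodDisruptionBudgetDescriber{c} // c is a clientset.Interface (hypothetical)
out, err := d.Describe("default", "my-pdb", DescriberSettings{ShowEvents: true})
if err != nil {
	return err
}
fmt.Print(out)
```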

View file

@ -58,6 +58,17 @@ func filterPods(obj runtime.Object, options PrintOptions) bool {
// Filter loops through a collection of FilterFuncs until it finds one that can filter the given resource
func (f Filters) Filter(obj runtime.Object, opts *PrintOptions) (bool, error) {
// check if the object is unstructured. If so, let's attempt to convert it to a type we can understand
// before applying the filter func.
switch obj.(type) {
case *runtime.UnstructuredList, *runtime.Unstructured, *runtime.Unknown:
if objBytes, err := runtime.Encode(api.Codecs.LegacyCodec(), obj); err == nil {
if decodedObj, err := runtime.Decode(api.Codecs.UniversalDecoder(), objBytes); err == nil {
obj = decodedObj
}
}
}
for _, filter := range f {
if ok := filter(obj, *opts); ok {
return true, nil

View file

@ -40,6 +40,7 @@ import (
"k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/apis/certificates"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/apis/policy"
"k8s.io/kubernetes/pkg/apis/rbac"
"k8s.io/kubernetes/pkg/apis/storage"
storageutil "k8s.io/kubernetes/pkg/apis/storage/util"
@ -474,6 +475,7 @@ func (h *HumanReadablePrinter) AfterPrint(output io.Writer, res string) error {
var (
podColumns = []string{"NAME", "READY", "STATUS", "RESTARTS", "AGE"}
podTemplateColumns = []string{"TEMPLATE", "CONTAINER(S)", "IMAGE(S)", "PODLABELS"}
podDisruptionBudgetColumns = []string{"NAME", "MIN-AVAILABLE", "ALLOWED-DISRUPTIONS", "AGE"}
replicationControllerColumns = []string{"NAME", "DESIRED", "CURRENT", "READY", "AGE"}
replicaSetColumns = []string{"NAME", "DESIRED", "CURRENT", "READY", "AGE"}
jobColumns = []string{"NAME", "DESIRED", "SUCCESSFUL", "AGE"}
@ -536,6 +538,8 @@ func (h *HumanReadablePrinter) addDefaultHandlers() {
h.Handler(podColumns, h.printPod)
h.Handler(podTemplateColumns, printPodTemplate)
h.Handler(podTemplateColumns, printPodTemplateList)
h.Handler(podDisruptionBudgetColumns, printPodDisruptionBudget)
h.Handler(podDisruptionBudgetColumns, printPodDisruptionBudgetList)
h.Handler(replicationControllerColumns, printReplicationController)
h.Handler(replicationControllerColumns, printReplicationControllerList)
h.Handler(replicaSetColumns, printReplicaSet)
@ -828,6 +832,37 @@ func printPodTemplateList(podList *api.PodTemplateList, w io.Writer, options Pri
return nil
}
func printPodDisruptionBudget(pdb *policy.PodDisruptionBudget, w io.Writer, options PrintOptions) error {
// columns: name, min available, allowed disruptions, age
name := formatResourceName(options.Kind, pdb.Name, options.WithKind)
namespace := pdb.Namespace
if options.WithNamespace {
if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil {
return err
}
}
if _, err := fmt.Fprintf(w, "%s\t%s\t%d\t%s\n",
name,
pdb.Spec.MinAvailable.String(),
pdb.Status.PodDisruptionsAllowed,
translateTimestamp(pdb.CreationTimestamp),
); err != nil {
return err
}
return nil
}
func printPodDisruptionBudgetList(pdbList *policy.PodDisruptionBudgetList, w io.Writer, options PrintOptions) error {
for _, pdb := range pdbList.Items {
if err := printPodDisruptionBudget(&pdb, w, options); err != nil {
return err
}
}
return nil
}
// TODO(AdoHe): try to put wide output in a single method
func printReplicationController(controller *api.ReplicationController, w io.Writer, options PrintOptions) error {
name := formatResourceName(options.Kind, controller.Name, options.WithKind)
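A sketch of driving the new PDB row printer the way the human-readable printer does, via a tabwriter; the `pdb` value is hypothetical:

```go
// Sketch: render the PDB header columns and a single row.
w := tabwriter.NewWriter(os.Stdout, 10, 4, 3, ' ', 0)
fmt.Fprintln(w, strings.Join(podDisruptionBudgetColumns, "\t"))
if err := printPodDisruptionBudget(pdb, w, PrintOptions{}); err != nil {
	return err
}
w.Flush()
```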
@ -1491,6 +1526,10 @@ func printNode(node *api.Node, w io.Writer, options PrintOptions) error {
if node.Spec.Unschedulable {
status = append(status, "SchedulingDisabled")
}
role := findNodeRole(node)
if role != "" {
status = append(status, role)
}
if _, err := fmt.Fprintf(w, "%s\t%s\t%s", name, strings.Join(status, ","), translateTimestamp(node.CreationTimestamp)); err != nil {
return err
@ -1520,6 +1559,22 @@ func getNodeExternalIP(node *api.Node) string {
return "<none>"
}
// findNodeRole returns the role of a given node, or "" if none found.
// The role is determined by looking in order for:
// * a kubernetes.io/role label
// * a kubeadm.alpha.kubernetes.io/role label
// If no role is found, ("", nil) is returned
func findNodeRole(node *api.Node) string {
if role := node.Labels[unversioned.NodeLabelRole]; role != "" {
return role
}
if role := node.Labels[unversioned.NodeLabelKubeadmAlphaRole]; role != "" {
return role
}
// No role found
return ""
}
func printNodeList(list *api.NodeList, w io.Writer, options PrintOptions) error {
for _, node := range list.Items {
if err := printNode(&node, w, options); err != nil {
@ -2263,7 +2318,7 @@ func (h *HumanReadablePrinter) PrintObj(obj runtime.Object, output io.Writer) er
// check if the object is unstructured. If so, let's attempt to convert it to a type we can understand before
// trying to print, since the printers are keyed by type. This is extremely expensive.
switch obj.(type) {
case *runtime.Unstructured, *runtime.Unknown:
case *runtime.UnstructuredList, *runtime.Unstructured, *runtime.Unknown:
if objBytes, err := runtime.Encode(api.Codecs.LegacyCodec(), obj); err == nil {
if decodedObj, err := runtime.Decode(api.Codecs.UniversalDecoder(), objBytes); err == nil {
obj = decodedObj

View file

@ -1,28 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubectl
import (
"fmt"
"io"
"k8s.io/kubernetes/pkg/version"
)
func GetClientVersion(w io.Writer) {
fmt.Fprintf(w, "Client Version: %#v\n", version.Get())
}

View file

@ -87,6 +87,7 @@ go_test(
"//pkg/storage/etcd:go_default_library",
"//pkg/storage/etcd/etcdtest:go_default_library",
"//pkg/storage/etcd/testing:go_default_library",
"//pkg/storage/etcd3:go_default_library",
"//pkg/util/sets:go_default_library",
"//pkg/util/wait:go_default_library",
"//pkg/watch:go_default_library",

View file

@ -43,18 +43,26 @@ const (
dynamicKubeletConfig = "DynamicKubeletConfig"
dynamicVolumeProvisioning = "DynamicVolumeProvisioning"
streamingProxyRedirects = "StreamingProxyRedirects"
// experimentalHostUserNamespaceDefaultingGate defaults userns=host for
// containers that use other host namespaces or host mounts, when the pod
// contains a privileged container, or when specific non-namespaced
// capabilities (MKNOD, SYS_MODULE, SYS_TIME) are requested. This should only
// be enabled if user namespace remapping is enabled in the docker daemon.
experimentalHostUserNamespaceDefaultingGate = "ExperimentalHostUserNamespaceDefaulting"
)
var (
// Default values for recorded features. Every new feature gate should be
// represented here.
knownFeatures = map[string]featureSpec{
allAlphaGate: {false, alpha},
externalTrafficLocalOnly: {true, beta},
appArmor: {true, beta},
dynamicKubeletConfig: {false, alpha},
dynamicVolumeProvisioning: {true, alpha},
streamingProxyRedirects: {false, alpha},
allAlphaGate: {false, alpha},
externalTrafficLocalOnly: {true, beta},
appArmor: {true, beta},
dynamicKubeletConfig: {false, alpha},
dynamicVolumeProvisioning: {true, alpha},
streamingProxyRedirects: {false, alpha},
experimentalHostUserNamespaceDefaultingGate: {false, alpha},
}
// Special handling for a few gates.
@ -115,6 +123,10 @@ type FeatureGate interface {
// owner: timstclair
// alpha: v1.5
StreamingProxyRedirects() bool
// owner: @pweil-
// alpha: v1.5
ExperimentalHostUserNamespaceDefaulting() bool
}
// featureGate implements FeatureGate as well as pflag.Value for flag parsing.
@ -209,6 +221,11 @@ func (f *featureGate) StreamingProxyRedirects() bool {
return f.lookup(streamingProxyRedirects)
}
// ExperimentalHostUserNamespaceDefaulting returns value for experimentalHostUserNamespaceDefaulting
func (f *featureGate) ExperimentalHostUserNamespaceDefaulting() bool {
return f.lookup(experimentalHostUserNamespaceDefaultingGate)
}
func (f *featureGate) lookup(key string) bool {
defaultValue := f.known[key].enabled
if f.enabled != nil {
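A self-contained sketch of the lookup pattern above, showing an explicit setting overriding the compiled-in default; this mirrors the shape of the diff and is not the upstream API:

```go
package main

import "fmt"

type featureSpec struct {
	enabled    bool
	prerelease string
}

type featureGate struct {
	known   map[string]featureSpec // compiled-in defaults
	enabled map[string]bool        // explicit overrides, e.g. from --feature-gates
}

func (f *featureGate) lookup(key string) bool {
	defaultValue := f.known[key].enabled
	if f.enabled != nil {
		if v, ok := f.enabled[key]; ok {
			return v // an explicit setting wins over the default
		}
	}
	return defaultValue
}

func main() {
	f := &featureGate{
		known:   map[string]featureSpec{"ExperimentalHostUserNamespaceDefaulting": {false, "alpha"}},
		enabled: map[string]bool{"ExperimentalHostUserNamespaceDefaulting": true},
	}
	fmt.Println(f.lookup("ExperimentalHostUserNamespaceDefaulting")) // true
}
```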

View file

@ -30,7 +30,8 @@ import (
const (
// Default mount command if mounter path is not specified
defaultMountCommand = "mount"
defaultMountCommand = "mount"
MountsInGlobalPDPath = "mounts"
)
type Interface interface {
@ -189,9 +190,15 @@ func getDeviceNameFromMount(mounter Interface, mountPath, pluginDir string) (str
glog.V(4).Infof("Directory %s is not mounted", mountPath)
return "", fmt.Errorf("directory %s is not mounted", mountPath)
}
basemountPath := path.Join(pluginDir, MountsInGlobalPDPath)
for _, ref := range refs {
if strings.HasPrefix(ref, pluginDir) {
return path.Base(ref), nil
if strings.HasPrefix(ref, basemountPath) {
volumeID, err := filepath.Rel(basemountPath, ref)
if err != nil {
glog.Errorf("Failed to get volume id from mount %s - %v", mountPath, err)
return "", err
}
return volumeID, nil
}
}
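The volume-ID extraction above boils down to a `filepath.Rel` call against the plugin's `mounts` directory; a runnable sketch with a hypothetical plugin path:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Layout assumed by the change: <pluginDir>/mounts/<volumeID>
	pluginDir := "/var/lib/kubelet/plugins/kubernetes.io/gce-pd" // hypothetical
	base := filepath.Join(pluginDir, "mounts")
	ref := filepath.Join(base, "my-disk-1")

	volumeID, err := filepath.Rel(base, ref)
	if err != nil {
		panic(err)
	}
	fmt.Println(volumeID) // my-disk-1
}
```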

View file

@ -36,33 +36,42 @@ var ForeverTestTimeout = time.Second * 30
// NeverStop may be passed to Until to make it never stop.
var NeverStop <-chan struct{} = make(chan struct{})
// Forever is syntactic sugar on top of Until
// Forever calls f every period, forever.
//
// Forever is syntactic sugar on top of Until.
func Forever(f func(), period time.Duration) {
Until(f, period, NeverStop)
}
// Until loops until stop channel is closed, running f every period.
// Until is syntactic sugar on top of JitterUntil with zero jitter
// factor, with sliding = true (which means the timer for period
// starts after the f completes).
//
// Until is syntactic sugar on top of JitterUntil with zero jitter factor and
// with sliding = true (which means the timer for period starts after the f
// completes).
func Until(f func(), period time.Duration, stopCh <-chan struct{}) {
JitterUntil(f, period, 0.0, true, stopCh)
}
// NonSlidingUntil loops until stop channel is closed, running f every
// period. NonSlidingUntil is syntactic sugar on top of JitterUntil
// with zero jitter factor, with sliding = false (meaning the timer for
// period starts at the same time as the function starts).
// period.
//
// NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter
// factor, with sliding = false (meaning the timer for period starts at the same
// time as the function starts).
func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) {
JitterUntil(f, period, 0.0, false, stopCh)
}
// JitterUntil loops until stop channel is closed, running f every period.
//
// If jitterFactor is positive, the period is jittered before every run of f.
// If jitterFactor is not positive, the period is unchanged.
// Catches any panics, and keeps going. f may not be invoked if
// stop channel is already closed. Pass NeverStop to Until if you
// don't want it stop.
// If jitterFactor is not positive, the period is unchanged and not jittered.
//
// If sliding is true, the period is computed after f runs. If it is false then
// period includes the runtime for f.
//
// Close stopCh to stop. f may not be invoked if stop channel is already
// closed. Pass NeverStop if you don't want it to stop.
func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) {
for {
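A minimal usage sketch for Until as documented above (assuming the package is imported as `wait`; `syncOnce` is hypothetical):

```go
// Run syncOnce every 30 seconds until stopCh is closed; with sliding = true
// the period timer starts after each run completes.
stopCh := make(chan struct{})
go wait.Until(syncOnce, 30*time.Second, stopCh)
// ... later, to stop the loop:
close(stopCh)
```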
@ -104,9 +113,11 @@ func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding b
}
}
// Jitter returns a time.Duration between duration and duration + maxFactor * duration,
// to allow clients to avoid converging on periodic behavior. If maxFactor is 0.0, a
// suggested default value will be chosen.
// Jitter returns a time.Duration between duration and duration + maxFactor *
// duration.
//
// This allows clients to avoid converging on periodic behavior. If maxFactor
// is 0.0, a suggested default value will be chosen.
func Jitter(duration time.Duration, maxFactor float64) time.Duration {
if maxFactor <= 0.0 {
maxFactor = 1.0
@ -115,26 +126,31 @@ func Jitter(duration time.Duration, maxFactor float64) time.Duration {
return wait
}
// ErrWaitTimeout is returned when the condition exited without success
// ErrWaitTimeout is returned when the condition exited without success.
var ErrWaitTimeout = errors.New("timed out waiting for the condition")
// ConditionFunc returns true if the condition is satisfied, or an error
// if the loop should be aborted.
type ConditionFunc func() (done bool, err error)
// Backoff is parameters applied to a Backoff function.
// Backoff holds parameters applied to a Backoff function.
type Backoff struct {
Duration time.Duration
Factor float64
Jitter float64
Steps int
Duration time.Duration // the base duration
Factor float64 // Duration is multiplied by factor each iteration
Jitter float64 // The amount of jitter applied each iteration
Steps int // Exit with error after this many steps
}
// ExponentialBackoff repeats a condition check up to steps times, increasing the wait
// by multipling the previous duration by factor. If jitter is greater than zero,
// a random amount of each duration is added (between duration and duration*(1+jitter)).
// If the condition never returns true, ErrWaitTimeout is returned. All other errors
// terminate immediately.
// ExponentialBackoff repeats a condition check with exponential backoff.
//
// It checks the condition up to Steps times, increasing the wait by multiplying
// the previous duration by Factor.
//
// If Jitter is greater than zero, a random amount of each duration is added
// (between duration and duration*(1+jitter)).
//
// If the condition never returns true, ErrWaitTimeout is returned. All other
// errors terminate immediately.
func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error {
duration := backoff.Duration
for i := 0; i < backoff.Steps; i++ {
@ -154,22 +170,33 @@ func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error {
}
// Poll tries a condition func until it returns true, an error, or the timeout
// is reached. condition will always be invoked at least once but some intervals
// may be missed if the condition takes too long or the time window is too short.
// is reached.
//
// Poll always waits the interval before the run of 'condition'.
// 'condition' will always be invoked at least once.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// If you want to Poll something forever, see PollInfinite.
// Poll always waits the interval before the first check of the condition.
func Poll(interval, timeout time.Duration, condition ConditionFunc) error {
return pollInternal(poller(interval, timeout), condition)
}
func pollInternal(wait WaitFunc, condition ConditionFunc) error {
done := make(chan struct{})
defer close(done)
return WaitFor(wait, condition, done)
return WaitFor(wait, condition, NeverStop)
}
// PollImmediate is identical to Poll, except that it performs the first check
// immediately, not waiting interval beforehand.
// PollImmediate tries a condition func until it returns true, an error, or the timeout
// is reached.
//
// PollImmediate always checks 'condition' before waiting for the interval. 'condition'
// will always be invoked at least once.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// If you want to immediately Poll something forever, see PollImmediateInfinite.
func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error {
return pollImmediateInternal(poller(interval, timeout), condition)
}
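The corresponding Poll usage, with a hypothetical readiness check:

```go
// Wait up to 10s, checking once per second; the first check happens after one
// interval has elapsed (use PollImmediate to check before the first wait).
err := wait.Poll(1*time.Second, 10*time.Second, func() (bool, error) {
	return isReady(), nil // hypothetical condition
})
```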
@ -185,16 +212,24 @@ func pollImmediateInternal(wait WaitFunc, condition ConditionFunc) error {
return pollInternal(wait, condition)
}
// PollInfinite polls forever.
// PollInfinite tries a condition func until it returns true or an error.
//
// PollInfinite always waits the interval before the run of 'condition'.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
func PollInfinite(interval time.Duration, condition ConditionFunc) error {
done := make(chan struct{})
defer close(done)
return PollUntil(interval, condition, done)
}
// PollImmediateInfinite is identical to PollInfinite, except that it
// performs the first check immediately, not waiting interval
// beforehand.
// PollImmediateInfinite tries a condition func until it returns true or an error.
//
// PollImmediateInfinite runs the 'condition' before waiting for the interval.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error {
done, err := condition()
if err != nil {
@ -206,7 +241,11 @@ func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) erro
return PollInfinite(interval, condition)
}
// PollUntil is like Poll, but it takes a stop change instead of total duration
// PollUntil tries a condition func until it returns true, an error or stopCh is
// closed.
//
// PollUntil always waits the interval before the first run of 'condition'.
// 'condition' will always be invoked at least once.
func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
return WaitFor(poller(interval, 0), condition, stopCh)
}
@ -215,11 +254,16 @@ func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan st
// should be executed and is closed when the last test should be invoked.
type WaitFunc func(done <-chan struct{}) <-chan struct{}
// WaitFor gets a channel from wait(), and then invokes fn once for every value
// placed on the channel and once more when the channel is closed. If fn
// returns an error the loop ends and that error is returned, and if fn returns
// true the loop ends and nil is returned. ErrWaitTimeout will be returned if
// the channel is closed without fn ever returning true.
// WaitFor continually checks 'fn' as driven by 'wait'.
//
// WaitFor gets a channel from 'wait()', and then invokes 'fn' once for every value
// placed on the channel and once more when the channel is closed.
//
// If 'fn' returns an error the loop ends and that error is returned, and if
// 'fn' returns true the loop ends and nil is returned.
//
// ErrWaitTimeout will be returned if the channel is closed without fn ever
// returning true.
func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error {
c := wait(done)
for {
@ -238,11 +282,14 @@ func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error {
return ErrWaitTimeout
}
// poller returns a WaitFunc that will send to the channel every
// interval until timeout has elapsed and then close the channel.
// Over very short intervals you may receive no ticks before
// the channel is closed. If timeout is 0, the channel
// will never be closed.
// poller returns a WaitFunc that will send to the channel every interval until
// timeout has elapsed and then closes the channel.
//
// Over very short intervals you may receive no ticks before the channel is
// closed. A timeout of 0 is interpreted as infinity.
//
// Output ticks are not buffered. If the channel is not ready to receive an
// item, the tick is skipped.
func poller(interval, timeout time.Duration) WaitFunc {
return WaitFunc(func(done <-chan struct{}) <-chan struct{} {
ch := make(chan struct{})
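The tick-skipping behavior documented for poller can be reproduced with a custom WaitFunc; a sketch where `cond` is a hypothetical ConditionFunc:

```go
// A WaitFunc that ticks every second and, like poller, drops ticks the
// receiver is not ready for instead of buffering them.
everySecond := wait.WaitFunc(func(done <-chan struct{}) <-chan struct{} {
	ch := make(chan struct{})
	go func() {
		defer close(ch)
		t := time.NewTicker(time.Second)
		defer t.Stop()
		for {
			select {
			case <-t.C:
				select {
				case ch <- struct{}{}:
				default: // WaitFor is still busy in cond; skip this tick
				}
			case <-done:
				return
			}
		}
	}()
	return ch
})

done := make(chan struct{})
defer close(done)
err := wait.WaitFor(everySecond, cond, done)
```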

View file

@ -24,7 +24,7 @@ import (
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
)
// DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to
// DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to
// requeue items after failures without ending up in a hot-loop.
type DelayingInterface interface {
Interface
@ -68,6 +68,9 @@ type delayingType struct {
stopCh chan struct{}
// heartbeat ensures we wait no more than maxWait before firing
//
// TODO: replace with Ticker (and add to clock) so this can be cleaned up.
// clock.Tick will leak.
heartbeat <-chan time.Time
// waitingForAdd is an ordered slice of items to be added to the contained work queue
@ -115,7 +118,7 @@ func (q *delayingType) AddAfter(item interface{}, duration time.Duration) {
}
}
// maxWait keeps a max bound on the wait time. It's just insurance against weird things happening.
// maxWait keeps a max bound on the wait time. It's just insurance against weird things happening.
// Checking the queue every 10 seconds isn't expensive and we know that we'll never end up with an
// expired item sitting for more than 10 seconds.
const maxWait = 10 * time.Second
@ -192,6 +195,9 @@ func (q *delayingType) waitingLoop() {
// inserts the given entry into the sorted entries list
// same semantics as append()... the given slice may be modified,
// and the returned value should be used
//
// TODO: This should probably be converted to use container/heap to improve
// running time for a large number of items.
func insert(entries []waitFor, knownEntries map[t]time.Time, entry waitFor) []waitFor {
// if the entry is already in our retry list and the existing time is before the new one, just skip it
existingTime, exists := knownEntries[entry.data]
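A usage sketch for the delaying queue; the `NewDelayingQueue` constructor is assumed here (it is not shown in this diff):

```go
q := workqueue.NewDelayingQueue()
q.AddAfter("retry-key", 5*time.Second) // requeue after a delay instead of hot-looping

item, shutdown := q.Get() // blocks until the item's delay has elapsed
if !shutdown {
	fmt.Println(item) // "retry-key", roughly 5 seconds later
	q.Done(item)
}
```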

Some files were not shown because too many files have changed in this diff