AUTH-2105: Adds support for local forwarding. Refactors auditlogger creation.

AUTH-2088: Adds dynamic destination routing
Michael Borkenstein
2019-10-02 15:56:28 -05:00
parent dbde3870da
commit 91d9dca34e
669 changed files with 74279 additions and 18300 deletions

vendor/github.com/prometheus/procfs/.golangci.yml generated vendored Normal file

@@ -0,0 +1,6 @@
# Run only staticcheck for now. Additional linters will be enabled one-by-one.
linters:
enable:
- staticcheck
- govet
disable-all: true

vendor/github.com/prometheus/procfs/MAINTAINERS.md generated vendored

@@ -1,2 +1,2 @@
* Tobias Schmidt <tobidt@gmail.com> @grobie
* Johannes 'fish' Ziemke <github@freigeist.org> @discordianfish
* Paul Gier <pgier@redhat.com> @pgier

vendor/github.com/prometheus/procfs/Makefile generated vendored

@@ -14,6 +14,7 @@
include Makefile.common
%/.unpacked: %.ttar
@echo ">> extracting fixtures"
./ttar -C $(dir $*) -x -f $*.ttar
touch $@

vendor/github.com/prometheus/procfs/Makefile.common generated vendored

@@ -69,23 +69,24 @@ else
GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)
endif
PROMU_VERSION ?= 0.3.0
PROMU_VERSION ?= 0.4.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
STATICCHECK :=
# staticcheck only supports linux, freebsd, darwin and windows platforms on i386/amd64
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.16.0
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin))
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck
STATICCHECK_VERSION ?= 2019.1
STATICCHECK_URL := https://github.com/dominikh/go-tools/releases/download/$(STATICCHECK_VERSION)/staticcheck_$(GOHOSTOS)_$(GOHOSTARCH)
GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
endif
endif
PREFIX ?= $(shell pwd)
BIN_DIR ?= $(shell pwd)
DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
DOCKERFILE_PATH ?= ./
DOCKER_REPO ?= prom
DOCKER_ARCHS ?= amd64
@@ -107,7 +108,7 @@ endif
%: common-% ;
.PHONY: common-all
common-all: precheck style check_license staticcheck unused build test
common-all: precheck style check_license lint unused build test
.PHONY: common-style
common-style:
@@ -159,21 +160,24 @@ common-vet:
@echo ">> vetting code"
GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs)
.PHONY: common-staticcheck
common-staticcheck: $(STATICCHECK)
ifdef STATICCHECK
@echo ">> running staticcheck"
chmod +x $(STATICCHECK)
.PHONY: common-lint
common-lint: $(GOLANGCI_LINT)
ifdef GOLANGCI_LINT
@echo ">> running golangci-lint"
ifdef GO111MODULE
# 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
# Otherwise staticcheck might fail randomly for some reason not yet explained.
GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
GO111MODULE=$(GO111MODULE) $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs)
GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
else
$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs)
$(GOLANGCI_LINT) run $(pkgs)
endif
endif
# For backward-compatibility.
.PHONY: common-staticcheck
common-staticcheck: lint
.PHONY: common-unused
common-unused: $(GOVENDOR)
ifdef GOVENDOR
@@ -209,7 +213,7 @@ $(BUILD_DOCKER_ARCHS): common-docker-%:
docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
--build-arg ARCH="$*" \
--build-arg OS="linux" \
.
$(DOCKERFILE_PATH)
.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
@@ -241,10 +245,12 @@ proto:
@echo ">> generating code from proto files"
@./scripts/genproto.sh
ifdef STATICCHECK
$(STATICCHECK):
ifdef GOLANGCI_LINT
$(GOLANGCI_LINT):
mkdir -p $(FIRST_GOPATH)/bin
curl -s -L $(STATICCHECK_URL) > $(STATICCHECK)
curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \
| sed -e '/install -d/d' \
| sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION)
endif
ifdef GOVENDOR

vendor/github.com/prometheus/procfs/README.md generated vendored

@@ -1,7 +1,7 @@
# procfs
This procfs package provides functions to retrieve system, kernel and process
metrics from the pseudo-filesystem proc.
metrics from the pseudo-filesystems /proc and /sys.
*WARNING*: This package is a work in progress. Its API may still break in
backwards-incompatible ways without warnings. Use it at your own risk.
@@ -9,3 +9,45 @@ backwards-incompatible ways without warnings. Use it at your own risk.
[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)
[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs)
[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs)
## Usage
The procfs library is organized by packages based on whether the gathered data is coming from
/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc, /sys, or both. For example, current cpu statistics are gathered from
`/proc/stat` and are available via the root procfs package. First, the proc filesystem mount
point is initialized, and then the stat information is read.
```go
fs, err := procfs.NewFS("/proc")
stats, err := fs.Stat()
```
Some sub-packages such as `blockdevice`, require access to both the proc and sys filesystems.
```go
fs, err := blockdevice.NewFS("/proc", "/sys")
stats, err := fs.ProcDiskstats()
```
## Building and Testing
The procfs library is normally built as part of another application. However, when making
changes to the library, the `make test` command can be used to run the API test suite.
### Updating Test Fixtures
The procfs library includes a set of test fixtures which include many example files from
the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file
which is extracted automatically during testing. To add/update the test fixtures, first
ensure the `fixtures` directory is up to date by removing the existing directory and then
extracting the ttar file using `make fixtures/.unpacked` or just `make test`.
```bash
rm -rf fixtures
make test
```
Next, make the required changes to the extracted files in the `fixtures` directory. When
the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
based on the updated `fixtures` directory. And finally, verify the changes using
`git diff fixtures.ttar`.
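For the update step itself, the commands named in the paragraph above boil down to:
```bash
# After editing files under the extracted fixtures/ directory:
make update_fixtures        # rebuilds fixtures.ttar from fixtures/
git diff fixtures.ttar      # verify the regenerated archive
```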

vendor/github.com/prometheus/procfs/arp.go generated vendored Normal file

@@ -0,0 +1,85 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"fmt"
"io/ioutil"
"net"
"strings"
)
// ARPEntry contains a single row of the columnar data represented in
// /proc/net/arp.
type ARPEntry struct {
// IP address
IPAddr net.IP
// MAC address
HWAddr net.HardwareAddr
// Name of the device
Device string
}
// GatherARPEntries retrieves all the ARP entries, parses the relevant columns,
// and then returns a slice of ARPEntry structs.
func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
data, err := ioutil.ReadFile(fs.proc.Path("net/arp"))
if err != nil {
return nil, fmt.Errorf("error reading arp %s: %s", fs.proc.Path("net/arp"), err)
}
return parseARPEntries(data)
}
func parseARPEntries(data []byte) ([]ARPEntry, error) {
lines := strings.Split(string(data), "\n")
entries := make([]ARPEntry, 0)
var err error
const (
expectedDataWidth = 6
expectedHeaderWidth = 9
)
for _, line := range lines {
columns := strings.Fields(line)
width := len(columns)
if width == expectedHeaderWidth || width == 0 {
continue
} else if width == expectedDataWidth {
entry, err := parseARPEntry(columns)
if err != nil {
return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %s", err)
}
entries = append(entries, entry)
} else {
return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth)
}
}
return entries, err
}
func parseARPEntry(columns []string) (ARPEntry, error) {
ip := net.ParseIP(columns[0])
mac := net.HardwareAddr(columns[3])
entry := ARPEntry{
IPAddr: ip,
HWAddr: mac,
Device: columns[5],
}
return entry, nil
}
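As a usage sketch (not part of this commit), the new ARP API combines with the NewFS constructor from fs.go further below like this:
```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// The default /proc mount point; point this at a fixture tree when testing.
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	entries, err := fs.GatherARPEntries()
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		fmt.Printf("%s -> %s on %s\n", e.IPAddr, e.HWAddr, e.Device)
	}
}
```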

View File

@@ -31,19 +31,9 @@ type BuddyInfo struct {
Sizes []float64
}
// NewBuddyInfo reads the buddyinfo statistics.
func NewBuddyInfo() ([]BuddyInfo, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
return nil, err
}
return fs.NewBuddyInfo()
}
// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) {
file, err := os.Open(fs.Path("buddyinfo"))
// BuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
func (fs FS) BuddyInfo() ([]BuddyInfo, error) {
file, err := os.Open(fs.proc.Path("buddyinfo"))
if err != nil {
return nil, err
}
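A short sketch of the renamed accessor (BuddyInfo replaces the removed package-level NewBuddyInfo); the struct is printed as a whole because only its Sizes field is visible in this hunk:
```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	infos, err := fs.BuddyInfo()
	if err != nil {
		log.Fatal(err)
	}
	for _, bi := range infos {
		// One entry per node/zone line of /proc/buddyinfo.
		fmt.Printf("%+v\n", bi)
	}
}
```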

vendor/github.com/prometheus/procfs/cpuinfo.go generated vendored Normal file

@@ -0,0 +1,166 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"io/ioutil"
"strconv"
"strings"
)
// CPUInfo contains general information about a system CPU found in /proc/cpuinfo
type CPUInfo struct {
Processor uint
VendorID string
CPUFamily string
Model string
ModelName string
Stepping string
Microcode string
CPUMHz float64
CacheSize string
PhysicalID string
Siblings uint
CoreID string
CPUCores uint
APICID string
InitialAPICID string
FPU string
FPUException string
CPUIDLevel uint
WP string
Flags []string
Bugs []string
BogoMips float64
CLFlushSize uint
CacheAlignment uint
AddressSizes string
PowerManagement string
}
// CPUInfo returns information about current system CPUs.
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
func (fs FS) CPUInfo() ([]CPUInfo, error) {
data, err := ioutil.ReadFile(fs.proc.Path("cpuinfo"))
if err != nil {
return nil, err
}
return parseCPUInfo(data)
}
// parseCPUInfo parses data from /proc/cpuinfo
func parseCPUInfo(info []byte) ([]CPUInfo, error) {
cpuinfo := []CPUInfo{}
i := -1
scanner := bufio.NewScanner(bytes.NewReader(info))
for scanner.Scan() {
line := scanner.Text()
if strings.TrimSpace(line) == "" {
continue
}
field := strings.SplitN(line, ": ", 2)
switch strings.TrimSpace(field[0]) {
case "processor":
cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
i++
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].Processor = uint(v)
case "vendor_id":
cpuinfo[i].VendorID = field[1]
case "cpu family":
cpuinfo[i].CPUFamily = field[1]
case "model":
cpuinfo[i].Model = field[1]
case "model name":
cpuinfo[i].ModelName = field[1]
case "stepping":
cpuinfo[i].Stepping = field[1]
case "microcode":
cpuinfo[i].Microcode = field[1]
case "cpu MHz":
v, err := strconv.ParseFloat(field[1], 64)
if err != nil {
return nil, err
}
cpuinfo[i].CPUMHz = v
case "cache size":
cpuinfo[i].CacheSize = field[1]
case "physical id":
cpuinfo[i].PhysicalID = field[1]
case "siblings":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].Siblings = uint(v)
case "core id":
cpuinfo[i].CoreID = field[1]
case "cpu cores":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].CPUCores = uint(v)
case "apicid":
cpuinfo[i].APICID = field[1]
case "initial apicid":
cpuinfo[i].InitialAPICID = field[1]
case "fpu":
cpuinfo[i].FPU = field[1]
case "fpu_exception":
cpuinfo[i].FPUException = field[1]
case "cpuid level":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].CPUIDLevel = uint(v)
case "wp":
cpuinfo[i].WP = field[1]
case "flags":
cpuinfo[i].Flags = strings.Fields(field[1])
case "bugs":
cpuinfo[i].Bugs = strings.Fields(field[1])
case "bogomips":
v, err := strconv.ParseFloat(field[1], 64)
if err != nil {
return nil, err
}
cpuinfo[i].BogoMips = v
case "clflush size":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].CLFlushSize = uint(v)
case "cache_alignment":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].CacheAlignment = uint(v)
case "address sizes":
cpuinfo[i].AddressSizes = field[1]
case "power management":
cpuinfo[i].PowerManagement = field[1]
}
}
return cpuinfo, nil
}
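A hedged example of the new CPUInfo accessor, restricted to fields defined in the struct above:
```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS() // same as NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	cpus, err := fs.CPUInfo()
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range cpus {
		fmt.Printf("cpu%d: %s @ %.2f MHz, %d cores\n",
			c.Processor, c.ModelName, c.CPUMHz, c.CPUCores)
	}
}
```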

vendor/github.com/prometheus/procfs/crypto.go generated vendored Normal file

@@ -0,0 +1,131 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bytes"
"fmt"
"io/ioutil"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// Crypto holds info parsed from /proc/crypto.
type Crypto struct {
Alignmask *uint64
Async bool
Blocksize *uint64
Chunksize *uint64
Ctxsize *uint64
Digestsize *uint64
Driver string
Geniv string
Internal string
Ivsize *uint64
Maxauthsize *uint64
MaxKeysize *uint64
MinKeysize *uint64
Module string
Name string
Priority *int64
Refcnt *int64
Seedsize *uint64
Selftest string
Type string
Walksize *uint64
}
// Crypto parses a crypto file (/proc/crypto) and returns a slice of
// structs containing the relevant info. More information available here:
// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html
func (fs FS) Crypto() ([]Crypto, error) {
data, err := ioutil.ReadFile(fs.proc.Path("crypto"))
if err != nil {
return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err)
}
crypto, err := parseCrypto(data)
if err != nil {
return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err)
}
return crypto, nil
}
func parseCrypto(cryptoData []byte) ([]Crypto, error) {
crypto := []Crypto{}
cryptoBlocks := bytes.Split(cryptoData, []byte("\n\n"))
for _, block := range cryptoBlocks {
var newCryptoElem Crypto
lines := strings.Split(string(block), "\n")
for _, line := range lines {
if strings.TrimSpace(line) == "" || line[0] == ' ' {
continue
}
fields := strings.Split(line, ":")
key := strings.TrimSpace(fields[0])
value := strings.TrimSpace(fields[1])
vp := util.NewValueParser(value)
switch strings.TrimSpace(key) {
case "async":
b, err := strconv.ParseBool(value)
if err == nil {
newCryptoElem.Async = b
}
case "blocksize":
newCryptoElem.Blocksize = vp.PUInt64()
case "chunksize":
newCryptoElem.Chunksize = vp.PUInt64()
case "digestsize":
newCryptoElem.Digestsize = vp.PUInt64()
case "driver":
newCryptoElem.Driver = value
case "geniv":
newCryptoElem.Geniv = value
case "internal":
newCryptoElem.Internal = value
case "ivsize":
newCryptoElem.Ivsize = vp.PUInt64()
case "maxauthsize":
newCryptoElem.Maxauthsize = vp.PUInt64()
case "max keysize":
newCryptoElem.MaxKeysize = vp.PUInt64()
case "min keysize":
newCryptoElem.MinKeysize = vp.PUInt64()
case "module":
newCryptoElem.Module = value
case "name":
newCryptoElem.Name = value
case "priority":
newCryptoElem.Priority = vp.PInt64()
case "refcnt":
newCryptoElem.Refcnt = vp.PInt64()
case "seedsize":
newCryptoElem.Seedsize = vp.PUInt64()
case "selftest":
newCryptoElem.Selftest = value
case "type":
newCryptoElem.Type = value
case "walksize":
newCryptoElem.Walksize = vp.PUInt64()
}
}
crypto = append(crypto, newCryptoElem)
}
return crypto, nil
}
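A sketch of reading /proc/crypto through the new Crypto method; the numeric fields are pointers, so nil has to be handled before dereferencing:
```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	entries, err := fs.Crypto()
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range entries {
		if c.Priority != nil {
			fmt.Printf("%s (driver %s) priority=%d\n", c.Name, c.Driver, *c.Priority)
		} else {
			fmt.Printf("%s (driver %s)\n", c.Name, c.Driver)
		}
	}
}
```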

File diff suppressed because it is too large

vendor/github.com/prometheus/procfs/fs.go generated vendored

@@ -14,33 +14,30 @@
package procfs
import (
"fmt"
"os"
"path"
"github.com/prometheus/procfs/internal/fs"
)
// FS represents the pseudo-filesystem proc, which provides an interface to
// FS represents the pseudo-filesystem sys, which provides an interface to
// kernel data structures.
type FS string
type FS struct {
proc fs.FS
}
// DefaultMountPoint is the common mount point of the proc filesystem.
const DefaultMountPoint = "/proc"
const DefaultMountPoint = fs.DefaultProcMountPoint
// NewFS returns a new FS mounted under the given mountPoint. It will error
// if the mount point can't be read.
// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint.
// It will error if the mount point directory can't be read or is a file.
func NewDefaultFS() (FS, error) {
return NewFS(DefaultMountPoint)
}
// NewFS returns a new proc FS mounted under the given proc mountPoint. It will error
// if the mount point directory can't be read or is a file.
func NewFS(mountPoint string) (FS, error) {
info, err := os.Stat(mountPoint)
fs, err := fs.NewFS(mountPoint)
if err != nil {
return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
return FS{}, err
}
if !info.IsDir() {
return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
}
return FS(mountPoint), nil
}
// Path returns the path of the given subsystem relative to the procfs root.
func (fs FS) Path(p ...string) string {
return path.Join(append([]string{string(fs)}, p...)...)
return FS{fs}, nil
}

vendor/github.com/prometheus/procfs/go.mod generated vendored

@@ -1,3 +1,6 @@
module github.com/prometheus/procfs
require golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4
require (
github.com/google/go-cmp v0.3.0
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4
)

vendor/github.com/prometheus/procfs/go.sum generated vendored

@@ -1,2 +1,4 @@
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=

vendor/github.com/prometheus/procfs/internal/fs/fs.go generated vendored Normal file

@@ -0,0 +1,55 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fs
import (
"fmt"
"os"
"path/filepath"
)
const (
// DefaultProcMountPoint is the common mount point of the proc filesystem.
DefaultProcMountPoint = "/proc"
// DefaultSysMountPoint is the common mount point of the sys filesystem.
DefaultSysMountPoint = "/sys"
// DefaultConfigfsMountPoint is the common mount point of the configfs filesystem.
DefaultConfigfsMountPoint = "/sys/kernel/config"
)
// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
// interface to kernel data structures.
type FS string
// NewFS returns a new FS mounted under the given mountPoint. It will error
// if the mount point can't be read.
func NewFS(mountPoint string) (FS, error) {
info, err := os.Stat(mountPoint)
if err != nil {
return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
}
if !info.IsDir() {
return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
}
return FS(mountPoint), nil
}
// Path appends the given path elements to the filesystem path, adding separators
// as necessary.
func (fs FS) Path(p ...string) string {
return filepath.Join(append([]string{string(fs)}, p...)...)
}

vendor/github.com/prometheus/procfs/internal/util/parse.go generated vendored Normal file

@@ -0,0 +1,88 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"io/ioutil"
"strconv"
"strings"
)
// ParseUint32s parses a slice of strings into a slice of uint32s.
func ParseUint32s(ss []string) ([]uint32, error) {
us := make([]uint32, 0, len(ss))
for _, s := range ss {
u, err := strconv.ParseUint(s, 10, 32)
if err != nil {
return nil, err
}
us = append(us, uint32(u))
}
return us, nil
}
// ParseUint64s parses a slice of strings into a slice of uint64s.
func ParseUint64s(ss []string) ([]uint64, error) {
us := make([]uint64, 0, len(ss))
for _, s := range ss {
u, err := strconv.ParseUint(s, 10, 64)
if err != nil {
return nil, err
}
us = append(us, u)
}
return us, nil
}
// ParsePInt64s parses a slice of strings into a slice of int64 pointers.
func ParsePInt64s(ss []string) ([]*int64, error) {
us := make([]*int64, 0, len(ss))
for _, s := range ss {
u, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return nil, err
}
us = append(us, &u)
}
return us, nil
}
// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
func ReadUintFromFile(path string) (uint64, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return 0, err
}
return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
}
// ParseBool parses a string into a boolean pointer.
func ParseBool(b string) *bool {
var truth bool
switch b {
case "enabled":
truth = true
case "disabled":
truth = false
default:
return nil
}
return &truth
}

vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go generated vendored Normal file

@@ -0,0 +1,45 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux,!appengine
package util
import (
"bytes"
"os"
"syscall"
)
// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly.
// https://github.com/prometheus/node_exporter/pull/728/files
func SysReadFile(file string) (string, error) {
f, err := os.Open(file)
if err != nil {
return "", err
}
defer f.Close()
// On some machines, hwmon drivers are broken and return EAGAIN. This causes
// Go's ioutil.ReadFile implementation to poll forever.
//
// Since we either want to read data or bail immediately, do the simplest
// possible read using syscall directly.
b := make([]byte, 128)
n, err := syscall.Read(int(f.Fd()), b)
if err != nil {
return "", err
}
return string(bytes.TrimSpace(b[:n])), nil
}

vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go generated vendored Normal file

@@ -0,0 +1,26 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux,appengine !linux
package util
import (
"fmt"
)
// SysReadFile is implemented as a noop for builds that do not support
// the read syscall, for example Windows or Linux on Google App Engine.
func SysReadFile(file string) (string, error) {
return "", fmt.Errorf("not supported on this platform")
}

vendor/github.com/prometheus/procfs/internal/util/valueparser.go generated vendored Normal file

@@ -0,0 +1,77 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"strconv"
)
// TODO(mdlayher): util packages are an anti-pattern and this should be moved
// somewhere else that is more focused in the future.
// A ValueParser enables parsing a single string into a variety of data types
// in a concise and safe way. The Err method must be invoked after invoking
// any other methods to ensure a value was successfully parsed.
type ValueParser struct {
v string
err error
}
// NewValueParser creates a ValueParser using the input string.
func NewValueParser(v string) *ValueParser {
return &ValueParser{v: v}
}
// PInt64 interprets the underlying value as an int64 and returns a pointer to
// that value.
func (vp *ValueParser) PInt64() *int64 {
if vp.err != nil {
return nil
}
// A base value of zero makes ParseInt infer the correct base using the
// string's prefix, if any.
const base = 0
v, err := strconv.ParseInt(vp.v, base, 64)
if err != nil {
vp.err = err
return nil
}
return &v
}
// PUInt64 interprets the underlying value as an uint64 and returns a pointer to
// that value.
func (vp *ValueParser) PUInt64() *uint64 {
if vp.err != nil {
return nil
}
// A base value of zero makes ParseInt infer the correct base using the
// string's prefix, if any.
const base = 0
v, err := strconv.ParseUint(vp.v, base, 64)
if err != nil {
vp.err = err
return nil
}
return &v
}
// Err returns the last error, if any, encountered by the ValueParser.
func (vp *ValueParser) Err() error {
return vp.err
}
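The Err-after-accessors contract described in the comment above, sketched as a hypothetical helper inside the procfs module (the internal/util import path is not usable from outside the module, and parseSizeAndPriority does not exist in this commit):
```go
package procfs

import "github.com/prometheus/procfs/internal/util"

// parseSizeAndPriority is a hypothetical helper: call the typed accessors
// first, then check Err once for any accumulated parse failure.
func parseSizeAndPriority(sizeStr, prioStr string) (*uint64, *int64, error) {
	vp := util.NewValueParser(sizeStr)
	size := vp.PUInt64()
	if err := vp.Err(); err != nil {
		return nil, nil, err
	}

	vp = util.NewValueParser(prioStr)
	prio := vp.PInt64()
	return size, prio, vp.Err()
}
```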

vendor/github.com/prometheus/procfs/ipvs.go generated vendored

@@ -62,19 +62,9 @@ type IPVSBackendStatus struct {
Weight uint64
}
// NewIPVSStats reads the IPVS statistics.
func NewIPVSStats() (IPVSStats, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
return IPVSStats{}, err
}
return fs.NewIPVSStats()
}
// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem.
func (fs FS) NewIPVSStats() (IPVSStats, error) {
file, err := os.Open(fs.Path("net/ip_vs_stats"))
// IPVSStats reads the IPVS statistics from the specified `proc` filesystem.
func (fs FS) IPVSStats() (IPVSStats, error) {
file, err := os.Open(fs.proc.Path("net/ip_vs_stats"))
if err != nil {
return IPVSStats{}, err
}
@@ -131,19 +121,9 @@ func parseIPVSStats(file io.Reader) (IPVSStats, error) {
return stats, nil
}
// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs.
func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
return []IPVSBackendStatus{}, err
}
return fs.NewIPVSBackendStatus()
}
// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
file, err := os.Open(fs.Path("net/ip_vs"))
// IPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
func (fs FS) IPVSBackendStatus() ([]IPVSBackendStatus, error) {
file, err := os.Open(fs.proc.Path("net/ip_vs"))
if err != nil {
return nil, err
}
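A brief sketch of the renamed accessors (IPVSStats and IPVSBackendStatus replace the New* variants); fields outside the shown hunks are printed with %+v rather than assumed:
```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.IPVSStats()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("ip_vs_stats: %+v\n", stats)

	backends, err := fs.IPVSBackendStatus()
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range backends {
		fmt.Printf("weight=%d %+v\n", b.Weight, b)
	}
}
```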

vendor/github.com/prometheus/procfs/mdstat.go generated vendored

@@ -22,8 +22,8 @@ import (
)
var (
statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
recoveryLineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
)
// MDStat holds info parsed from /proc/mdstat.
@@ -34,117 +34,160 @@ type MDStat struct {
ActivityState string
// Number of active disks.
DisksActive int64
// Total number of disks the device consists of.
// Total number of disks the device requires.
DisksTotal int64
// Number of failed disks.
DisksFailed int64
// Spare disks in the device.
DisksSpare int64
// Number of blocks the device holds.
BlocksTotal int64
// Number of blocks on the device that are in sync.
BlocksSynced int64
}
// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos.
func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
mdStatusFilePath := fs.Path("mdstat")
content, err := ioutil.ReadFile(mdStatusFilePath)
// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of
// structs containing the relevant info. More information available here:
// https://raid.wiki.kernel.org/index.php/Mdstat
func (fs FS) MDStat() ([]MDStat, error) {
data, err := ioutil.ReadFile(fs.proc.Path("mdstat"))
if err != nil {
return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err)
}
mdstat, err := parseMDStat(data)
if err != nil {
return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err)
}
return mdstat, nil
}
mdStates := []MDStat{}
lines := strings.Split(string(content), "\n")
for i, l := range lines {
if l == "" {
continue
}
if l[0] == ' ' {
continue
}
if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") {
// parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of
// structs containing the relevant info.
func parseMDStat(mdStatData []byte) ([]MDStat, error) {
mdStats := []MDStat{}
lines := strings.Split(string(mdStatData), "\n")
for i, line := range lines {
if strings.TrimSpace(line) == "" || line[0] == ' ' ||
strings.HasPrefix(line, "Personalities") ||
strings.HasPrefix(line, "unused") {
continue
}
mainLine := strings.Split(l, " ")
if len(mainLine) < 3 {
return mdStates, fmt.Errorf("error parsing mdline: %s", l)
deviceFields := strings.Fields(line)
if len(deviceFields) < 3 {
return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line)
}
mdName := mainLine[0]
activityState := mainLine[2]
mdName := deviceFields[0] // mdx
state := deviceFields[2] // active or inactive
if len(lines) <= i+3 {
return mdStates, fmt.Errorf(
"error parsing %s: too few lines for md device %s",
mdStatusFilePath,
return nil, fmt.Errorf(
"error parsing %s: too few lines for md device",
mdName,
)
}
active, total, size, err := evalStatusline(lines[i+1])
// Failed disks have the suffix (F) & Spare disks have the suffix (S).
fail := int64(strings.Count(line, "(F)"))
spare := int64(strings.Count(line, "(S)"))
active, total, size, err := evalStatusLine(lines[i], lines[i+1])
if err != nil {
return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
return nil, fmt.Errorf("error parsing md device lines: %s", err)
}
// j is the line number of the syncing-line.
j := i + 2
syncLineIdx := i + 2
if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
j = i + 3
syncLineIdx++
}
// If device is syncing at the moment, get the number of currently
// synced bytes, otherwise that number equals the size of the device.
syncedBlocks := size
if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") {
syncedBlocks, err = evalBuildline(lines[j])
if err != nil {
return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
recovering := strings.Contains(lines[syncLineIdx], "recovery")
resyncing := strings.Contains(lines[syncLineIdx], "resync")
// Append recovery and resyncing state info.
if recovering || resyncing {
if recovering {
state = "recovering"
} else {
state = "resyncing"
}
// Handle case when resync=PENDING or resync=DELAYED.
if strings.Contains(lines[syncLineIdx], "PENDING") ||
strings.Contains(lines[syncLineIdx], "DELAYED") {
syncedBlocks = 0
} else {
syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx])
if err != nil {
return nil, fmt.Errorf("error parsing sync line in md device %s: %s", mdName, err)
}
}
}
mdStates = append(mdStates, MDStat{
mdStats = append(mdStats, MDStat{
Name: mdName,
ActivityState: activityState,
ActivityState: state,
DisksActive: active,
DisksFailed: fail,
DisksSpare: spare,
DisksTotal: total,
BlocksTotal: size,
BlocksSynced: syncedBlocks,
})
}
return mdStates, nil
return mdStats, nil
}
func evalStatusline(statusline string) (active, total, size int64, err error) {
matches := statuslineRE.FindStringSubmatch(statusline)
if len(matches) != 4 {
return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline)
func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, err error) {
sizeStr := strings.Fields(statusLine)[0]
size, err = strconv.ParseInt(sizeStr, 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
}
size, err = strconv.ParseInt(matches[1], 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
// In the device deviceLine, only disks have a number associated with them in [].
total = int64(strings.Count(deviceLine, "["))
return total, total, size, nil
}
if strings.Contains(deviceLine, "inactive") {
return 0, 0, size, nil
}
matches := statusLineRE.FindStringSubmatch(statusLine)
if len(matches) != 4 {
return 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine)
}
total, err = strconv.ParseInt(matches[2], 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
}
active, err = strconv.ParseInt(matches[3], 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
}
return active, total, size, nil
}
func evalBuildline(buildline string) (syncedBlocks int64, err error) {
matches := buildlineRE.FindStringSubmatch(buildline)
func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, err error) {
matches := recoveryLineRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return 0, fmt.Errorf("unexpected buildline: %s", buildline)
return 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine)
}
syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
if err != nil {
return 0, fmt.Errorf("%s in buildline: %s", err, buildline)
return 0, fmt.Errorf("%s in recoveryLine: %s", err, recoveryLine)
}
return syncedBlocks, nil
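A usage sketch for the reworked MDStat method, limited to the fields declared in the struct above:
```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	devices, err := fs.MDStat()
	if err != nil {
		log.Fatal(err)
	}
	for _, md := range devices {
		fmt.Printf("%s: %s, %d/%d disks (failed=%d, spare=%d), %d/%d blocks synced\n",
			md.Name, md.ActivityState, md.DisksActive, md.DisksTotal,
			md.DisksFailed, md.DisksSpare, md.BlocksSynced, md.BlocksTotal)
	}
}
```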

vendor/github.com/prometheus/procfs/mountinfo.go generated vendored Normal file

@@ -0,0 +1,178 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"fmt"
"io"
"os"
"strconv"
"strings"
)
var validOptionalFields = map[string]bool{
"shared": true,
"master": true,
"propagate_from": true,
"unbindable": true,
}
// A MountInfo is a type that describes the details and options
// of each mount, parsed from /proc/self/mountinfo.
// The fields of each entry of /proc/self/mountinfo
// are described in the following man page:
// http://man7.org/linux/man-pages/man5/proc.5.html
type MountInfo struct {
// Unique Id for the mount
MountId int
// The Id of the parent mount
ParentId int
// The value of `st_dev` for the files on this FS
MajorMinorVer string
// The pathname of the directory in the FS that forms
// the root for this mount
Root string
// The pathname of the mount point relative to the root
MountPoint string
// Mount options
Options map[string]string
// Zero or more optional fields
OptionalFields map[string]string
// The Filesystem type
FSType string
// FS specific information or "none"
Source string
// Superblock options
SuperOptions map[string]string
}
// Returns part of the mountinfo line, if it exists, else an empty string.
func getStringSliceElement(parts []string, idx int, defaultValue string) string {
if idx >= len(parts) {
return defaultValue
}
return parts[idx]
}
// Reads each line of the mountinfo file, and returns a list of formatted MountInfo structs.
func parseMountInfo(r io.Reader) ([]*MountInfo, error) {
mounts := []*MountInfo{}
scanner := bufio.NewScanner(r)
for scanner.Scan() {
mountString := scanner.Text()
parsedMounts, err := parseMountInfoString(mountString)
if err != nil {
return nil, err
}
mounts = append(mounts, parsedMounts)
}
err := scanner.Err()
return mounts, err
}
// Parses a mountinfo file line, and converts it to a MountInfo struct.
// An important check here is for the hyphen separator; if it is missing,
// the line is malformed.
func parseMountInfoString(mountString string) (*MountInfo, error) {
var err error
// OptionalFields can be zero, hence these checks to ensure we do not populate the wrong values in the wrong spots
separatorIndex := strings.Index(mountString, "-")
if separatorIndex == -1 {
return nil, fmt.Errorf("no separator found in mountinfo string: %s", mountString)
}
beforeFields := strings.Fields(mountString[:separatorIndex])
afterFields := strings.Fields(mountString[separatorIndex+1:])
if (len(beforeFields) + len(afterFields)) < 7 {
return nil, fmt.Errorf("too few fields")
}
mount := &MountInfo{
MajorMinorVer: getStringSliceElement(beforeFields, 2, ""),
Root: getStringSliceElement(beforeFields, 3, ""),
MountPoint: getStringSliceElement(beforeFields, 4, ""),
Options: mountOptionsParser(getStringSliceElement(beforeFields, 5, "")),
OptionalFields: nil,
FSType: getStringSliceElement(afterFields, 0, ""),
Source: getStringSliceElement(afterFields, 1, ""),
SuperOptions: mountOptionsParser(getStringSliceElement(afterFields, 2, "")),
}
mount.MountId, err = strconv.Atoi(getStringSliceElement(beforeFields, 0, ""))
if err != nil {
return nil, fmt.Errorf("failed to parse mount ID")
}
mount.ParentId, err = strconv.Atoi(getStringSliceElement(beforeFields, 1, ""))
if err != nil {
return nil, fmt.Errorf("failed to parse parent ID")
}
// Has optional fields, which is a space separated list of values.
// Example: shared:2 master:7
if len(beforeFields) > 6 {
mount.OptionalFields = make(map[string]string)
optionalFields := beforeFields[6:]
for _, field := range optionalFields {
optionSplit := strings.Split(field, ":")
target, value := optionSplit[0], ""
if len(optionSplit) == 2 {
value = optionSplit[1]
}
// Checks if the 'keys' in the optional fields in the mountinfo line are acceptable.
// Allowed 'keys' are shared, master, propagate_from, unbindable.
if _, ok := validOptionalFields[target]; ok {
mount.OptionalFields[target] = value
}
}
}
return mount, nil
}
// Parses the mount options, superblock options.
func mountOptionsParser(mountOptions string) map[string]string {
opts := make(map[string]string)
options := strings.Split(mountOptions, ",")
for _, opt := range options {
splitOption := strings.Split(opt, "=")
if len(splitOption) < 2 {
key := splitOption[0]
opts[key] = ""
} else {
key, value := splitOption[0], splitOption[1]
opts[key] = value
}
}
return opts
}
// Retrieves mountinfo information from `/proc/self/mountinfo`.
func GetMounts() ([]*MountInfo, error) {
f, err := os.Open("/proc/self/mountinfo")
if err != nil {
return nil, err
}
defer f.Close()
return parseMountInfo(f)
}
// Retrieves mountinfo information from a process's `/proc/<pid>/mountinfo`.
func GetProcMounts(pid int) ([]*MountInfo, error) {
f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
if err != nil {
return nil, err
}
defer f.Close()
return parseMountInfo(f)
}
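A minimal sketch of the new package-level GetMounts helper:
```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Mounts of the current process, read from /proc/self/mountinfo.
	mounts, err := procfs.GetMounts()
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range mounts {
		fmt.Printf("%d %s on %s type %s (source %s)\n",
			m.MountId, m.Root, m.MountPoint, m.FSType, m.Source)
	}
}
```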

vendor/github.com/prometheus/procfs/mountstats.go generated vendored

@@ -69,8 +69,8 @@ type MountStats interface {
type MountStatsNFS struct {
// The version of statistics provided.
StatVersion string
// The optional mountaddr of the NFS mount.
MountAddress string
// The mount options of the NFS mount.
Opts map[string]string
// The age of the NFS mount.
Age time.Duration
// Statistics related to byte counters for various operations.
@@ -181,11 +181,11 @@ type NFSOperationStats struct {
// Number of bytes received for this operation, including RPC headers and payload.
BytesReceived uint64
// Duration all requests spent queued for transmission before they were sent.
CumulativeQueueTime time.Duration
CumulativeQueueMilliseconds uint64
// Duration it took to get a reply back after the request was transmitted.
CumulativeTotalResponseTime time.Duration
CumulativeTotalResponseMilliseconds uint64
// Duration from when a request was enqueued to when it was completely handled.
CumulativeTotalRequestTime time.Duration
CumulativeTotalRequestMilliseconds uint64
}
// A NFSTransportStats contains statistics for the NFS mount RPC requests and
@@ -204,7 +204,7 @@ type NFSTransportStats struct {
// spent waiting for connections to the server to be established.
ConnectIdleTime uint64
// Duration since the NFS mount last saw any RPC traffic.
IdleTime time.Duration
IdleTimeSeconds uint64
// Number of RPC requests for this mount sent to the NFS server.
Sends uint64
// Number of RPC responses for this mount received from the NFS server.
@@ -342,10 +342,15 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
switch ss[0] {
case fieldOpts:
if stats.Opts == nil {
stats.Opts = map[string]string{}
}
for _, opt := range strings.Split(ss[1], ",") {
split := strings.Split(opt, "=")
if len(split) == 2 && split[0] == "mountaddr" {
stats.MountAddress = split[1]
if len(split) == 2 {
stats.Opts[split[0]] = split[1]
} else {
stats.Opts[opt] = ""
}
}
case fieldAge:
@@ -519,15 +524,15 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
}
ops = append(ops, NFSOperationStats{
Operation: strings.TrimSuffix(ss[0], ":"),
Requests: ns[0],
Transmissions: ns[1],
MajorTimeouts: ns[2],
BytesSent: ns[3],
BytesReceived: ns[4],
CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond,
CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond,
CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond,
Operation: strings.TrimSuffix(ss[0], ":"),
Requests: ns[0],
Transmissions: ns[1],
MajorTimeouts: ns[2],
BytesSent: ns[3],
BytesReceived: ns[4],
CumulativeQueueMilliseconds: ns[5],
CumulativeTotalResponseMilliseconds: ns[6],
CumulativeTotalRequestMilliseconds: ns[7],
})
}
@@ -603,7 +608,7 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
Bind: ns[1],
Connect: ns[2],
ConnectIdleTime: ns[3],
IdleTime: time.Duration(ns[4]) * time.Second,
IdleTimeSeconds: ns[4],
Sends: ns[5],
Receives: ns[6],
BadTransactionIDs: ns[7],

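A hedged sketch of consuming the new Opts map; the Mount type's Mount and Stats fields are not part of the hunks above and are assumed from the rest of mountstats.go:
```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	p, err := fs.Self()
	if err != nil {
		log.Fatal(err)
	}
	mounts, err := p.MountStats()
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range mounts {
		// Only NFS mounts carry the detailed statistics type (assumed Stats field).
		nfs, ok := m.Stats.(*procfs.MountStatsNFS)
		if !ok {
			continue
		}
		// The old MountAddress field is now one entry in the Opts map.
		fmt.Printf("%s mountaddr=%s age=%s\n", m.Mount, nfs.Opts["mountaddr"], nfs.Age)
	}
}
```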
vendor/github.com/prometheus/procfs/net_dev.go generated vendored

@@ -47,23 +47,13 @@ type NetDevLine struct {
// are interface names.
type NetDev map[string]NetDevLine
// NewNetDev returns kernel/system statistics read from /proc/net/dev.
func NewNetDev() (NetDev, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
return nil, err
}
return fs.NewNetDev()
// NetDev returns kernel/system statistics read from /proc/net/dev.
func (fs FS) NetDev() (NetDev, error) {
return newNetDev(fs.proc.Path("net/dev"))
}
// NewNetDev returns kernel/system statistics read from /proc/net/dev.
func (fs FS) NewNetDev() (NetDev, error) {
return newNetDev(fs.Path("net/dev"))
}
// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev.
func (p Proc) NewNetDev() (NetDev, error) {
// NetDev returns kernel/system statistics read from /proc/[pid]/net/dev.
func (p Proc) NetDev() (NetDev, error) {
return newNetDev(p.path("net/dev"))
}
@@ -75,7 +65,7 @@ func newNetDev(file string) (NetDev, error) {
}
defer f.Close()
nd := NetDev{}
netDev := NetDev{}
s := bufio.NewScanner(f)
for n := 0; s.Scan(); n++ {
// Skip the 2 header lines.
@@ -83,20 +73,20 @@ func newNetDev(file string) (NetDev, error) {
continue
}
line, err := nd.parseLine(s.Text())
line, err := netDev.parseLine(s.Text())
if err != nil {
return nd, err
return netDev, err
}
nd[line.Name] = *line
netDev[line.Name] = *line
}
return nd, s.Err()
return netDev, s.Err()
}
// parseLine parses a single line from the /proc/net/dev file. Header lines
// must be filtered prior to calling this method.
func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) {
func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) {
parts := strings.SplitN(rawLine, ":", 2)
if len(parts) != 2 {
return nil, errors.New("invalid net/dev line, missing colon")
@@ -185,11 +175,11 @@ func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) {
// Total aggregates the values across interfaces and returns a new NetDevLine.
// The Name field will be a sorted comma separated list of interface names.
func (nd NetDev) Total() NetDevLine {
func (netDev NetDev) Total() NetDevLine {
total := NetDevLine{}
names := make([]string, 0, len(nd))
for _, ifc := range nd {
names := make([]string, 0, len(netDev))
for _, ifc := range netDev {
names = append(names, ifc.Name)
total.RxBytes += ifc.RxBytes
total.RxPackets += ifc.RxPackets

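A short sketch of the renamed NetDev method and its Total aggregation:
```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	netDev, err := fs.NetDev() // renamed from NewNetDev
	if err != nil {
		log.Fatal(err)
	}
	total := netDev.Total()
	fmt.Printf("%s: %d bytes received\n", total.Name, total.RxBytes)
}
```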
vendor/github.com/prometheus/procfs/net_softnet.go generated vendored Normal file

@@ -0,0 +1,91 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"fmt"
"io/ioutil"
"strconv"
"strings"
)
// For the proc file format details,
// see https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810.
// SoftnetEntry contains a single row of data from /proc/net/softnet_stat
type SoftnetEntry struct {
// Number of processed packets
Processed uint
// Number of dropped packets
Dropped uint
// Number of times processing packets ran out of quota
TimeSqueezed uint
}
// GatherSoftnetStats reads /proc/net/softnet_stat, parses the relevant columns,
// and then returns a slice of SoftnetEntry structs.
func (fs FS) GatherSoftnetStats() ([]SoftnetEntry, error) {
data, err := ioutil.ReadFile(fs.proc.Path("net/softnet_stat"))
if err != nil {
return nil, fmt.Errorf("error reading softnet %s: %s", fs.proc.Path("net/softnet_stat"), err)
}
return parseSoftnetEntries(data)
}
func parseSoftnetEntries(data []byte) ([]SoftnetEntry, error) {
lines := strings.Split(string(data), "\n")
entries := make([]SoftnetEntry, 0)
var err error
const (
expectedColumns = 11
)
for _, line := range lines {
columns := strings.Fields(line)
width := len(columns)
if width == 0 {
continue
}
if width != expectedColumns {
return []SoftnetEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedColumns)
}
var entry SoftnetEntry
if entry, err = parseSoftnetEntry(columns); err != nil {
return []SoftnetEntry{}, err
}
entries = append(entries, entry)
}
return entries, nil
}
func parseSoftnetEntry(columns []string) (SoftnetEntry, error) {
var err error
var processed, dropped, timeSqueezed uint64
if processed, err = strconv.ParseUint(columns[0], 16, 32); err != nil {
return SoftnetEntry{}, fmt.Errorf("Unable to parse column 0: %s", err)
}
if dropped, err = strconv.ParseUint(columns[1], 16, 32); err != nil {
return SoftnetEntry{}, fmt.Errorf("Unable to parse column 1: %s", err)
}
if timeSqueezed, err = strconv.ParseUint(columns[2], 16, 32); err != nil {
return SoftnetEntry{}, fmt.Errorf("Unable to parse column 2: %s", err)
}
return SoftnetEntry{
Processed: uint(processed),
Dropped: uint(dropped),
TimeSqueezed: uint(timeSqueezed),
}, nil
}
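A usage sketch for GatherSoftnetStats; /proc/net/softnet_stat has one row per CPU, so the slice index doubles as the CPU number:
```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	entries, err := fs.GatherSoftnetStats()
	if err != nil {
		log.Fatal(err)
	}
	for i, e := range entries {
		fmt.Printf("cpu%d processed=%d dropped=%d time_squeeze=%d\n",
			i, e.Processed, e.Dropped, e.TimeSqueezed)
	}
}
```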

vendor/github.com/prometheus/procfs/net_unix.go generated vendored Normal file

@@ -0,0 +1,275 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"errors"
"fmt"
"io"
"os"
"strconv"
"strings"
)
// For the proc file format details,
// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815
// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48.
const (
netUnixKernelPtrIdx = iota
netUnixRefCountIdx
_
netUnixFlagsIdx
netUnixTypeIdx
netUnixStateIdx
netUnixInodeIdx
// Inode and Path are optional.
netUnixStaticFieldsCnt = 6
)
const (
netUnixTypeStream = 1
netUnixTypeDgram = 2
netUnixTypeSeqpacket = 5
netUnixFlagListen = 1 << 16
netUnixStateUnconnected = 1
netUnixStateConnecting = 2
netUnixStateConnected = 3
netUnixStateDisconnected = 4
)
var errInvalidKernelPtrFmt = errors.New("Invalid Num(the kernel table slot number) format")
// NetUnixType is the type of the type field.
type NetUnixType uint64
// NetUnixFlags is the type of the flags field.
type NetUnixFlags uint64
// NetUnixState is the type of the state field.
type NetUnixState uint64
// NetUnixLine represents a line of /proc/net/unix.
type NetUnixLine struct {
KernelPtr string
RefCount uint64
Protocol uint64
Flags NetUnixFlags
Type NetUnixType
State NetUnixState
Inode uint64
Path string
}
// NetUnix holds the data read from /proc/net/unix.
type NetUnix struct {
Rows []*NetUnixLine
}
// NewNetUnix returns data read from /proc/net/unix.
func NewNetUnix() (*NetUnix, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
return nil, err
}
return fs.NewNetUnix()
}
// NewNetUnix returns data read from /proc/net/unix.
func (fs FS) NewNetUnix() (*NetUnix, error) {
return NewNetUnixByPath(fs.proc.Path("net/unix"))
}
// NewNetUnixByPath returns data read from /proc/net/unix by file path.
// It might return an error with partially parsed data if an error occurs after some data has been parsed.
func NewNetUnixByPath(path string) (*NetUnix, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
return NewNetUnixByReader(f)
}
// NewNetUnixByReader returns data read from /proc/net/unix by a reader.
// It might return an error with partially parsed data if an error occurs after some data has been parsed.
func NewNetUnixByReader(reader io.Reader) (*NetUnix, error) {
nu := &NetUnix{
Rows: make([]*NetUnixLine, 0, 32),
}
scanner := bufio.NewScanner(reader)
// Omit the header line.
scanner.Scan()
header := scanner.Text()
// According to the man page of proc(5), the file does not contain an Inode field,
// but in practice it does.
// This code works for both cases.
hasInode := strings.Contains(header, "Inode")
minFieldsCnt := netUnixStaticFieldsCnt
if hasInode {
minFieldsCnt++
}
for scanner.Scan() {
line := scanner.Text()
item, err := nu.parseLine(line, hasInode, minFieldsCnt)
if err != nil {
return nu, err
}
nu.Rows = append(nu.Rows, item)
}
return nu, scanner.Err()
}
func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetUnixLine, error) {
fields := strings.Fields(line)
fieldsLen := len(fields)
if fieldsLen < minFieldsCnt {
return nil, fmt.Errorf(
"Parse Unix domain failed: expect at least %d fields but got %d",
minFieldsCnt, fieldsLen)
}
kernelPtr, err := u.parseKernelPtr(fields[netUnixKernelPtrIdx])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain num(%s) failed: %s", fields[netUnixKernelPtrIdx], err)
}
users, err := u.parseUsers(fields[netUnixRefCountIdx])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain ref count(%s) failed: %s", fields[netUnixRefCountIdx], err)
}
flags, err := u.parseFlags(fields[netUnixFlagsIdx])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain flags(%s) failed: %s", fields[netUnixFlagsIdx], err)
}
typ, err := u.parseType(fields[netUnixTypeIdx])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain type(%s) failed: %s", fields[netUnixTypeIdx], err)
}
state, err := u.parseState(fields[netUnixStateIdx])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain state(%s) failed: %s", fields[netUnixStateIdx], err)
}
var inode uint64
if hasInode {
inodeStr := fields[netUnixInodeIdx]
inode, err = u.parseInode(inodeStr)
if err != nil {
return nil, fmt.Errorf("Parse Unix domain inode(%s) failed: %s", inodeStr, err)
}
}
nuLine := &NetUnixLine{
KernelPtr: kernelPtr,
RefCount: users,
Type: typ,
Flags: flags,
State: state,
Inode: inode,
}
// Path field is optional.
if fieldsLen > minFieldsCnt {
pathIdx := netUnixInodeIdx + 1
if !hasInode {
pathIdx--
}
nuLine.Path = fields[pathIdx]
}
return nuLine, nil
}
func (u NetUnix) parseKernelPtr(str string) (string, error) {
if !strings.HasSuffix(str, ":") {
return "", errInvalidKernelPtrFmt
}
return str[:len(str)-1], nil
}
func (u NetUnix) parseUsers(hexStr string) (uint64, error) {
return strconv.ParseUint(hexStr, 16, 32)
}
func (u NetUnix) parseProtocol(hexStr string) (uint64, error) {
return strconv.ParseUint(hexStr, 16, 32)
}
func (u NetUnix) parseType(hexStr string) (NetUnixType, error) {
typ, err := strconv.ParseUint(hexStr, 16, 16)
if err != nil {
return 0, err
}
return NetUnixType(typ), nil
}
func (u NetUnix) parseFlags(hexStr string) (NetUnixFlags, error) {
flags, err := strconv.ParseUint(hexStr, 16, 32)
if err != nil {
return 0, err
}
return NetUnixFlags(flags), nil
}
func (u NetUnix) parseState(hexStr string) (NetUnixState, error) {
st, err := strconv.ParseInt(hexStr, 16, 8)
if err != nil {
return 0, err
}
return NetUnixState(st), nil
}
func (u NetUnix) parseInode(inodeStr string) (uint64, error) {
return strconv.ParseUint(inodeStr, 10, 64)
}
func (t NetUnixType) String() string {
switch t {
case netUnixTypeStream:
return "stream"
case netUnixTypeDgram:
return "dgram"
case netUnixTypeSeqpacket:
return "seqpacket"
}
return "unknown"
}
func (f NetUnixFlags) String() string {
switch f {
case netUnixFlagListen:
return "listen"
default:
return "default"
}
}
func (s NetUnixState) String() string {
switch s {
case netUnixStateUnconnected:
return "unconnected"
case netUnixStateConnecting:
return "connecting"
case netUnixStateConnected:
return "connected"
case netUnixStateDisconnected:
return "disconnected"
}
return "unknown"
}
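A minimal sketch of NewNetUnix together with the String helpers defined above:
```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	nu, err := procfs.NewNetUnix() // reads /proc/net/unix
	if err != nil {
		log.Fatal(err)
	}
	for _, row := range nu.Rows {
		fmt.Printf("%s %s %s %q\n", row.Type, row.State, row.Flags, row.Path)
	}
}
```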

vendor/github.com/prometheus/procfs/proc.go generated vendored

@@ -20,6 +20,8 @@ import (
"os"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/fs"
)
// Proc provides information about a running process.
@@ -27,7 +29,7 @@ type Proc struct {
// The process ID.
PID int
fs FS
fs fs.FS
}
// Procs represents a list of Proc structs.
@@ -52,7 +54,7 @@ func NewProc(pid int) (Proc, error) {
if err != nil {
return Proc{}, err
}
return fs.NewProc(pid)
return fs.Proc(pid)
}
// AllProcs returns a list of all currently available processes under /proc.
@@ -66,28 +68,35 @@ func AllProcs() (Procs, error) {
// Self returns a process for the current process.
func (fs FS) Self() (Proc, error) {
p, err := os.Readlink(fs.Path("self"))
p, err := os.Readlink(fs.proc.Path("self"))
if err != nil {
return Proc{}, err
}
pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1))
pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1))
if err != nil {
return Proc{}, err
}
return fs.NewProc(pid)
return fs.Proc(pid)
}
// NewProc returns a process for the given pid.
//
// Deprecated: use fs.Proc() instead
func (fs FS) NewProc(pid int) (Proc, error) {
if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil {
return fs.Proc(pid)
}
// Proc returns a process for the given pid.
func (fs FS) Proc(pid int) (Proc, error) {
if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil {
return Proc{}, err
}
return Proc{PID: pid, fs: fs}, nil
return Proc{PID: pid, fs: fs.proc}, nil
}
// AllProcs returns a list of all currently available processes.
func (fs FS) AllProcs() (Procs, error) {
d, err := os.Open(fs.Path())
d, err := os.Open(fs.proc.Path())
if err != nil {
return Procs{}, err
}
@@ -104,7 +113,7 @@ func (fs FS) AllProcs() (Procs, error) {
if err != nil {
continue
}
p = append(p, Proc{PID: int(pid), fs: fs})
p = append(p, Proc{PID: int(pid), fs: fs.proc})
}
return p, nil
@@ -238,6 +247,20 @@ func (p Proc) MountStats() ([]*Mount, error) {
return parseMountStats(f)
}
// MountInfo retrieves mount information for mount points in a
// process's namespace.
// It supplies information missing in `/proc/self/mounts` and
// fixes various other problems with that file too.
func (p Proc) MountInfo() ([]*MountInfo, error) {
f, err := os.Open(p.path("mountinfo"))
if err != nil {
return nil, err
}
defer f.Close()
return parseMountInfo(f)
}
func (p Proc) fileDescriptors() ([]string, error) {
d, err := os.Open(p.path("fd"))
if err != nil {
@@ -256,3 +279,33 @@ func (p Proc) fileDescriptors() ([]string, error) {
func (p Proc) path(pa ...string) string {
return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
}
// FileDescriptorsInfo retrieves information about all file descriptors of
// the process.
func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) {
names, err := p.fileDescriptors()
if err != nil {
return nil, err
}
var fdinfos ProcFDInfos
for _, n := range names {
fdinfo, err := p.FDInfo(n)
if err != nil {
continue
}
fdinfos = append(fdinfos, *fdinfo)
}
return fdinfos, nil
}
// Schedstat returns task scheduling information for the process.
func (p Proc) Schedstat() (ProcSchedstat, error) {
contents, err := ioutil.ReadFile(p.path("schedstat"))
if err != nil {
return ProcSchedstat{}, err
}
return parseProcSchedstat(string(contents))
}
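To illustrate the renamed and newly added accessors above, here is a minimal sketch; the /proc mount point and the choice of PID 1 are assumptions, not part of the change itself:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc") // assumed default proc mount point
	if err != nil {
		log.Fatal(err)
	}
	p, err := fs.Proc(1) // replaces the deprecated fs.NewProc(1)
	if err != nil {
		log.Fatal(err)
	}
	mounts, err := p.MountInfo()
	if err != nil {
		log.Fatal(err)
	}
	sched, err := p.Schedstat()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(mounts), sched.RunningNanoseconds)
}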

43
vendor/github.com/prometheus/procfs/proc_environ.go generated vendored Normal file
View File

@@ -0,0 +1,43 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"io/ioutil"
"os"
"strings"
)
// Environ reads the process environment from /proc/<pid>/environ.
func (p Proc) Environ() ([]string, error) {
environments := make([]string, 0)
f, err := os.Open(p.path("environ"))
if err != nil {
return environments, err
}
defer f.Close()
data, err := ioutil.ReadAll(f)
if err != nil {
return environments, err
}
environments = strings.Split(string(data), "\000")
if len(environments) > 0 {
environments = environments[:len(environments)-1]
}
return environments, nil
}
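A minimal usage sketch for the new Environ accessor, assuming the default /proc mount point; it lists the environment of the calling process itself:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	p, err := fs.Self()
	if err != nil {
		log.Fatal(err)
	}
	env, err := p.Environ() // one "KEY=value" string per entry
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range env {
		fmt.Println(kv)
	}
}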

132
vendor/github.com/prometheus/procfs/proc_fdinfo.go generated vendored Normal file
View File

@@ -0,0 +1,132 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"regexp"
"strings"
)
// Regexp variables
var (
rPos = regexp.MustCompile(`^pos:\s+(\d+)$`)
rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`)
rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
rInotify = regexp.MustCompile(`^inotify`)
)
// ProcFDInfo contains information about a file descriptor.
type ProcFDInfo struct {
// File descriptor
FD string
// File offset
Pos string
// File access mode and status flags
Flags string
// Mount point ID
MntID string
// List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only)
InotifyInfos []InotifyInfo
}
// FDInfo returns information about the given file descriptor. On kernels older than 3.8, InotifyInfos will always be empty.
func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
f, err := os.Open(p.path("fdinfo", fd))
if err != nil {
return nil, err
}
defer f.Close()
fdinfo, err := ioutil.ReadAll(f)
if err != nil {
return nil, fmt.Errorf("could not read %s: %s", f.Name(), err)
}
var text, pos, flags, mntid string
var inotify []InotifyInfo
scanner := bufio.NewScanner(strings.NewReader(string(fdinfo)))
for scanner.Scan() {
text = scanner.Text()
if rPos.MatchString(text) {
pos = rPos.FindStringSubmatch(text)[1]
} else if rFlags.MatchString(text) {
flags = rFlags.FindStringSubmatch(text)[1]
} else if rMntID.MatchString(text) {
mntid = rMntID.FindStringSubmatch(text)[1]
} else if rInotify.MatchString(text) {
newInotify, err := parseInotifyInfo(text)
if err != nil {
return nil, err
}
inotify = append(inotify, *newInotify)
}
}
i := &ProcFDInfo{
FD: fd,
Pos: pos,
Flags: flags,
MntID: mntid,
InotifyInfos: inotify,
}
return i, nil
}
// InotifyInfo represents a single inotify line in the fdinfo file.
type InotifyInfo struct {
// Watch descriptor number
WD string
// Inode number
Ino string
// Device ID
Sdev string
// Mask of events being monitored
Mask string
}
// InotifyInfo constructor. Only available on kernel 3.8+.
func parseInotifyInfo(line string) (*InotifyInfo, error) {
r := regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)\s+mask:([0-9a-f]+)`)
m := r.FindStringSubmatch(line)
i := &InotifyInfo{
WD: m[1],
Ino: m[2],
Sdev: m[3],
Mask: m[4],
}
return i, nil
}
// ProcFDInfos represents a list of ProcFDInfo structs.
type ProcFDInfos []ProcFDInfo
func (p ProcFDInfos) Len() int { return len(p) }
func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD }
// InotifyWatchLen returns the total number of inotify watches
func (p ProcFDInfos) InotifyWatchLen() (int, error) {
length := 0
for _, f := range p {
length += len(f.InotifyInfos)
}
return length, nil
}
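A sketch of how the new fdinfo API composes, again assuming the default /proc mount point and the calling process; FileDescriptorsInfo and InotifyWatchLen are the accessors defined in proc.go and above:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	p, err := fs.Self()
	if err != nil {
		log.Fatal(err)
	}
	fdinfos, err := p.FileDescriptorsInfo()
	if err != nil {
		log.Fatal(err)
	}
	watches, err := fdinfos.InotifyWatchLen() // sums InotifyInfos across all fds
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d fds, %d inotify watches\n", len(fdinfos), watches)
}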

View File

@@ -39,8 +39,8 @@ type ProcIO struct {
CancelledWriteBytes int64
}
// NewIO creates a new ProcIO instance from a given Proc instance.
func (p Proc) NewIO() (ProcIO, error) {
// IO creates a new ProcIO instance from a given Proc instance.
func (p Proc) IO() (ProcIO, error) {
pio := ProcIO{}
f, err := os.Open(p.path("io"))
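The rename only affects the call site; a short sketch under the usual assumptions (default /proc mount, current process):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	p, err := fs.Self()
	if err != nil {
		log.Fatal(err)
	}
	pio, err := p.IO() // previously p.NewIO()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("cancelled write bytes:", pio.CancelledWriteBytes)
}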

View File

@@ -78,7 +78,14 @@ var (
)
// NewLimits returns the current soft limits of the process.
//
// Deprecated: use p.Limits() instead
func (p Proc) NewLimits() (ProcLimits, error) {
return p.Limits()
}
// Limits returns the current soft limits of the process.
func (p Proc) Limits() (ProcLimits, error) {
f, err := os.Open(p.path("limits"))
if err != nil {
return ProcLimits{}, err
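Callers can switch to the new name while NewLimits stays behind as a deprecated wrapper; a sketch, with the struct simply dumped since its fields are outside this hunk:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	p, err := fs.Self()
	if err != nil {
		log.Fatal(err)
	}
	limits, err := p.Limits() // NewLimits() still works but is deprecated
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", limits)
}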

View File

@@ -29,9 +29,9 @@ type Namespace struct {
// Namespaces contains all of the namespaces that the process is contained in.
type Namespaces map[string]Namespace
// NewNamespaces reads from /proc/[pid/ns/* to get the namespaces of which the
// Namespaces reads from /proc/<pid>/ns/* to get the namespaces of which the
// process is a member.
func (p Proc) NewNamespaces() (Namespaces, error) {
func (p Proc) Namespaces() (Namespaces, error) {
d, err := os.Open(p.path("ns"))
if err != nil {
return nil, err
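Since Namespaces is a map keyed by namespace name, migrating from NewNamespaces is just a rename at the call site; a sketch under the same assumptions as above:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	p, err := fs.Self()
	if err != nil {
		log.Fatal(err)
	}
	ns, err := p.Namespaces() // previously p.NewNamespaces()
	if err != nil {
		log.Fatal(err)
	}
	for name := range ns { // e.g. "net", "mnt", "pid", ...
		fmt.Println(name)
	}
}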

View File

@@ -51,20 +51,11 @@ type PSIStats struct {
Full *PSILine
}
// NewPSIStatsForResource reads pressure stall information for the specified
// resource. At time of writing this can be either "cpu", "memory" or "io".
func NewPSIStatsForResource(resource string) (PSIStats, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
return PSIStats{}, err
}
return fs.NewPSIStatsForResource(resource)
}
// NewPSIStatsForResource reads pressure stall information from /proc/pressure/<resource>
func (fs FS) NewPSIStatsForResource(resource string) (PSIStats, error) {
file, err := os.Open(fs.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
// PSIStatsForResource reads pressure stall information for the specified
// resource from /proc/pressure/<resource>. At time of writing this can be
// either "cpu", "memory" or "io".
func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
file, err := os.Open(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
if err != nil {
return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %s", resource)
}
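With the package-level constructor gone, pressure stall information is now read through an FS value; a sketch reading the "memory" resource, where the nil check reflects that the full line may not be present for every resource:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.PSIStatsForResource("memory") // "cpu", "memory" or "io"
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("full line present:", stats.Full != nil)
}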

View File

@@ -18,6 +18,8 @@ import (
"fmt"
"io/ioutil"
"os"
"github.com/prometheus/procfs/internal/fs"
)
// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
@@ -99,11 +101,18 @@ type ProcStat struct {
// Resident set size in pages.
RSS int
fs FS
proc fs.FS
}
// NewStat returns the current status information of the process.
//
// Deprecated: use p.Stat() instead
func (p Proc) NewStat() (ProcStat, error) {
return p.Stat()
}
// Stat returns the current status information of the process.
func (p Proc) Stat() (ProcStat, error) {
f, err := os.Open(p.path("stat"))
if err != nil {
return ProcStat{}, err
@@ -118,7 +127,7 @@ func (p Proc) NewStat() (ProcStat, error) {
var (
ignore int
s = ProcStat{PID: p.PID, fs: p.fs}
s = ProcStat{PID: p.PID, proc: p.fs}
l = bytes.Index(data, []byte("("))
r = bytes.LastIndex(data, []byte(")"))
)
@@ -175,7 +184,8 @@ func (s ProcStat) ResidentMemory() int {
// StartTime returns the unix timestamp of the process in seconds.
func (s ProcStat) StartTime() (float64, error) {
stat, err := s.fs.NewStat()
fs := FS{proc: s.proc}
stat, err := fs.Stat()
if err != nil {
return 0, err
}
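The Stat rename and the helper methods shown in this hunk combine as follows; the mount point, the use of the current process, and the printed values are illustrative only:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	p, err := fs.Self()
	if err != nil {
		log.Fatal(err)
	}
	s, err := p.Stat() // previously p.NewStat()
	if err != nil {
		log.Fatal(err)
	}
	start, err := s.StartTime()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(s.ResidentMemory(), start)
}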

167
vendor/github.com/prometheus/procfs/proc_status.go generated vendored Normal file
View File

@@ -0,0 +1,167 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bytes"
"io/ioutil"
"os"
"strconv"
"strings"
)
// ProcStatus provides status information about the process,
// read from /proc/[pid]/status.
type ProcStatus struct {
// The process ID.
PID int
// The process name.
Name string
// Thread group ID.
TGID int
// Peak virtual memory size.
VmPeak uint64
// Virtual memory size.
VmSize uint64
// Locked memory size.
VmLck uint64
// Pinned memory size.
VmPin uint64
// Peak resident set size.
VmHWM uint64
// Resident set size (sum of RssAnon, RssFile and RssShmem).
VmRSS uint64
// Size of resident anonymous memory.
RssAnon uint64
// Size of resident file mappings.
RssFile uint64
// Size of resident shared memory.
RssShmem uint64
// Size of data segments.
VmData uint64
// Size of stack segments.
VmStk uint64
// Size of text segments.
VmExe uint64
// Shared library code size.
VmLib uint64
// Page table entries size.
VmPTE uint64
// Size of second-level page tables.
VmPMD uint64
// Swapped-out virtual memory size by anonymous private.
VmSwap uint64
// Size of hugetlb memory portions
HugetlbPages uint64
// Number of voluntary context switches.
VoluntaryCtxtSwitches uint64
// Number of involuntary context switches.
NonVoluntaryCtxtSwitches uint64
}
// NewStatus returns the current status information of the process.
func (p Proc) NewStatus() (ProcStatus, error) {
f, err := os.Open(p.path("status"))
if err != nil {
return ProcStatus{}, err
}
defer f.Close()
data, err := ioutil.ReadAll(f)
if err != nil {
return ProcStatus{}, err
}
s := ProcStatus{PID: p.PID}
lines := strings.Split(string(data), "\n")
for _, line := range lines {
if !bytes.Contains([]byte(line), []byte(":")) {
continue
}
kv := strings.SplitN(line, ":", 2)
// removes spaces
k := string(strings.TrimSpace(kv[0]))
v := string(strings.TrimSpace(kv[1]))
// removes "kB"
v = string(bytes.Trim([]byte(v), " kB"))
// value to int when possible
// the error check can be skipped here because vKBytes is not used when the value is a string
vKBytes, _ := strconv.ParseUint(v, 10, 64)
// convert kB to B
vBytes := vKBytes * 1024
s.fillStatus(k, v, vKBytes, vBytes)
}
return s, nil
}
func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) {
switch k {
case "Tgid":
s.TGID = int(vUint)
case "Name":
s.Name = vString
case "VmPeak":
s.VmPeak = vUintBytes
case "VmSize":
s.VmSize = vUintBytes
case "VmLck":
s.VmLck = vUintBytes
case "VmPin":
s.VmPin = vUintBytes
case "VmHWM":
s.VmHWM = vUintBytes
case "VmRSS":
s.VmRSS = vUintBytes
case "RssAnon":
s.RssAnon = vUintBytes
case "RssFile":
s.RssFile = vUintBytes
case "RssShmem":
s.RssShmem = vUintBytes
case "VmData":
s.VmData = vUintBytes
case "VmStk":
s.VmStk = vUintBytes
case "VmExe":
s.VmExe = vUintBytes
case "VmLib":
s.VmLib = vUintBytes
case "VmPTE":
s.VmPTE = vUintBytes
case "VmPMD":
s.VmPMD = vUintBytes
case "VmSwap":
s.VmSwap = vUintBytes
case "HugetlbPages":
s.HugetlbPages = vUintBytes
case "voluntary_ctxt_switches":
s.VoluntaryCtxtSwitches = vUint
case "nonvoluntary_ctxt_switches":
s.NonVoluntaryCtxtSwitches = vUint
}
}
// TotalCtxtSwitches returns the total number of context switches.
func (s ProcStatus) TotalCtxtSwitches() uint64 {
return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches
}
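A usage sketch for the new status parser; Name, VmRSS and the context switch helper are the fields exercised here, and the usual /proc and current-process assumptions apply:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	p, err := fs.Self()
	if err != nil {
		log.Fatal(err)
	}
	status, err := p.NewStatus()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(status.Name, status.VmRSS, status.TotalCtxtSwitches())
}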

118
vendor/github.com/prometheus/procfs/schedstat.go generated vendored Normal file
View File

@@ -0,0 +1,118 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"errors"
"os"
"regexp"
"strconv"
)
var (
cpuLineRE = regexp.MustCompile(`cpu(\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+)`)
procLineRE = regexp.MustCompile(`(\d+) (\d+) (\d+)`)
)
// Schedstat contains scheduler statistics from /proc/schedstat
//
// See
// https://www.kernel.org/doc/Documentation/scheduler/sched-stats.txt
// for a detailed description of what these numbers mean.
//
// Note the current kernel documentation claims some of the time units are in
// jiffies when they are actually in nanoseconds since 2.6.23 with the
// introduction of CFS. A fix to the documentation is pending. See
// https://lore.kernel.org/patchwork/project/lkml/list/?series=403473
type Schedstat struct {
CPUs []*SchedstatCPU
}
// SchedstatCPU contains the values from one "cpu<N>" line
type SchedstatCPU struct {
CPUNum string
RunningNanoseconds uint64
WaitingNanoseconds uint64
RunTimeslices uint64
}
// ProcSchedstat contains the values from /proc/<pid>/schedstat
type ProcSchedstat struct {
RunningNanoseconds uint64
WaitingNanoseconds uint64
RunTimeslices uint64
}
// Schedstat reads data from /proc/schedstat
func (fs FS) Schedstat() (*Schedstat, error) {
file, err := os.Open(fs.proc.Path("schedstat"))
if err != nil {
return nil, err
}
defer file.Close()
stats := &Schedstat{}
scanner := bufio.NewScanner(file)
for scanner.Scan() {
match := cpuLineRE.FindStringSubmatch(scanner.Text())
if match != nil {
cpu := &SchedstatCPU{}
cpu.CPUNum = match[1]
cpu.RunningNanoseconds, err = strconv.ParseUint(match[8], 10, 64)
if err != nil {
continue
}
cpu.WaitingNanoseconds, err = strconv.ParseUint(match[9], 10, 64)
if err != nil {
continue
}
cpu.RunTimeslices, err = strconv.ParseUint(match[10], 10, 64)
if err != nil {
continue
}
stats.CPUs = append(stats.CPUs, cpu)
}
}
return stats, nil
}
func parseProcSchedstat(contents string) (stats ProcSchedstat, err error) {
match := procLineRE.FindStringSubmatch(contents)
if match != nil {
stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64)
if err != nil {
return
}
stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64)
if err != nil {
return
}
stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64)
return
}
err = errors.New("could not parse schedstat")
return
}
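System-wide scheduler statistics are read through the FS value; a sketch that prints the per-CPU running time, assuming the default /proc mount point:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	st, err := fs.Schedstat()
	if err != nil {
		log.Fatal(err)
	}
	for _, cpu := range st.CPUs {
		fmt.Printf("cpu%s: %d ns running\n", cpu.CPUNum, cpu.RunningNanoseconds)
	}
}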

View File

@@ -20,6 +20,8 @@ import (
"os"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/fs"
)
// CPUStat shows how much time the CPU spent in various stages.
@@ -78,16 +80,6 @@ type Stat struct {
SoftIRQ SoftIRQStat
}
// NewStat returns kernel/system statistics read from /proc/stat.
func NewStat() (Stat, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
return Stat{}, err
}
return fs.NewStat()
}
// Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum).
func parseCPUStat(line string) (CPUStat, int64, error) {
cpuStat := CPUStat{}
@@ -149,11 +141,31 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
return softIRQStat, total, nil
}
// NewStat returns an information about current kernel/system statistics.
func (fs FS) NewStat() (Stat, error) {
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
// NewStat returns information about current cpu/process statistics.
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
//
// Deprecated: use fs.Stat() instead
func NewStat() (Stat, error) {
fs, err := NewFS(fs.DefaultProcMountPoint)
if err != nil {
return Stat{}, err
}
return fs.Stat()
}
f, err := os.Open(fs.Path("stat"))
// NewStat returns information about current cpu/process statistics.
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
//
// Deprecated: use fs.Stat() instead
func (fs FS) NewStat() (Stat, error) {
return fs.Stat()
}
// Stat returns information about current cpu/process statistics.
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
func (fs FS) Stat() (Stat, error) {
f, err := os.Open(fs.proc.Path("stat"))
if err != nil {
return Stat{}, err
}
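Both the package-level NewStat and the fs.NewStat method now forward to fs.Stat, so new code can call it directly. A sketch, printing only the SoftIRQ field since the rest of the struct is outside this hunk:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	st, err := fs.Stat() // replaces NewStat() and fs.NewStat()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", st.SoftIRQ)
}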

View File

@@ -86,8 +86,10 @@ Usage: $bname [-C <DIR>] -c -f <ARCHIVE> <FILE...> (create archive)
$bname [-C <DIR>] -x -f <ARCHIVE> (extract archive)
Options:
-C <DIR> (change directory)
-v (verbose)
-C <DIR> (change directory)
-v (verbose)
--recursive-unlink (recursively delete existing directory if path
collides with file or directory to extract)
Example: Change to sysfs directory, create ttar file from fixtures directory
$bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/
@@ -111,8 +113,9 @@ function set_cmd {
}
unset VERBOSE
unset RECURSIVE_UNLINK
while getopts :cf:htxvC: opt; do
while getopts :cf:-:htxvC: opt; do
case $opt in
c)
set_cmd "create"
@@ -136,6 +139,18 @@ while getopts :cf:htxvC: opt; do
C)
CDIR=$OPTARG
;;
-)
case $OPTARG in
recursive-unlink)
RECURSIVE_UNLINK="yes"
;;
*)
echo -e "Error: invalid option -$OPTARG"
echo
usage 1
;;
esac
;;
*)
echo >&2 "ERROR: invalid option -$OPTARG"
echo
@@ -212,16 +227,16 @@ function extract {
local eof_without_newline
if [ "$size" -gt 0 ]; then
if [[ "$line" =~ [^\\]EOF ]]; then
# An EOF not preceeded by a backslash indicates that the line
# An EOF not preceded by a backslash indicates that the line
# does not end with a newline
eof_without_newline=1
else
eof_without_newline=0
fi
# Replace NULLBYTE with null byte if at beginning of line
# Replace NULLBYTE with null byte unless preceeded by backslash
# Replace NULLBYTE with null byte unless preceded by backslash
# Remove one backslash in front of NULLBYTE (if any)
# Remove EOF unless preceeded by backslash
# Remove EOF unless preceded by backslash
# Remove one backslash in front of EOF
if [ $USE_PYTHON -eq 1 ]; then
echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path"
@@ -245,7 +260,16 @@ function extract {
fi
if [[ $line =~ ^Path:\ (.*)$ ]]; then
path=${BASH_REMATCH[1]}
if [ -e "$path" ] || [ -L "$path" ]; then
if [ -L "$path" ]; then
rm "$path"
elif [ -d "$path" ]; then
if [ "${RECURSIVE_UNLINK:-}" == "yes" ]; then
rm -r "$path"
else
# Safe because symlinks to directories are dealt with above
rmdir "$path"
fi
elif [ -e "$path" ]; then
rm "$path"
fi
elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
@@ -338,8 +362,8 @@ function _create {
else
< "$file" \
sed 's/EOF/\\EOF/g;
s/NULLBYTE/\\NULLBYTE/g;
s/\x0/NULLBYTE/g;
s/NULLBYTE/\\NULLBYTE/g;
s/\x0/NULLBYTE/g;
'
fi
if [[ "$eof_without_newline" -eq 1 ]]; then

210
vendor/github.com/prometheus/procfs/vm.go generated vendored Normal file
View File

@@ -0,0 +1,210 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows
package procfs
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// The VM interface is described at
// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
// Each setting is exposed as a single file.
// Each file contains one line with a single numerical value, except lowmem_reserve_ratio, which holds an array,
// and numa_zonelist_order (deprecated), which is a string.
type VM struct {
AdminReserveKbytes *int64 // /proc/sys/vm/admin_reserve_kbytes
BlockDump *int64 // /proc/sys/vm/block_dump
CompactUnevictableAllowed *int64 // /proc/sys/vm/compact_unevictable_allowed
DirtyBackgroundBytes *int64 // /proc/sys/vm/dirty_background_bytes
DirtyBackgroundRatio *int64 // /proc/sys/vm/dirty_background_ratio
DirtyBytes *int64 // /proc/sys/vm/dirty_bytes
DirtyExpireCentisecs *int64 // /proc/sys/vm/dirty_expire_centisecs
DirtyRatio *int64 // /proc/sys/vm/dirty_ratio
DirtytimeExpireSeconds *int64 // /proc/sys/vm/dirtytime_expire_seconds
DirtyWritebackCentisecs *int64 // /proc/sys/vm/dirty_writeback_centisecs
DropCaches *int64 // /proc/sys/vm/drop_caches
ExtfragThreshold *int64 // /proc/sys/vm/extfrag_threshold
HugetlbShmGroup *int64 // /proc/sys/vm/hugetlb_shm_group
LaptopMode *int64 // /proc/sys/vm/laptop_mode
LegacyVaLayout *int64 // /proc/sys/vm/legacy_va_layout
LowmemReserveRatio []*int64 // /proc/sys/vm/lowmem_reserve_ratio
MaxMapCount *int64 // /proc/sys/vm/max_map_count
MemoryFailureEarlyKill *int64 // /proc/sys/vm/memory_failure_early_kill
MemoryFailureRecovery *int64 // /proc/sys/vm/memory_failure_recovery
MinFreeKbytes *int64 // /proc/sys/vm/min_free_kbytes
MinSlabRatio *int64 // /proc/sys/vm/min_slab_ratio
MinUnmappedRatio *int64 // /proc/sys/vm/min_unmapped_ratio
MmapMinAddr *int64 // /proc/sys/vm/mmap_min_addr
NrHugepages *int64 // /proc/sys/vm/nr_hugepages
NrHugepagesMempolicy *int64 // /proc/sys/vm/nr_hugepages_mempolicy
NrOvercommitHugepages *int64 // /proc/sys/vm/nr_overcommit_hugepages
NumaStat *int64 // /proc/sys/vm/numa_stat
NumaZonelistOrder string // /proc/sys/vm/numa_zonelist_order
OomDumpTasks *int64 // /proc/sys/vm/oom_dump_tasks
OomKillAllocatingTask *int64 // /proc/sys/vm/oom_kill_allocating_task
OvercommitKbytes *int64 // /proc/sys/vm/overcommit_kbytes
OvercommitMemory *int64 // /proc/sys/vm/overcommit_memory
OvercommitRatio *int64 // /proc/sys/vm/overcommit_ratio
PageCluster *int64 // /proc/sys/vm/page-cluster
PanicOnOom *int64 // /proc/sys/vm/panic_on_oom
PercpuPagelistFraction *int64 // /proc/sys/vm/percpu_pagelist_fraction
StatInterval *int64 // /proc/sys/vm/stat_interval
Swappiness *int64 // /proc/sys/vm/swappiness
UserReserveKbytes *int64 // /proc/sys/vm/user_reserve_kbytes
VfsCachePressure *int64 // /proc/sys/vm/vfs_cache_pressure
WatermarkBoostFactor *int64 // /proc/sys/vm/watermark_boost_factor
WatermarkScaleFactor *int64 // /proc/sys/vm/watermark_scale_factor
ZoneReclaimMode *int64 // /proc/sys/vm/zone_reclaim_mode
}
// VM reads the VM statistics from the specified `proc` filesystem.
func (fs FS) VM() (*VM, error) {
path := fs.proc.Path("sys/vm")
file, err := os.Stat(path)
if err != nil {
return nil, err
}
if !file.Mode().IsDir() {
return nil, fmt.Errorf("%s is not a directory", path)
}
files, err := ioutil.ReadDir(path)
if err != nil {
return nil, err
}
var vm VM
for _, f := range files {
if f.IsDir() {
continue
}
name := filepath.Join(path, f.Name())
// ignore errors on read, as some files in /proc/sys/vm
// are write-only
value, err := util.SysReadFile(name)
if err != nil {
continue
}
vp := util.NewValueParser(value)
switch f.Name() {
case "admin_reserve_kbytes":
vm.AdminReserveKbytes = vp.PInt64()
case "block_dump":
vm.BlockDump = vp.PInt64()
case "compact_unevictable_allowed":
vm.CompactUnevictableAllowed = vp.PInt64()
case "dirty_background_bytes":
vm.DirtyBackgroundBytes = vp.PInt64()
case "dirty_background_ratio":
vm.DirtyBackgroundRatio = vp.PInt64()
case "dirty_bytes":
vm.DirtyBytes = vp.PInt64()
case "dirty_expire_centisecs":
vm.DirtyExpireCentisecs = vp.PInt64()
case "dirty_ratio":
vm.DirtyRatio = vp.PInt64()
case "dirtytime_expire_seconds":
vm.DirtytimeExpireSeconds = vp.PInt64()
case "dirty_writeback_centisecs":
vm.DirtyWritebackCentisecs = vp.PInt64()
case "drop_caches":
vm.DropCaches = vp.PInt64()
case "extfrag_threshold":
vm.ExtfragThreshold = vp.PInt64()
case "hugetlb_shm_group":
vm.HugetlbShmGroup = vp.PInt64()
case "laptop_mode":
vm.LaptopMode = vp.PInt64()
case "legacy_va_layout":
vm.LegacyVaLayout = vp.PInt64()
case "lowmem_reserve_ratio":
stringSlice := strings.Fields(value)
pint64Slice := make([]*int64, 0, len(stringSlice))
for _, value := range stringSlice {
vp := util.NewValueParser(value)
pint64Slice = append(pint64Slice, vp.PInt64())
}
vm.LowmemReserveRatio = pint64Slice
case "max_map_count":
vm.MaxMapCount = vp.PInt64()
case "memory_failure_early_kill":
vm.MemoryFailureEarlyKill = vp.PInt64()
case "memory_failure_recovery":
vm.MemoryFailureRecovery = vp.PInt64()
case "min_free_kbytes":
vm.MinFreeKbytes = vp.PInt64()
case "min_slab_ratio":
vm.MinSlabRatio = vp.PInt64()
case "min_unmapped_ratio":
vm.MinUnmappedRatio = vp.PInt64()
case "mmap_min_addr":
vm.MmapMinAddr = vp.PInt64()
case "nr_hugepages":
vm.NrHugepages = vp.PInt64()
case "nr_hugepages_mempolicy":
vm.NrHugepagesMempolicy = vp.PInt64()
case "nr_overcommit_hugepages":
vm.NrOvercommitHugepages = vp.PInt64()
case "numa_stat":
vm.NumaStat = vp.PInt64()
case "numa_zonelist_order":
vm.NumaZonelistOrder = value
case "oom_dump_tasks":
vm.OomDumpTasks = vp.PInt64()
case "oom_kill_allocating_task":
vm.OomKillAllocatingTask = vp.PInt64()
case "overcommit_kbytes":
vm.OvercommitKbytes = vp.PInt64()
case "overcommit_memory":
vm.OvercommitMemory = vp.PInt64()
case "overcommit_ratio":
vm.OvercommitRatio = vp.PInt64()
case "page-cluster":
vm.PageCluster = vp.PInt64()
case "panic_on_oom":
vm.PanicOnOom = vp.PInt64()
case "percpu_pagelist_fraction":
vm.PercpuPagelistFraction = vp.PInt64()
case "stat_interval":
vm.StatInterval = vp.PInt64()
case "swappiness":
vm.Swappiness = vp.PInt64()
case "user_reserve_kbytes":
vm.UserReserveKbytes = vp.PInt64()
case "vfs_cache_pressure":
vm.VfsCachePressure = vp.PInt64()
case "watermark_boost_factor":
vm.WatermarkBoostFactor = vp.PInt64()
case "watermark_scale_factor":
vm.WatermarkScaleFactor = vp.PInt64()
case "zone_reclaim_mode":
vm.ZoneReclaimMode = vp.PInt64()
}
if err := vp.Err(); err != nil {
return nil, err
}
}
return &vm, nil
}
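Because every /proc/sys/vm entry may be missing or write-only, the VM struct uses pointer fields; a sketch that reads one setting, with swappiness chosen arbitrarily:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	vm, err := fs.VM()
	if err != nil {
		log.Fatal(err)
	}
	if vm.Swappiness != nil { // nil when the file is absent or unreadable
		fmt.Println("swappiness:", *vm.Swappiness)
	}
}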

View File

@@ -97,7 +97,7 @@ func NewXfrmStat() (XfrmStat, error) {
// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
func (fs FS) NewXfrmStat() (XfrmStat, error) {
file, err := os.Open(fs.Path("net/xfrm_stat"))
file, err := os.Open(fs.proc.Path("net/xfrm_stat"))
if err != nil {
return XfrmStat{}, err
}

196
vendor/github.com/prometheus/procfs/zoneinfo.go generated vendored Normal file
View File

@@ -0,0 +1,196 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows
package procfs
import (
"bytes"
"fmt"
"io/ioutil"
"regexp"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// Zoneinfo holds info parsed from /proc/zoneinfo.
type Zoneinfo struct {
Node string
Zone string
NrFreePages *int64
Min *int64
Low *int64
High *int64
Scanned *int64
Spanned *int64
Present *int64
Managed *int64
NrActiveAnon *int64
NrInactiveAnon *int64
NrIsolatedAnon *int64
NrAnonPages *int64
NrAnonTransparentHugepages *int64
NrActiveFile *int64
NrInactiveFile *int64
NrIsolatedFile *int64
NrFilePages *int64
NrSlabReclaimable *int64
NrSlabUnreclaimable *int64
NrMlockStack *int64
NrKernelStack *int64
NrMapped *int64
NrDirty *int64
NrWriteback *int64
NrUnevictable *int64
NrShmem *int64
NrDirtied *int64
NrWritten *int64
NumaHit *int64
NumaMiss *int64
NumaForeign *int64
NumaInterleave *int64
NumaLocal *int64
NumaOther *int64
Protection []*int64
}
var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)
// Zoneinfo parses a zoneinfo file (/proc/zoneinfo) and returns a slice of
// structs containing the relevant info. More information available here:
// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
data, err := ioutil.ReadFile(fs.proc.Path("zoneinfo"))
if err != nil {
return nil, fmt.Errorf("error reading zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err)
}
zoneinfo, err := parseZoneinfo(data)
if err != nil {
return nil, fmt.Errorf("error parsing zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err)
}
return zoneinfo, nil
}
func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) {
zoneinfo := []Zoneinfo{}
zoneinfoBlocks := bytes.Split(zoneinfoData, []byte("\nNode"))
for _, block := range zoneinfoBlocks {
var zoneinfoElement Zoneinfo
lines := strings.Split(string(block), "\n")
for _, line := range lines {
if nodeZone := nodeZoneRE.FindStringSubmatch(line); nodeZone != nil {
zoneinfoElement.Node = nodeZone[1]
zoneinfoElement.Zone = nodeZone[2]
continue
}
if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") {
zoneinfoElement.Zone = ""
continue
}
parts := strings.Fields(strings.TrimSpace(line))
if len(parts) < 2 {
continue
}
vp := util.NewValueParser(parts[1])
switch parts[0] {
case "nr_free_pages":
zoneinfoElement.NrFreePages = vp.PInt64()
case "min":
zoneinfoElement.Min = vp.PInt64()
case "low":
zoneinfoElement.Low = vp.PInt64()
case "high":
zoneinfoElement.High = vp.PInt64()
case "scanned":
zoneinfoElement.Scanned = vp.PInt64()
case "spanned":
zoneinfoElement.Spanned = vp.PInt64()
case "present":
zoneinfoElement.Present = vp.PInt64()
case "managed":
zoneinfoElement.Managed = vp.PInt64()
case "nr_active_anon":
zoneinfoElement.NrActiveAnon = vp.PInt64()
case "nr_inactive_anon":
zoneinfoElement.NrInactiveAnon = vp.PInt64()
case "nr_isolated_anon":
zoneinfoElement.NrIsolatedAnon = vp.PInt64()
case "nr_anon_pages":
zoneinfoElement.NrAnonPages = vp.PInt64()
case "nr_anon_transparent_hugepages":
zoneinfoElement.NrAnonTransparentHugepages = vp.PInt64()
case "nr_active_file":
zoneinfoElement.NrActiveFile = vp.PInt64()
case "nr_inactive_file":
zoneinfoElement.NrInactiveFile = vp.PInt64()
case "nr_isolated_file":
zoneinfoElement.NrIsolatedFile = vp.PInt64()
case "nr_file_pages":
zoneinfoElement.NrFilePages = vp.PInt64()
case "nr_slab_reclaimable":
zoneinfoElement.NrSlabReclaimable = vp.PInt64()
case "nr_slab_unreclaimable":
zoneinfoElement.NrSlabUnreclaimable = vp.PInt64()
case "nr_mlock_stack":
zoneinfoElement.NrMlockStack = vp.PInt64()
case "nr_kernel_stack":
zoneinfoElement.NrKernelStack = vp.PInt64()
case "nr_mapped":
zoneinfoElement.NrMapped = vp.PInt64()
case "nr_dirty":
zoneinfoElement.NrDirty = vp.PInt64()
case "nr_writeback":
zoneinfoElement.NrWriteback = vp.PInt64()
case "nr_unevictable":
zoneinfoElement.NrUnevictable = vp.PInt64()
case "nr_shmem":
zoneinfoElement.NrShmem = vp.PInt64()
case "nr_dirtied":
zoneinfoElement.NrDirtied = vp.PInt64()
case "nr_written":
zoneinfoElement.NrWritten = vp.PInt64()
case "numa_hit":
zoneinfoElement.NumaHit = vp.PInt64()
case "numa_miss":
zoneinfoElement.NumaMiss = vp.PInt64()
case "numa_foreign":
zoneinfoElement.NumaForeign = vp.PInt64()
case "numa_interleave":
zoneinfoElement.NumaInterleave = vp.PInt64()
case "numa_local":
zoneinfoElement.NumaLocal = vp.PInt64()
case "numa_other":
zoneinfoElement.NumaOther = vp.PInt64()
case "protection:":
protectionParts := strings.Split(line, ":")
protectionValues := strings.Replace(protectionParts[1], "(", "", 1)
protectionValues = strings.Replace(protectionValues, ")", "", 1)
protectionValues = strings.TrimSpace(protectionValues)
protectionStringMap := strings.Split(protectionValues, ", ")
val, err := util.ParsePInt64s(protectionStringMap)
if err == nil {
zoneinfoElement.Protection = val
}
}
}
zoneinfo = append(zoneinfo, zoneinfoElement)
}
return zoneinfo, nil
}
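Finally, a sketch of the zoneinfo reader; the per-zone fields are pointers for the same reason as in vm.go, so the free-pages counter is checked before being dereferenced:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	zones, err := fs.Zoneinfo()
	if err != nil {
		log.Fatal(err)
	}
	for _, z := range zones {
		if z.NrFreePages != nil {
			fmt.Printf("node %s zone %s: %d free pages\n", z.Node, z.Zone, *z.NrFreePages)
		}
	}
}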