go mod vendor

Jan Wozniak 2020-02-12 15:44:54 +01:00
parent 679bb9dc07
commit 40f35a984c
50 changed files with 3836 additions and 1645 deletions

View File

@ -1,7 +1,8 @@
language: go
go:
- 1.3
- 1.4
- "1.3"
- "1.4"
- "1.10"
script:
- go test
- go build

View File

@ -4,6 +4,7 @@ import (
"bytes"
"encoding/json"
"fmt"
"io"
"reflect"
"strconv"
@ -26,15 +27,19 @@ func Marshal(o interface{}) ([]byte, error) {
return y, nil
}
// Converts YAML to JSON then uses JSON to unmarshal into an object.
func Unmarshal(y []byte, o interface{}) error {
// JSONOpt is a decoding option for decoding from JSON format.
type JSONOpt func(*json.Decoder) *json.Decoder
// Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object,
// optionally configuring the behavior of the JSON unmarshal.
func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error {
vo := reflect.ValueOf(o)
j, err := yamlToJSON(y, &vo)
j, err := yamlToJSON(y, &vo, yaml.Unmarshal)
if err != nil {
return fmt.Errorf("error converting YAML to JSON: %v", err)
}
err = json.Unmarshal(j, o)
err = jsonUnmarshal(bytes.NewReader(j), o, opts...)
if err != nil {
return fmt.Errorf("error unmarshaling JSON: %v", err)
}
@ -42,6 +47,21 @@ func Unmarshal(y []byte, o interface{}) error {
return nil
}
// jsonUnmarshal unmarshals the JSON byte stream from the given reader into the
// object, optionally applying decoder options prior to decoding. We are not
// using json.Unmarshal directly as we want the chance to pass in non-default
// options.
func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error {
d := json.NewDecoder(r)
for _, opt := range opts {
d = opt(d)
}
if err := d.Decode(&o); err != nil {
return fmt.Errorf("while decoding JSON: %v", err)
}
return nil
}
// Convert JSON to YAML.
func JSONToYAML(j []byte) ([]byte, error) {
// Convert the JSON to an object.
@ -60,8 +80,8 @@ func JSONToYAML(j []byte) ([]byte, error) {
return yaml.Marshal(jsonObj)
}
// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through
// this method should be a no-op.
// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML,
// passing JSON through this method should be a no-op.
//
// Things YAML can do that are not supported by JSON:
// * In YAML you can have binary and null keys in your maps. These are invalid
@ -70,14 +90,22 @@ func JSONToYAML(j []byte) ([]byte, error) {
// use binary data with this library, encode the data as base64 as usual but do
// not use the !!binary tag in your YAML. This will ensure the original base64
// encoded data makes it all the way through to the JSON.
//
// For strict decoding of YAML, use YAMLToJSONStrict.
func YAMLToJSON(y []byte) ([]byte, error) {
return yamlToJSON(y, nil)
return yamlToJSON(y, nil, yaml.Unmarshal)
}
func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
// YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding,
// returning an error on any duplicate field names.
func YAMLToJSONStrict(y []byte) ([]byte, error) {
return yamlToJSON(y, nil, yaml.UnmarshalStrict)
}
func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, interface{}) error) ([]byte, error) {
// Convert the YAML to an object.
var yamlObj interface{}
err := yaml.Unmarshal(y, &yamlObj)
err := yamlUnmarshal(y, &yamlObj)
if err != nil {
return nil, err
}
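A minimal sketch of the new JSONOpt hook and the strict entry point, assuming the vendored github.com/ghodss/yaml import path; useNumber below is a custom option written for illustration, not part of the package:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ghodss/yaml"
)

func main() {
	// A JSONOpt is just a hook on the underlying *json.Decoder; here we keep
	// numbers as json.Number instead of float64.
	useNumber := func(d *json.Decoder) *json.Decoder {
		d.UseNumber()
		return d
	}

	var out map[string]interface{}
	if err := yaml.Unmarshal([]byte("answer: 42\n"), &out, useNumber); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", out["answer"]) // json.Number

	// Strict conversion rejects duplicate keys instead of keeping the last value.
	if _, err := yaml.YAMLToJSONStrict([]byte("a: 1\na: 2\n")); err != nil {
		fmt.Println("strict error:", err)
	}
}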

vendor/github.com/ghodss/yaml/yaml_go110.go (14 lines) generated vendored Normal file
View File

@ -0,0 +1,14 @@
// This file contains changes that are only compatible with go 1.10 and onwards.
// +build go1.10
package yaml
import "encoding/json"
// DisallowUnknownFields configures the JSON decoder to error out if unknown
// fields come along, instead of dropping them by default.
func DisallowUnknownFields(d *json.Decoder) *json.Decoder {
d.DisallowUnknownFields()
return d
}
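Under go1.10+, that option can be passed straight to Unmarshal; a short sketch (the Config struct and input are made up for illustration):

package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

type Config struct {
	Name string `json:"name"`
}

func main() {
	data := []byte("name: demo\nunexpected: true\n")

	var c Config
	// Default behaviour: the unknown key "unexpected" is silently dropped.
	if err := yaml.Unmarshal(data, &c); err != nil {
		panic(err)
	}

	// With the option, the same input is rejected.
	if err := yaml.Unmarshal(data, &c, yaml.DisallowUnknownFields); err != nil {
		fmt.Println("strict decode failed:", err)
	}
}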

View File

@ -1,14 +1,10 @@
language: go
go_import_path: github.com/pkg/errors
go:
- 1.4.x
- 1.5.x
- 1.6.x
- 1.7.x
- 1.8.x
- 1.9.x
- 1.10.x
- 1.11.x
- 1.4.3
- 1.5.4
- 1.6.2
- 1.7.1
- tip
script:

View File

@ -1,4 +1,4 @@
# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge)
# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors)
Package errors provides simple error handling primitives.
@ -47,6 +47,6 @@ We welcome pull requests, bug fixes and issue reports. With that said, the bar f
Before proposing a change, please discuss your change by raising an issue.
## License
## Licence
BSD-2-Clause

View File

@ -6,7 +6,7 @@
// return err
// }
//
// which when applied recursively up the call stack results in error reports
// which applied recursively up the call stack results in error reports
// without context or debugging information. The errors package allows
// programmers to add context to the failure path in their code in a way
// that does not destroy the original value of the error.
@ -15,17 +15,16 @@
//
// The errors.Wrap function returns a new error that adds context to the
// original error by recording a stack trace at the point Wrap is called,
// together with the supplied message. For example
// and the supplied message. For example
//
// _, err := ioutil.ReadAll(r)
// if err != nil {
// return errors.Wrap(err, "read failed")
// }
//
// If additional control is required, the errors.WithStack and
// errors.WithMessage functions destructure errors.Wrap into its component
// operations: annotating an error with a stack trace and with a message,
// respectively.
// If additional control is required the errors.WithStack and errors.WithMessage
// functions destructure errors.Wrap into its component operations of annotating
// an error with a stack trace and an a message, respectively.
//
// Retrieving the cause of an error
//
@ -39,7 +38,7 @@
// }
//
// can be inspected by errors.Cause. errors.Cause will recursively retrieve
// the topmost error that does not implement causer, which is assumed to be
// the topmost error which does not implement causer, which is assumed to be
// the original cause. For example:
//
// switch err := errors.Cause(err).(type) {
@ -49,16 +48,16 @@
// // unknown error
// }
//
// Although the causer interface is not exported by this package, it is
// considered a part of its stable public interface.
// causer interface is not exported by this package, but is considered a part
// of stable public API.
//
// Formatted printing of errors
//
// All error values returned from this package implement fmt.Formatter and can
// be formatted by the fmt package. The following verbs are supported:
// be formatted by the fmt package. The following verbs are supported
//
// %s print the error. If the error has a Cause it will be
// printed recursively.
// printed recursively
// %v see %s
// %+v extended format. Each Frame of the error's StackTrace will
// be printed in detail.
@ -66,13 +65,13 @@
// Retrieving the stack trace of an error or wrapper
//
// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
// invoked. This information can be retrieved with the following interface:
// invoked. This information can be retrieved with the following interface.
//
// type stackTracer interface {
// StackTrace() errors.StackTrace
// }
//
// The returned errors.StackTrace type is defined as
// Where errors.StackTrace is defined as
//
// type StackTrace []Frame
//
@ -86,8 +85,8 @@
// }
// }
//
// Although the stackTracer interface is not exported by this package, it is
// considered a part of its stable public interface.
// stackTracer interface is not exported by this package, but is considered a part
// of stable public API.
//
// See the documentation for Frame.Format for more details.
package errors
@ -193,7 +192,7 @@ func Wrap(err error, message string) error {
}
// Wrapf returns an error annotating err with a stack trace
// at the point Wrapf is called, and the format specifier.
// at the point Wrapf is call, and the format specifier.
// If err is nil, Wrapf returns nil.
func Wrapf(err error, format string, args ...interface{}) error {
if err == nil {
@ -221,18 +220,6 @@ func WithMessage(err error, message string) error {
}
}
// WithMessagef annotates err with the format specifier.
// If err is nil, WithMessagef returns nil.
func WithMessagef(err error, format string, args ...interface{}) error {
if err == nil {
return nil
}
return &withMessage{
cause: err,
msg: fmt.Sprintf(format, args...),
}
}
type withMessage struct {
cause error
msg string
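The Wrap/Cause/%+v behaviour described in the package doc above, as a compact sketch (the path and message are made up):

package main

import (
	"fmt"
	"os"

	"github.com/pkg/errors"
)

func readConfig(path string) error {
	if _, err := os.Open(path); err != nil {
		// Annotate the low-level error with a message and a stack trace.
		return errors.Wrap(err, "read config failed")
	}
	return nil
}

func main() {
	err := readConfig("/no/such/file")
	fmt.Println(err)               // read config failed: open /no/such/file: ...
	fmt.Println(errors.Cause(err)) // the original *os.PathError
	fmt.Printf("%+v\n", err)       // message plus the recorded stack trace
}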

View File

@ -46,8 +46,7 @@ func (f Frame) line() int {
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
// %+s function name and path of source file relative to the compile time
// GOPATH separated by \n\t (<funcname>\n\t<path>)
// %+s path of source file relative to the compile time GOPATH
// %+v equivalent to %+s:%d
func (f Frame) Format(s fmt.State, verb rune) {
switch verb {
@ -80,14 +79,6 @@ func (f Frame) Format(s fmt.State, verb rune) {
// StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
type StackTrace []Frame
// Format formats the stack of Frames according to the fmt.Formatter interface.
//
// %s lists source files for each Frame in the stack
// %v lists the source file and line number for each Frame in the stack
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
// %+v Prints filename, function, and line number for each Frame in the stack.
func (st StackTrace) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
@ -145,3 +136,43 @@ func funcname(name string) string {
i = strings.Index(name, ".")
return name[i+1:]
}
func trimGOPATH(name, file string) string {
// Here we want to get the source file path relative to the compile time
// GOPATH. As of Go 1.6.x there is no direct way to know the compiled
// GOPATH at runtime, but we can infer the number of path segments in the
// GOPATH. We note that fn.Name() returns the function name qualified by
// the import path, which does not include the GOPATH. Thus we can trim
// segments from the beginning of the file path until the number of path
// separators remaining is one more than the number of path separators in
// the function name. For example, given:
//
// GOPATH /home/user
// file /home/user/src/pkg/sub/file.go
// fn.Name() pkg/sub.Type.Method
//
// We want to produce:
//
// pkg/sub/file.go
//
// From this we can easily see that fn.Name() has one less path separator
// than our desired output. We count separators from the end of the file
// path until it finds two more than in the function name and then move
// one character forward to preserve the initial path segment without a
// leading separator.
const sep = "/"
goal := strings.Count(name, sep) + 2
i := len(file)
for n := 0; n < goal; n++ {
i = strings.LastIndex(file[:i], sep)
if i == -1 {
// not enough separators found, set i so that the slice expression
// below leaves file unmodified
i = -len(sep)
break
}
}
// get back to 0 or trim the leading separator
file = file[i+len(sep):]
return file
}

View File

@ -1,12 +1,12 @@
/*
Copyright (c) 2011, Open Knowledge Foundation Ltd.
All rights reserved.
HTTP Content-Type Autonegotiation.
The functions in this package implement the behaviour specified in
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
Copyright (c) 2011, Open Knowledge Foundation Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

View File

@ -21,6 +21,7 @@ import (
)
var (
separator = []byte{0}
// MetricNameRE is a regular expression matching valid metric
// names. Note that the IsValidMetricName function performs the same
// check but faster than a match with this regular expression.

View File

@ -1,2 +1 @@
* Tobias Schmidt <tobidt@gmail.com> @grobie
* Johannes 'fish' Ziemke <github@freigeist.org> @discordianfish
* Tobias Schmidt <tobidt@gmail.com>

View File

@ -17,12 +17,14 @@ include Makefile.common
./ttar -C $(dir $*) -x -f $*.ttar
touch $@
update_fixtures:
rm -vf fixtures/.unpacked
./ttar -c -f fixtures.ttar fixtures/
update_fixtures: fixtures.ttar sysfs/fixtures.ttar
%fixtures.ttar: %/fixtures
rm -v $(dir $*)fixtures/.unpacked
./ttar -C $(dir $*) -c -f $*fixtures.ttar fixtures/
.PHONY: build
build:
.PHONY: test
test: fixtures/.unpacked common-test
test: fixtures/.unpacked sysfs/fixtures/.unpacked common-test

View File

@ -29,15 +29,12 @@ GO ?= go
GOFMT ?= $(GO)fmt
FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
GOOPTS ?=
GOHOSTOS ?= $(shell $(GO) env GOHOSTOS)
GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH)
GO_VERSION ?= $(shell $(GO) version)
GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
GOVENDOR :=
GO111MODULE :=
unexport GOVENDOR
ifeq (, $(PRE_GO_111))
ifneq (,$(wildcard go.mod))
# Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI).
@ -58,51 +55,32 @@ $(warning Some recipes may not work as expected as the current Go runtime is '$(
# This repository isn't using Go modules (yet).
GOVENDOR := $(FIRST_GOPATH)/bin/govendor
endif
unexport GO111MODULE
endif
PROMU := $(FIRST_GOPATH)/bin/promu
STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck
pkgs = ./...
ifeq (arm, $(GOHOSTARCH))
GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM)
GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM)
else
GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)
endif
GO_VERSION ?= $(shell $(GO) version)
GO_BUILD_PLATFORM ?= $(subst /,-,$(lastword $(GO_VERSION)))
PROMU_VERSION ?= 0.3.0
PROMU_VERSION ?= 0.2.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
STATICCHECK :=
# staticcheck only supports linux, freebsd, darwin and windows platforms on i386/amd64
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin))
ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck
STATICCHECK_VERSION ?= 2019.1
STATICCHECK_URL := https://github.com/dominikh/go-tools/releases/download/$(STATICCHECK_VERSION)/staticcheck_$(GOHOSTOS)_$(GOHOSTARCH)
endif
endif
PREFIX ?= $(shell pwd)
BIN_DIR ?= $(shell pwd)
DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
DOCKER_REPO ?= prom
ifeq ($(GOHOSTARCH),amd64)
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
# Only supported on amd64
test-flags := -race
endif
endif
.PHONY: all
all: precheck style staticcheck unused build test
# This rule is used to forward a target like "build" to "common-build". This
# allows a new "build" target to be defined in a Makefile which includes this
# one and override "common-build" without override warnings.
%: common-% ;
.PHONY: common-all
common-all: precheck style check_license staticcheck unused build test
.PHONY: common-style
common-style:
@echo ">> checking code style"
@ -124,15 +102,6 @@ common-check_license:
exit 1; \
fi
.PHONY: common-deps
common-deps:
@echo ">> getting dependencies"
ifdef GO111MODULE
GO111MODULE=$(GO111MODULE) $(GO) mod download
else
$(GO) get $(GOOPTS) -t ./...
endif
.PHONY: common-test-short
common-test-short:
@echo ">> running short tests"
@ -141,12 +110,12 @@ common-test-short:
.PHONY: common-test
common-test:
@echo ">> running all tests"
GO111MODULE=$(GO111MODULE) $(GO) test $(test-flags) $(GOOPTS) $(pkgs)
GO111MODULE=$(GO111MODULE) $(GO) test -race $(GOOPTS) $(pkgs)
.PHONY: common-format
common-format:
@echo ">> formatting code"
GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs)
GO111MODULE=$(GO111MODULE) $(GO) fmt $(GOOPTS) $(pkgs)
.PHONY: common-vet
common-vet:
@ -155,18 +124,12 @@ common-vet:
.PHONY: common-staticcheck
common-staticcheck: $(STATICCHECK)
ifdef STATICCHECK
@echo ">> running staticcheck"
chmod +x $(STATICCHECK)
ifdef GO111MODULE
# 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
# Otherwise staticcheck might fail randomly for some reason not yet explained.
GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
GO111MODULE=$(GO111MODULE) $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs)
GO111MODULE=$(GO111MODULE) $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" -checks "SA*" $(pkgs)
else
$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs)
endif
endif
.PHONY: common-unused
common-unused: $(GOVENDOR)
@ -177,9 +140,8 @@ else
ifdef GO111MODULE
@echo ">> running check for unused/missing packages in go.mod"
GO111MODULE=$(GO111MODULE) $(GO) mod tidy
ifeq (,$(wildcard vendor))
@git diff --exit-code -- go.sum go.mod
else
ifneq (,$(wildcard vendor))
@echo ">> running check for unused packages in vendor/"
GO111MODULE=$(GO111MODULE) $(GO) mod vendor
@git diff --exit-code -- go.sum go.mod vendor/
@ -213,21 +175,29 @@ common-docker-tag-latest:
promu: $(PROMU)
$(PROMU):
$(eval PROMU_TMP := $(shell mktemp -d))
curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP)
mkdir -p $(FIRST_GOPATH)/bin
cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu
rm -r $(PROMU_TMP)
curl -s -L $(PROMU_URL) | tar -xvz -C /tmp
mkdir -v -p $(FIRST_GOPATH)/bin
cp -v /tmp/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(PROMU)
.PHONY: proto
proto:
@echo ">> generating code from proto files"
@./scripts/genproto.sh
ifdef STATICCHECK
.PHONY: $(STATICCHECK)
$(STATICCHECK):
mkdir -p $(FIRST_GOPATH)/bin
curl -s -L $(STATICCHECK_URL) > $(STATICCHECK)
ifdef GO111MODULE
# Get staticcheck from a temporary directory to avoid modifying the local go.{mod,sum}.
# See https://github.com/golang/go/issues/27643.
# For now, we are using the next branch of staticcheck because master isn't compatible yet with Go modules.
tmpModule=$$(mktemp -d 2>&1) && \
mkdir -p $${tmpModule}/staticcheck && \
cd "$${tmpModule}"/staticcheck && \
GO111MODULE=on $(GO) mod init example.com/staticcheck && \
GO111MODULE=on GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck@next && \
rm -rf $${tmpModule};
else
GOOS= GOARCH= GO111MODULE=off $(GO) get -u honnef.co/go/tools/cmd/staticcheck
endif
ifdef GOVENDOR
@ -242,6 +212,7 @@ precheck::
define PRECHECK_COMMAND_template =
precheck:: $(1)_precheck
PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1)))
.PHONY: $(1)_precheck
$(1)_precheck:

File diff suppressed because it is too large

View File

@ -17,6 +17,9 @@ import (
"fmt"
"os"
"path"
"github.com/prometheus/procfs/nfs"
"github.com/prometheus/procfs/xfs"
)
// FS represents the pseudo-filesystem proc, which provides an interface to
@ -44,3 +47,36 @@ func NewFS(mountPoint string) (FS, error) {
func (fs FS) Path(p ...string) string {
return path.Join(append([]string{string(fs)}, p...)...)
}
// XFSStats retrieves XFS filesystem runtime statistics.
func (fs FS) XFSStats() (*xfs.Stats, error) {
f, err := os.Open(fs.Path("fs/xfs/stat"))
if err != nil {
return nil, err
}
defer f.Close()
return xfs.ParseStats(f)
}
// NFSClientRPCStats retrieves NFS client RPC statistics.
func (fs FS) NFSClientRPCStats() (*nfs.ClientRPCStats, error) {
f, err := os.Open(fs.Path("net/rpc/nfs"))
if err != nil {
return nil, err
}
defer f.Close()
return nfs.ParseClientRPCStats(f)
}
// NFSdServerRPCStats retrieves NFS daemon RPC statistics.
func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) {
f, err := os.Open(fs.Path("net/rpc/nfsd"))
if err != nil {
return nil, err
}
defer f.Close()
return nfs.ParseServerRPCStats(f)
}
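A sketch of calling these helpers, assuming the vendored github.com/prometheus/procfs import path and a host with /proc mounted; field names follow the structs added elsewhere in this commit:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}

	// Each helper opens the matching file under the mount point and hands it
	// to the corresponding parser; an error typically means the file is absent.
	if xfs, err := fs.XFSStats(); err == nil {
		fmt.Println("xfs extents allocated:", xfs.ExtentAllocation.ExtentsAllocated)
	}
	if nfs, err := fs.NFSClientRPCStats(); err == nil {
		fmt.Println("nfs client rpc calls:", nfs.ClientRPC.RPCCount)
	}
	if nfsd, err := fs.NFSdServerRPCStats(); err == nil {
		fmt.Println("nfsd rpc calls:", nfsd.ServerRPC.RPCCount)
	}
}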

View File

@ -1,3 +1 @@
module github.com/prometheus/procfs
require golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4

View File

@ -1,2 +0,0 @@
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=

View File

@ -0,0 +1,59 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"io/ioutil"
"strconv"
"strings"
)
// ParseUint32s parses a slice of strings into a slice of uint32s.
func ParseUint32s(ss []string) ([]uint32, error) {
us := make([]uint32, 0, len(ss))
for _, s := range ss {
u, err := strconv.ParseUint(s, 10, 32)
if err != nil {
return nil, err
}
us = append(us, uint32(u))
}
return us, nil
}
// ParseUint64s parses a slice of strings into a slice of uint64s.
func ParseUint64s(ss []string) ([]uint64, error) {
us := make([]uint64, 0, len(ss))
for _, s := range ss {
u, err := strconv.ParseUint(s, 10, 64)
if err != nil {
return nil, err
}
us = append(us, u)
}
return us, nil
}
// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
func ReadUintFromFile(path string) (uint64, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return 0, err
}
return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
}

View File

@ -0,0 +1,45 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows
package util
import (
"bytes"
"os"
"syscall"
)
// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly.
// https://github.com/prometheus/node_exporter/pull/728/files
func SysReadFile(file string) (string, error) {
f, err := os.Open(file)
if err != nil {
return "", err
}
defer f.Close()
// On some machines, hwmon drivers are broken and return EAGAIN. This causes
// Go's ioutil.ReadFile implementation to poll forever.
//
// Since we either want to read data or bail immediately, do the simplest
// possible read using syscall directly.
b := make([]byte, 128)
n, err := syscall.Read(int(f.Fd()), b)
if err != nil {
return "", err
}
return string(bytes.TrimSpace(b[:n])), nil
}

View File

@ -69,8 +69,6 @@ type MountStats interface {
type MountStatsNFS struct {
// The version of statistics provided.
StatVersion string
// The optional mountaddr of the NFS mount.
MountAddress string
// The age of the NFS mount.
Age time.Duration
// Statistics related to byte counters for various operations.
@ -319,7 +317,6 @@ func parseMount(ss []string) (*Mount, error) {
func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
// Field indicators for parsing specific types of data
const (
fieldOpts = "opts:"
fieldAge = "age:"
fieldBytes = "bytes:"
fieldEvents = "events:"
@ -341,13 +338,6 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
}
switch ss[0] {
case fieldOpts:
for _, opt := range strings.Split(ss[1], ",") {
split := strings.Split(opt, "=")
if len(split) == 2 && split[0] == "mountaddr" {
stats.MountAddress = split[1]
}
}
case fieldAge:
// Age integer is in seconds
d, err := time.ParseDuration(ss[1] + "s")

vendor/github.com/prometheus/procfs/nfs/nfs.go (263 lines) generated vendored Normal file
View File

@ -0,0 +1,263 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package nfs implements parsing of /proc/net/rpc/nfsd.
// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/
package nfs
// ReplyCache models the "rc" line.
type ReplyCache struct {
Hits uint64
Misses uint64
NoCache uint64
}
// FileHandles models the "fh" line.
type FileHandles struct {
Stale uint64
TotalLookups uint64
AnonLookups uint64
DirNoCache uint64
NoDirNoCache uint64
}
// InputOutput models the "io" line.
type InputOutput struct {
Read uint64
Write uint64
}
// Threads models the "th" line.
type Threads struct {
Threads uint64
FullCnt uint64
}
// ReadAheadCache models the "ra" line.
type ReadAheadCache struct {
CacheSize uint64
CacheHistogram []uint64
NotFound uint64
}
// Network models the "net" line.
type Network struct {
NetCount uint64
UDPCount uint64
TCPCount uint64
TCPConnect uint64
}
// ClientRPC models the nfs "rpc" line.
type ClientRPC struct {
RPCCount uint64
Retransmissions uint64
AuthRefreshes uint64
}
// ServerRPC models the nfsd "rpc" line.
type ServerRPC struct {
RPCCount uint64
BadCnt uint64
BadFmt uint64
BadAuth uint64
BadcInt uint64
}
// V2Stats models the "proc2" line.
type V2Stats struct {
Null uint64
GetAttr uint64
SetAttr uint64
Root uint64
Lookup uint64
ReadLink uint64
Read uint64
WrCache uint64
Write uint64
Create uint64
Remove uint64
Rename uint64
Link uint64
SymLink uint64
MkDir uint64
RmDir uint64
ReadDir uint64
FsStat uint64
}
// V3Stats models the "proc3" line.
type V3Stats struct {
Null uint64
GetAttr uint64
SetAttr uint64
Lookup uint64
Access uint64
ReadLink uint64
Read uint64
Write uint64
Create uint64
MkDir uint64
SymLink uint64
MkNod uint64
Remove uint64
RmDir uint64
Rename uint64
Link uint64
ReadDir uint64
ReadDirPlus uint64
FsStat uint64
FsInfo uint64
PathConf uint64
Commit uint64
}
// ClientV4Stats models the nfs "proc4" line.
type ClientV4Stats struct {
Null uint64
Read uint64
Write uint64
Commit uint64
Open uint64
OpenConfirm uint64
OpenNoattr uint64
OpenDowngrade uint64
Close uint64
Setattr uint64
FsInfo uint64
Renew uint64
SetClientID uint64
SetClientIDConfirm uint64
Lock uint64
Lockt uint64
Locku uint64
Access uint64
Getattr uint64
Lookup uint64
LookupRoot uint64
Remove uint64
Rename uint64
Link uint64
Symlink uint64
Create uint64
Pathconf uint64
StatFs uint64
ReadLink uint64
ReadDir uint64
ServerCaps uint64
DelegReturn uint64
GetACL uint64
SetACL uint64
FsLocations uint64
ReleaseLockowner uint64
Secinfo uint64
FsidPresent uint64
ExchangeID uint64
CreateSession uint64
DestroySession uint64
Sequence uint64
GetLeaseTime uint64
ReclaimComplete uint64
LayoutGet uint64
GetDeviceInfo uint64
LayoutCommit uint64
LayoutReturn uint64
SecinfoNoName uint64
TestStateID uint64
FreeStateID uint64
GetDeviceList uint64
BindConnToSession uint64
DestroyClientID uint64
Seek uint64
Allocate uint64
DeAllocate uint64
LayoutStats uint64
Clone uint64
}
// ServerV4Stats models the nfsd "proc4" line.
type ServerV4Stats struct {
Null uint64
Compound uint64
}
// V4Ops models the "proc4ops" line: NFSv4 operations
// Variable list, see:
// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations)
// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations)
// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations)
type V4Ops struct {
//Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct?
Op0Unused uint64
Op1Unused uint64
Op2Future uint64
Access uint64
Close uint64
Commit uint64
Create uint64
DelegPurge uint64
DelegReturn uint64
GetAttr uint64
GetFH uint64
Link uint64
Lock uint64
Lockt uint64
Locku uint64
Lookup uint64
LookupRoot uint64
Nverify uint64
Open uint64
OpenAttr uint64
OpenConfirm uint64
OpenDgrd uint64
PutFH uint64
PutPubFH uint64
PutRootFH uint64
Read uint64
ReadDir uint64
ReadLink uint64
Remove uint64
Rename uint64
Renew uint64
RestoreFH uint64
SaveFH uint64
SecInfo uint64
SetAttr uint64
Verify uint64
Write uint64
RelLockOwner uint64
}
// ClientRPCStats models all stats from /proc/net/rpc/nfs.
type ClientRPCStats struct {
Network Network
ClientRPC ClientRPC
V2Stats V2Stats
V3Stats V3Stats
ClientV4Stats ClientV4Stats
}
// ServerRPCStats models all stats from /proc/net/rpc/nfsd.
type ServerRPCStats struct {
ReplyCache ReplyCache
FileHandles FileHandles
InputOutput InputOutput
Threads Threads
ReadAheadCache ReadAheadCache
Network Network
ServerRPC ServerRPC
V2Stats V2Stats
V3Stats V3Stats
ServerV4Stats ServerV4Stats
V4Ops V4Ops
}

vendor/github.com/prometheus/procfs/nfs/parse.go (317 lines) generated vendored Normal file
View File

@ -0,0 +1,317 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package nfs
import (
"fmt"
)
func parseReplyCache(v []uint64) (ReplyCache, error) {
if len(v) != 3 {
return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v)
}
return ReplyCache{
Hits: v[0],
Misses: v[1],
NoCache: v[2],
}, nil
}
func parseFileHandles(v []uint64) (FileHandles, error) {
if len(v) != 5 {
return FileHandles{}, fmt.Errorf("invalid FileHandles, line %q", v)
}
return FileHandles{
Stale: v[0],
TotalLookups: v[1],
AnonLookups: v[2],
DirNoCache: v[3],
NoDirNoCache: v[4],
}, nil
}
func parseInputOutput(v []uint64) (InputOutput, error) {
if len(v) != 2 {
return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v)
}
return InputOutput{
Read: v[0],
Write: v[1],
}, nil
}
func parseThreads(v []uint64) (Threads, error) {
if len(v) != 2 {
return Threads{}, fmt.Errorf("invalid Threads line %q", v)
}
return Threads{
Threads: v[0],
FullCnt: v[1],
}, nil
}
func parseReadAheadCache(v []uint64) (ReadAheadCache, error) {
if len(v) != 12 {
return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v)
}
return ReadAheadCache{
CacheSize: v[0],
CacheHistogram: v[1:11],
NotFound: v[11],
}, nil
}
func parseNetwork(v []uint64) (Network, error) {
if len(v) != 4 {
return Network{}, fmt.Errorf("invalid Network line %q", v)
}
return Network{
NetCount: v[0],
UDPCount: v[1],
TCPCount: v[2],
TCPConnect: v[3],
}, nil
}
func parseServerRPC(v []uint64) (ServerRPC, error) {
if len(v) != 5 {
return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v)
}
return ServerRPC{
RPCCount: v[0],
BadCnt: v[1],
BadFmt: v[2],
BadAuth: v[3],
BadcInt: v[4],
}, nil
}
func parseClientRPC(v []uint64) (ClientRPC, error) {
if len(v) != 3 {
return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v)
}
return ClientRPC{
RPCCount: v[0],
Retransmissions: v[1],
AuthRefreshes: v[2],
}, nil
}
func parseV2Stats(v []uint64) (V2Stats, error) {
values := int(v[0])
if len(v[1:]) != values || values != 18 {
return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v)
}
return V2Stats{
Null: v[1],
GetAttr: v[2],
SetAttr: v[3],
Root: v[4],
Lookup: v[5],
ReadLink: v[6],
Read: v[7],
WrCache: v[8],
Write: v[9],
Create: v[10],
Remove: v[11],
Rename: v[12],
Link: v[13],
SymLink: v[14],
MkDir: v[15],
RmDir: v[16],
ReadDir: v[17],
FsStat: v[18],
}, nil
}
func parseV3Stats(v []uint64) (V3Stats, error) {
values := int(v[0])
if len(v[1:]) != values || values != 22 {
return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v)
}
return V3Stats{
Null: v[1],
GetAttr: v[2],
SetAttr: v[3],
Lookup: v[4],
Access: v[5],
ReadLink: v[6],
Read: v[7],
Write: v[8],
Create: v[9],
MkDir: v[10],
SymLink: v[11],
MkNod: v[12],
Remove: v[13],
RmDir: v[14],
Rename: v[15],
Link: v[16],
ReadDir: v[17],
ReadDirPlus: v[18],
FsStat: v[19],
FsInfo: v[20],
PathConf: v[21],
Commit: v[22],
}, nil
}
func parseClientV4Stats(v []uint64) (ClientV4Stats, error) {
values := int(v[0])
if len(v[1:]) != values {
return ClientV4Stats{}, fmt.Errorf("invalid ClientV4Stats line %q", v)
}
// This function currently supports mapping 59 NFS v4 client stats. Older
// kernels may emit fewer stats, so we must detect this and pad out the
// values to match the expected slice size.
if values < 59 {
newValues := make([]uint64, 60)
copy(newValues, v)
v = newValues
}
return ClientV4Stats{
Null: v[1],
Read: v[2],
Write: v[3],
Commit: v[4],
Open: v[5],
OpenConfirm: v[6],
OpenNoattr: v[7],
OpenDowngrade: v[8],
Close: v[9],
Setattr: v[10],
FsInfo: v[11],
Renew: v[12],
SetClientID: v[13],
SetClientIDConfirm: v[14],
Lock: v[15],
Lockt: v[16],
Locku: v[17],
Access: v[18],
Getattr: v[19],
Lookup: v[20],
LookupRoot: v[21],
Remove: v[22],
Rename: v[23],
Link: v[24],
Symlink: v[25],
Create: v[26],
Pathconf: v[27],
StatFs: v[28],
ReadLink: v[29],
ReadDir: v[30],
ServerCaps: v[31],
DelegReturn: v[32],
GetACL: v[33],
SetACL: v[34],
FsLocations: v[35],
ReleaseLockowner: v[36],
Secinfo: v[37],
FsidPresent: v[38],
ExchangeID: v[39],
CreateSession: v[40],
DestroySession: v[41],
Sequence: v[42],
GetLeaseTime: v[43],
ReclaimComplete: v[44],
LayoutGet: v[45],
GetDeviceInfo: v[46],
LayoutCommit: v[47],
LayoutReturn: v[48],
SecinfoNoName: v[49],
TestStateID: v[50],
FreeStateID: v[51],
GetDeviceList: v[52],
BindConnToSession: v[53],
DestroyClientID: v[54],
Seek: v[55],
Allocate: v[56],
DeAllocate: v[57],
LayoutStats: v[58],
Clone: v[59],
}, nil
}
func parseServerV4Stats(v []uint64) (ServerV4Stats, error) {
values := int(v[0])
if len(v[1:]) != values || values != 2 {
return ServerV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v)
}
return ServerV4Stats{
Null: v[1],
Compound: v[2],
}, nil
}
func parseV4Ops(v []uint64) (V4Ops, error) {
values := int(v[0])
if len(v[1:]) != values || values < 39 {
return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v)
}
stats := V4Ops{
Op0Unused: v[1],
Op1Unused: v[2],
Op2Future: v[3],
Access: v[4],
Close: v[5],
Commit: v[6],
Create: v[7],
DelegPurge: v[8],
DelegReturn: v[9],
GetAttr: v[10],
GetFH: v[11],
Link: v[12],
Lock: v[13],
Lockt: v[14],
Locku: v[15],
Lookup: v[16],
LookupRoot: v[17],
Nverify: v[18],
Open: v[19],
OpenAttr: v[20],
OpenConfirm: v[21],
OpenDgrd: v[22],
PutFH: v[23],
PutPubFH: v[24],
PutRootFH: v[25],
Read: v[26],
ReadDir: v[27],
ReadLink: v[28],
Remove: v[29],
Rename: v[30],
Renew: v[31],
RestoreFH: v[32],
SaveFH: v[33],
SecInfo: v[34],
SetAttr: v[35],
Verify: v[36],
Write: v[37],
RelLockOwner: v[38],
}
return stats, nil
}

vendor/github.com/prometheus/procfs/nfs/parse_nfs.go (67 lines) generated vendored Normal file
View File

@ -0,0 +1,67 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package nfs
import (
"bufio"
"fmt"
"io"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// ParseClientRPCStats returns stats read from /proc/net/rpc/nfs
func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) {
stats := &ClientRPCStats{}
scanner := bufio.NewScanner(r)
for scanner.Scan() {
line := scanner.Text()
parts := strings.Fields(scanner.Text())
// require at least <key> <value>
if len(parts) < 2 {
return nil, fmt.Errorf("invalid NFS metric line %q", line)
}
values, err := util.ParseUint64s(parts[1:])
if err != nil {
return nil, fmt.Errorf("error parsing NFS metric line: %s", err)
}
switch metricLine := parts[0]; metricLine {
case "net":
stats.Network, err = parseNetwork(values)
case "rpc":
stats.ClientRPC, err = parseClientRPC(values)
case "proc2":
stats.V2Stats, err = parseV2Stats(values)
case "proc3":
stats.V3Stats, err = parseV3Stats(values)
case "proc4":
stats.ClientV4Stats, err = parseClientV4Stats(values)
default:
return nil, fmt.Errorf("unknown NFS metric line %q", metricLine)
}
if err != nil {
return nil, fmt.Errorf("errors parsing NFS metric line: %s", err)
}
}
if err := scanner.Err(); err != nil {
return nil, fmt.Errorf("error scanning NFS file: %s", err)
}
return stats, nil
}
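For instance, parsing a couple of representative /proc/net/rpc/nfs lines from an in-memory reader (sample values made up):

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/procfs/nfs"
)

func main() {
	sample := "net 18628 0 18628 6\nrpc 4329785 0 4338291\n"
	stats, err := nfs.ParseClientRPCStats(strings.NewReader(sample))
	if err != nil {
		panic(err)
	}
	fmt.Println(stats.Network.TCPCount)          // 18628
	fmt.Println(stats.ClientRPC.RPCCount)        // 4329785
	fmt.Println(stats.ClientRPC.Retransmissions) // 0
}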

vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go (89 lines) generated vendored Normal file
View File

@ -0,0 +1,89 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package nfs
import (
"bufio"
"fmt"
"io"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd
func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) {
stats := &ServerRPCStats{}
scanner := bufio.NewScanner(r)
for scanner.Scan() {
line := scanner.Text()
parts := strings.Fields(scanner.Text())
// require at least <key> <value>
if len(parts) < 2 {
return nil, fmt.Errorf("invalid NFSd metric line %q", line)
}
label := parts[0]
var values []uint64
var err error
if label == "th" {
if len(parts) < 3 {
return nil, fmt.Errorf("invalid NFSd th metric line %q", line)
}
values, err = util.ParseUint64s(parts[1:3])
} else {
values, err = util.ParseUint64s(parts[1:])
}
if err != nil {
return nil, fmt.Errorf("error parsing NFSd metric line: %s", err)
}
switch metricLine := parts[0]; metricLine {
case "rc":
stats.ReplyCache, err = parseReplyCache(values)
case "fh":
stats.FileHandles, err = parseFileHandles(values)
case "io":
stats.InputOutput, err = parseInputOutput(values)
case "th":
stats.Threads, err = parseThreads(values)
case "ra":
stats.ReadAheadCache, err = parseReadAheadCache(values)
case "net":
stats.Network, err = parseNetwork(values)
case "rpc":
stats.ServerRPC, err = parseServerRPC(values)
case "proc2":
stats.V2Stats, err = parseV2Stats(values)
case "proc3":
stats.V3Stats, err = parseV3Stats(values)
case "proc4":
stats.ServerV4Stats, err = parseServerV4Stats(values)
case "proc4ops":
stats.V4Ops, err = parseV4Ops(values)
default:
return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine)
}
if err != nil {
return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err)
}
}
if err := scanner.Err(); err != nil {
return nil, fmt.Errorf("error scanning NFSd file: %s", err)
}
return stats, nil
}
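The "th" special case exists because only the first two fields of that line are integers (thread count and full count); the remaining fields are fractional histogram values that ParseUint64s would reject. A short sketch with made-up sample values:

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/procfs/nfs"
)

func main() {
	sample := "th 8 0 0.000 0.001 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000\n" +
		"io 157286400 72864\n"
	stats, err := nfs.ParseServerRPCStats(strings.NewReader(sample))
	if err != nil {
		panic(err)
	}
	fmt.Println(stats.Threads.Threads)  // 8
	fmt.Println(stats.InputOutput.Read) // 157286400
}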

View File

@ -1,110 +0,0 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
// The PSI / pressure interface is described at
// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt
// Each resource (cpu, io, memory, ...) is exposed as a single file.
// Each file may contain up to two lines, one for "some" pressure and one for "full" pressure.
// Each line contains several averages (over n seconds) and a total in µs.
//
// Example io pressure file:
// > some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362
// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134
import (
"fmt"
"io"
"io/ioutil"
"os"
"strings"
)
const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d"
// PSILine is a single line of values as returned by /proc/pressure/*
// The Avg entries are averages over n seconds, as a percentage
// The Total line is in microseconds
type PSILine struct {
Avg10 float64
Avg60 float64
Avg300 float64
Total uint64
}
// PSIStats represent pressure stall information from /proc/pressure/*
// Some indicates the share of time in which at least some tasks are stalled
// Full indicates the share of time in which all non-idle tasks are stalled simultaneously
type PSIStats struct {
Some *PSILine
Full *PSILine
}
// NewPSIStatsForResource reads pressure stall information for the specified
// resource. At time of writing this can be either "cpu", "memory" or "io".
func NewPSIStatsForResource(resource string) (PSIStats, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
return PSIStats{}, err
}
return fs.NewPSIStatsForResource(resource)
}
// NewPSIStatsForResource reads pressure stall information from /proc/pressure/<resource>
func (fs FS) NewPSIStatsForResource(resource string) (PSIStats, error) {
file, err := os.Open(fs.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
if err != nil {
return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %s", resource)
}
defer file.Close()
return parsePSIStats(resource, file)
}
// parsePSIStats parses the specified file for pressure stall information
func parsePSIStats(resource string, file io.Reader) (PSIStats, error) {
psiStats := PSIStats{}
stats, err := ioutil.ReadAll(file)
if err != nil {
return psiStats, fmt.Errorf("psi_stats: unable to read data for %s", resource)
}
for _, l := range strings.Split(string(stats), "\n") {
prefix := strings.Split(l, " ")[0]
switch prefix {
case "some":
psi := PSILine{}
_, err := fmt.Sscanf(l, fmt.Sprintf("some %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total)
if err != nil {
return PSIStats{}, err
}
psiStats.Some = &psi
case "full":
psi := PSILine{}
_, err := fmt.Sscanf(l, fmt.Sprintf("full %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total)
if err != nil {
return PSIStats{}, err
}
psiStats.Full = &psi
default:
// If we encounter a line with an unknown prefix, ignore it and move on
// Should new measurement types be added in the future we'll simply ignore them instead
// of erroring on retrieval
continue
}
}
return psiStats, nil
}

View File

@ -95,7 +95,7 @@ type ProcStat struct {
// in clock ticks.
Starttime uint64
// Virtual memory size in bytes.
VSize uint
VSize int
// Resident set size in pages.
RSS int
@ -164,7 +164,7 @@ func (p Proc) NewStat() (ProcStat, error) {
}
// VirtualMemory returns the virtual memory size in bytes.
func (s ProcStat) VirtualMemory() uint {
func (s ProcStat) VirtualMemory() int {
return s.VSize
}

vendor/github.com/prometheus/procfs/xfs/parse.go (330 lines) generated vendored Normal file
View File

@ -0,0 +1,330 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xfs
import (
"bufio"
"fmt"
"io"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// ParseStats parses a Stats from an input io.Reader, using the format
// found in /proc/fs/xfs/stat.
func ParseStats(r io.Reader) (*Stats, error) {
const (
// Fields parsed into stats structures.
fieldExtentAlloc = "extent_alloc"
fieldAbt = "abt"
fieldBlkMap = "blk_map"
fieldBmbt = "bmbt"
fieldDir = "dir"
fieldTrans = "trans"
fieldIg = "ig"
fieldLog = "log"
fieldRw = "rw"
fieldAttr = "attr"
fieldIcluster = "icluster"
fieldVnodes = "vnodes"
fieldBuf = "buf"
fieldXpc = "xpc"
// Unimplemented at this time due to lack of documentation.
fieldPushAil = "push_ail"
fieldXstrat = "xstrat"
fieldAbtb2 = "abtb2"
fieldAbtc2 = "abtc2"
fieldBmbt2 = "bmbt2"
fieldIbt2 = "ibt2"
fieldFibt2 = "fibt2"
fieldQm = "qm"
fieldDebug = "debug"
)
var xfss Stats
s := bufio.NewScanner(r)
for s.Scan() {
// Expect at least a string label and a single integer value, ex:
// - abt 0
// - rw 1 2
ss := strings.Fields(string(s.Bytes()))
if len(ss) < 2 {
continue
}
label := ss[0]
// Extended precision counters are uint64 values.
if label == fieldXpc {
us, err := util.ParseUint64s(ss[1:])
if err != nil {
return nil, err
}
xfss.ExtendedPrecision, err = extendedPrecisionStats(us)
if err != nil {
return nil, err
}
continue
}
// All other counters are uint32 values.
us, err := util.ParseUint32s(ss[1:])
if err != nil {
return nil, err
}
switch label {
case fieldExtentAlloc:
xfss.ExtentAllocation, err = extentAllocationStats(us)
case fieldAbt:
xfss.AllocationBTree, err = btreeStats(us)
case fieldBlkMap:
xfss.BlockMapping, err = blockMappingStats(us)
case fieldBmbt:
xfss.BlockMapBTree, err = btreeStats(us)
case fieldDir:
xfss.DirectoryOperation, err = directoryOperationStats(us)
case fieldTrans:
xfss.Transaction, err = transactionStats(us)
case fieldIg:
xfss.InodeOperation, err = inodeOperationStats(us)
case fieldLog:
xfss.LogOperation, err = logOperationStats(us)
case fieldRw:
xfss.ReadWrite, err = readWriteStats(us)
case fieldAttr:
xfss.AttributeOperation, err = attributeOperationStats(us)
case fieldIcluster:
xfss.InodeClustering, err = inodeClusteringStats(us)
case fieldVnodes:
xfss.Vnode, err = vnodeStats(us)
case fieldBuf:
xfss.Buffer, err = bufferStats(us)
}
if err != nil {
return nil, err
}
}
return &xfss, s.Err()
}
// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s.
func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) {
if l := len(us); l != 4 {
return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l)
}
return ExtentAllocationStats{
ExtentsAllocated: us[0],
BlocksAllocated: us[1],
ExtentsFreed: us[2],
BlocksFreed: us[3],
}, nil
}
// btreeStats builds a BTreeStats from a slice of uint32s.
func btreeStats(us []uint32) (BTreeStats, error) {
if l := len(us); l != 4 {
return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l)
}
return BTreeStats{
Lookups: us[0],
Compares: us[1],
RecordsInserted: us[2],
RecordsDeleted: us[3],
}, nil
}
// BlockMappingStat builds a BlockMappingStats from a slice of uint32s.
func blockMappingStats(us []uint32) (BlockMappingStats, error) {
if l := len(us); l != 7 {
return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l)
}
return BlockMappingStats{
Reads: us[0],
Writes: us[1],
Unmaps: us[2],
ExtentListInsertions: us[3],
ExtentListDeletions: us[4],
ExtentListLookups: us[5],
ExtentListCompares: us[6],
}, nil
}
// DirectoryOperationStats builds a DirectoryOperationStats from a slice of uint32s.
func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) {
if l := len(us); l != 4 {
return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l)
}
return DirectoryOperationStats{
Lookups: us[0],
Creates: us[1],
Removes: us[2],
Getdents: us[3],
}, nil
}
// TransactionStats builds a TransactionStats from a slice of uint32s.
func transactionStats(us []uint32) (TransactionStats, error) {
if l := len(us); l != 3 {
return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l)
}
return TransactionStats{
Sync: us[0],
Async: us[1],
Empty: us[2],
}, nil
}
// InodeOperationStats builds an InodeOperationStats from a slice of uint32s.
func inodeOperationStats(us []uint32) (InodeOperationStats, error) {
if l := len(us); l != 7 {
return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l)
}
return InodeOperationStats{
Attempts: us[0],
Found: us[1],
Recycle: us[2],
Missed: us[3],
Duplicate: us[4],
Reclaims: us[5],
AttributeChange: us[6],
}, nil
}
// LogOperationStats builds a LogOperationStats from a slice of uint32s.
func logOperationStats(us []uint32) (LogOperationStats, error) {
if l := len(us); l != 5 {
return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l)
}
return LogOperationStats{
Writes: us[0],
Blocks: us[1],
NoInternalBuffers: us[2],
Force: us[3],
ForceSleep: us[4],
}, nil
}
// ReadWriteStats builds a ReadWriteStats from a slice of uint32s.
func readWriteStats(us []uint32) (ReadWriteStats, error) {
if l := len(us); l != 2 {
return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l)
}
return ReadWriteStats{
Read: us[0],
Write: us[1],
}, nil
}
// AttributeOperationStats builds an AttributeOperationStats from a slice of uint32s.
func attributeOperationStats(us []uint32) (AttributeOperationStats, error) {
if l := len(us); l != 4 {
return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l)
}
return AttributeOperationStats{
Get: us[0],
Set: us[1],
Remove: us[2],
List: us[3],
}, nil
}
// InodeClusteringStats builds an InodeClusteringStats from a slice of uint32s.
func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) {
if l := len(us); l != 3 {
return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l)
}
return InodeClusteringStats{
Iflush: us[0],
Flush: us[1],
FlushInode: us[2],
}, nil
}
// VnodeStats builds a VnodeStats from a slice of uint32s.
func vnodeStats(us []uint32) (VnodeStats, error) {
// The attribute "Free" appears to not be available on older XFS
// stats versions. Therefore, 7 or 8 elements may appear in
// this slice.
l := len(us)
if l != 7 && l != 8 {
return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l)
}
s := VnodeStats{
Active: us[0],
Allocate: us[1],
Get: us[2],
Hold: us[3],
Release: us[4],
Reclaim: us[5],
Remove: us[6],
}
// Skip adding free, unless it is present. The zero value will
// be used in place of an actual count.
if l == 7 {
return s, nil
}
s.Free = us[7]
return s, nil
}
// BufferStats builds a BufferStats from a slice of uint32s.
func bufferStats(us []uint32) (BufferStats, error) {
if l := len(us); l != 9 {
return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l)
}
return BufferStats{
Get: us[0],
Create: us[1],
GetLocked: us[2],
GetLockedWaited: us[3],
BusyLocked: us[4],
MissLocked: us[5],
PageRetries: us[6],
PageFound: us[7],
GetRead: us[8],
}, nil
}
// ExtendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint32s.
func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) {
if l := len(us); l != 3 {
return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l)
}
return ExtendedPrecisionStats{
FlushBytes: us[0],
WriteBytes: us[1],
ReadBytes: us[2],
}, nil
}
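ParseStats can also be driven directly from any reader; a sketch with a few representative /proc/fs/xfs/stat lines (values made up):

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/procfs/xfs"
)

func main() {
	sample := "extent_alloc 92447 97589 92448 93751\n" +
		"rw 424724 183308\n" +
		"xpc 399724544 92823103 86219234\n"

	stats, err := xfs.ParseStats(strings.NewReader(sample))
	if err != nil {
		panic(err)
	}
	fmt.Println(stats.ExtentAllocation.ExtentsAllocated) // 92447
	fmt.Println(stats.ReadWrite.Write)                   // 183308
	fmt.Println(stats.ExtendedPrecision.WriteBytes)      // 92823103
}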

vendor/github.com/prometheus/procfs/xfs/xfs.go (163 lines) generated vendored Normal file
View File

@ -0,0 +1,163 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package xfs provides access to statistics exposed by the XFS filesystem.
package xfs
// Stats contains XFS filesystem runtime statistics, parsed from
// /proc/fs/xfs/stat.
//
// The names and meanings of each statistic were taken from
// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux
// kernel source. Most counters are uint32s (same data types used in
// xfs_stats.h), but some of the "extended precision stats" are uint64s.
type Stats struct {
// The name of the filesystem used to source these statistics.
// If empty, this indicates aggregated statistics for all XFS
// filesystems on the host.
Name string
ExtentAllocation ExtentAllocationStats
AllocationBTree BTreeStats
BlockMapping BlockMappingStats
BlockMapBTree BTreeStats
DirectoryOperation DirectoryOperationStats
Transaction TransactionStats
InodeOperation InodeOperationStats
LogOperation LogOperationStats
ReadWrite ReadWriteStats
AttributeOperation AttributeOperationStats
InodeClustering InodeClusteringStats
Vnode VnodeStats
Buffer BufferStats
ExtendedPrecision ExtendedPrecisionStats
}
// ExtentAllocationStats contains statistics regarding XFS extent allocations.
type ExtentAllocationStats struct {
ExtentsAllocated uint32
BlocksAllocated uint32
ExtentsFreed uint32
BlocksFreed uint32
}
// BTreeStats contains statistics regarding an XFS internal B-tree.
type BTreeStats struct {
Lookups uint32
Compares uint32
RecordsInserted uint32
RecordsDeleted uint32
}
// BlockMappingStats contains statistics regarding XFS block maps.
type BlockMappingStats struct {
Reads uint32
Writes uint32
Unmaps uint32
ExtentListInsertions uint32
ExtentListDeletions uint32
ExtentListLookups uint32
ExtentListCompares uint32
}
// DirectoryOperationStats contains statistics regarding XFS directory entries.
type DirectoryOperationStats struct {
Lookups uint32
Creates uint32
Removes uint32
Getdents uint32
}
// TransactionStats contains statistics regarding XFS metadata transactions.
type TransactionStats struct {
Sync uint32
Async uint32
Empty uint32
}
// InodeOperationStats contains statistics regarding XFS inode operations.
type InodeOperationStats struct {
Attempts uint32
Found uint32
Recycle uint32
Missed uint32
Duplicate uint32
Reclaims uint32
AttributeChange uint32
}
// LogOperationStats contains statistics regarding the XFS log buffer.
type LogOperationStats struct {
Writes uint32
Blocks uint32
NoInternalBuffers uint32
Force uint32
ForceSleep uint32
}
// ReadWriteStats contains statistics regarding the number of read and write
// system calls for XFS filesystems.
type ReadWriteStats struct {
Read uint32
Write uint32
}
// AttributeOperationStats contains statistics regarding manipulation of
// XFS extended file attributes.
type AttributeOperationStats struct {
Get uint32
Set uint32
Remove uint32
List uint32
}
// InodeClusteringStats contains statistics regarding XFS inode clustering
// operations.
type InodeClusteringStats struct {
Iflush uint32
Flush uint32
FlushInode uint32
}
// VnodeStats contains statistics regarding XFS vnode operations.
type VnodeStats struct {
Active uint32
Allocate uint32
Get uint32
Hold uint32
Release uint32
Reclaim uint32
Remove uint32
Free uint32
}
// BufferStats contains statistics regarding XFS read/write I/O buffers.
type BufferStats struct {
Get uint32
Create uint32
GetLocked uint32
GetLockedWaited uint32
BusyLocked uint32
MissLocked uint32
PageRetries uint32
PageFound uint32
GetRead uint32
}
// ExtendedPrecisionStats contains high precision counters used to track the
// total number of bytes read, written, or flushed, during XFS operations.
type ExtendedPrecisionStats struct {
FlushBytes uint64
WriteBytes uint64
ReadBytes uint64
}
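For illustration (not part of this diff), a minimal sketch of how the Stats struct above might be consumed. The import path and the hand-filled counter values are assumptions; the field names come straight from the struct definitions shown here.

package main

import (
	"fmt"

	"github.com/prometheus/procfs/xfs"
)

func main() {
	// Hand-filled Stats value standing in for data parsed from /proc/fs/xfs/stat.
	s := xfs.Stats{
		Name: "", // empty Name means host-wide aggregated statistics
		ExtentAllocation: xfs.ExtentAllocationStats{
			ExtentsAllocated: 100,
			BlocksAllocated:  1600,
		},
		ReadWrite: xfs.ReadWriteStats{Read: 42, Write: 7},
	}
	fmt.Printf("xfs %q: %d extents / %d blocks allocated, %d reads, %d writes\n",
		s.Name, s.ExtentAllocation.ExtentsAllocated, s.ExtentAllocation.BlocksAllocated,
		s.ReadWrite.Read, s.ReadWrite.Write)
}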

View File

@ -1,22 +1,21 @@
Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell
MIT License
Please consider promoting this project if you find it useful.
Copyright (c) 2012-2018 Mat Ryer and Tyler Bunnell
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT
OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -39,7 +39,7 @@ type ValueAssertionFunc func(TestingT, interface{}, ...interface{}) bool
// for table driven tests.
type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool
// ValuesAssertionFunc is a common function prototype when validating an error value. Can be useful
// ErrorAssertionFunc is a common function prototype when validating an error value. Can be useful
// for table driven tests.
type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool
@ -179,7 +179,11 @@ func messageFromMsgAndArgs(msgAndArgs ...interface{}) string {
return ""
}
if len(msgAndArgs) == 1 {
return msgAndArgs[0].(string)
msg := msgAndArgs[0]
if msgAsStr, ok := msg.(string); ok {
return msgAsStr
}
return fmt.Sprintf("%+v", msg)
}
if len(msgAndArgs) > 1 {
return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...)
@ -415,6 +419,17 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return Fail(t, "Expected value not to be nil.", msgAndArgs...)
}
// containsKind checks if a specified kind is in the slice of kinds.
func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool {
for i := 0; i < len(kinds); i++ {
if kind == kinds[i] {
return true
}
}
return false
}
// isNil checks if a specified object is nil or not, without Failing.
func isNil(object interface{}) bool {
if object == nil {
@ -423,7 +438,14 @@ func isNil(object interface{}) bool {
value := reflect.ValueOf(object)
kind := value.Kind()
if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() {
isNilableKind := containsKind(
[]reflect.Kind{
reflect.Chan, reflect.Func,
reflect.Interface, reflect.Map,
reflect.Ptr, reflect.Slice},
kind)
if isNilableKind && value.IsNil() {
return true
}
@ -1327,7 +1349,7 @@ func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) {
}
// diff returns a diff of both values as long as both are of the same type and
// are a struct, map, slice or array. Otherwise it returns an empty string.
// are a struct, map, slice, array or string. Otherwise it returns an empty string.
func diff(expected interface{}, actual interface{}) string {
if expected == nil || actual == nil {
return ""
@ -1345,7 +1367,7 @@ func diff(expected interface{}, actual interface{}) string {
}
var e, a string
if ek != reflect.String {
if et != reflect.TypeOf("") {
e = spewConfig.Sdump(expected)
a = spewConfig.Sdump(actual)
} else {

View File

@ -22,7 +22,7 @@ type ValueAssertionFunc func(TestingT, interface{}, ...interface{})
// for table driven tests.
type BoolAssertionFunc func(TestingT, bool, ...interface{})
// ValuesAssertionFunc is a common function prototype when validating an error value. Can be useful
// ErrorAssertionFunc is a common function prototype when validating an error value. Can be useful
// for table driven tests.
type ErrorAssertionFunc func(TestingT, error, ...interface{})
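For illustration (not part of this diff), a minimal table-driven test sketch using the ErrorAssertionFunc type documented above. The test name and inputs are made up; assert.NoError and assert.Error are existing testify assertions that match the func(TestingT, error, ...interface{}) bool signature.

package example

import (
	"strconv"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestAtoi(t *testing.T) {
	tests := []struct {
		name    string
		input   string
		wantErr assert.ErrorAssertionFunc
	}{
		{name: "valid number", input: "42", wantErr: assert.NoError},
		{name: "garbage", input: "x", wantErr: assert.Error},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			_, err := strconv.Atoi(tt.input)
			// The table supplies the assertion, keeping each case declarative.
			tt.wantErr(t, err)
		})
	}
}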

22
vendor/k8s.io/klog/README.md generated vendored
View File

@ -1,7 +1,27 @@
klog
====
klog is a permanant fork of https://github.com/golang/glog. original README from glog is below
klog is a permanent fork of https://github.com/golang/glog.
## Why was klog created?
The decision to create klog was one that wasn't made lightly, but it was necessary due to some
drawbacks that are present in [glog](https://github.com/golang/glog). Ultimately, the fork was created due to glog not being under active development; this can be seen in the glog README:
> The code in this repo [...] is not itself under development
This makes us unable to solve many use cases without a fork. The factors that contributed to needing feature development are listed below:
* `glog` [presents a lot of "gotchas"](https://github.com/kubernetes/kubernetes/issues/61006) and introduces challenges in containerized environments, all of which aren't well documented.
* `glog` doesn't provide an easy way to test logs, which detracts from the stability of software using it
* A long term goal is to implement a logging interface that allows us to add context, change output format, etc.
Historical context is available here:
* https://github.com/kubernetes/kubernetes/issues/61006
* https://github.com/kubernetes/kubernetes/issues/70264
* https://groups.google.com/forum/#!msg/kubernetes-sig-architecture/wCWiWf3Juzs/hXRVBH90CgAJ
* https://groups.google.com/forum/#!msg/kubernetes-dev/7vnijOMhLS0/1oRiNtigBgAJ
----

View File

@ -1,10 +1,10 @@
# Defined below are the security contacts for this repo.
#
# They are the contact point for the Product Security Team to reach out
# They are the contact point for the Product Security Committee to reach out
# to for triaging and handling of incoming issues.
#
# The below names agree to abide by the
# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy)
# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy)
# and will be removed and replaced if they violate that agreement.
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE

46
vendor/k8s.io/klog/klog.go generated vendored
View File

@ -78,6 +78,7 @@ import (
"fmt"
"io"
stdLog "log"
"math"
"os"
"path/filepath"
"runtime"
@ -410,10 +411,14 @@ func InitFlags(flagset *flag.FlagSet) {
}
flagset.StringVar(&logging.logDir, "log_dir", "", "If non-empty, write log files in this directory")
flagset.StringVar(&logging.logFile, "log_file", "", "If non-empty, use this log file")
flagset.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", 1800,
"Defines the maximum size a log file can grow to. Unit is megabytes. "+
"If the value is 0, the maximum file size is unlimited.")
flagset.BoolVar(&logging.toStderr, "logtostderr", true, "log to standard error instead of files")
flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files")
flagset.Var(&logging.verbosity, "v", "number for the log level verbosity")
flagset.BoolVar(&logging.skipHeaders, "skip_headers", false, "If true, avoid header prefixes in the log messages")
flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", false, "If true, avoid headers when opening log files")
flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
flagset.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
@ -471,8 +476,15 @@ type loggingT struct {
// with the log-dir option.
logFile string
// When logFile is specified, this limiter makes sure the logFile won't exceed a certain size. When it exceeds that
// size, the logFile will be cleaned up. If this value is 0, no size limitation will be applied to logFile.
logFileMaxSizeMB uint64
// If true, do not add the prefix headers, useful when used with SetOutput
skipHeaders bool
// If true, do not add the headers to log files
skipLogHeaders bool
}
// buffer holds a byte Buffer for reuse. The zero value is ready for use.
@ -861,17 +873,32 @@ func (l *loggingT) exit(err error) {
type syncBuffer struct {
logger *loggingT
*bufio.Writer
file *os.File
sev severity
nbytes uint64 // The number of bytes written to this file
file *os.File
sev severity
nbytes uint64 // The number of bytes written to this file
maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up.
}
func (sb *syncBuffer) Sync() error {
return sb.file.Sync()
}
// CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options.
func CalculateMaxSize() uint64 {
if logging.logFile != "" {
if logging.logFileMaxSizeMB == 0 {
// If logFileMaxSizeMB is zero, we don't have limitations on the log size.
return math.MaxUint64
}
// Flag logFileMaxSizeMB is in MB for user convenience.
return logging.logFileMaxSizeMB * 1024 * 1024
}
// If "log_file" flag is not specified, the target file (sb.file) will be cleaned up when reaches a fixed size.
return MaxSize
}
func (sb *syncBuffer) Write(p []byte) (n int, err error) {
if sb.nbytes+uint64(len(p)) >= MaxSize {
if sb.nbytes+uint64(len(p)) >= sb.maxbytes {
if err := sb.rotateFile(time.Now(), false); err != nil {
sb.logger.exit(err)
}
@ -886,7 +913,7 @@ func (sb *syncBuffer) Write(p []byte) (n int, err error) {
// rotateFile closes the syncBuffer's file and starts a new one.
// The startup argument indicates whether this is the initial startup of klog.
// If startup is true, existing files are opened for apending instead of truncated.
// If startup is true, existing files are opened for appending instead of truncated.
func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error {
if sb.file != nil {
sb.Flush()
@ -901,6 +928,10 @@ func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error {
sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)
if sb.logger.skipLogHeaders {
return nil
}
// Write header.
var buf bytes.Buffer
fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05"))
@ -925,8 +956,9 @@ func (l *loggingT) createFiles(sev severity) error {
// has already been created, we can stop.
for s := sev; s >= infoLog && l.file[s] == nil; s-- {
sb := &syncBuffer{
logger: l,
sev: s,
logger: l,
sev: s,
maxbytes: CalculateMaxSize(),
}
if err := sb.rotateFile(now, true); err != nil {
return err
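For illustration (not part of this diff), a minimal sketch of wiring up the new log_file_max_size flag through InitFlags. The flag values and file path are hypothetical; only flags defined in the code above are used.

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	fs := flag.NewFlagSet("example", flag.ExitOnError)
	klog.InitFlags(fs)
	// Cap each log file at 100 MB; 0 would mean "no limit" per CalculateMaxSize.
	fs.Parse([]string{"--logtostderr=false", "--log_file=/tmp/example.log", "--log_file_max_size=100"})
	klog.Info("hello from klog")
	klog.Flush()
}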

2
vendor/k8s.io/klog/klog_file.go generated vendored
View File

@ -98,7 +98,7 @@ var onceLogDirs sync.Once
// successfully, create also attempts to update the symlink for that tag, ignoring
// errors.
// The startup argument indicates whether this is the initial startup of klog.
// If startup is true, existing files are opened for apending instead of truncated.
// If startup is true, existing files are opened for appending instead of truncated.
func create(tag string, t time.Time, startup bool) (f *os.File, filename string, err error) {
if logging.logFile != "" {
f, err := openOrCreate(logging.logFile, startup)

View File

@ -18,10 +18,15 @@ package io
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
)
// ErrLimitReached means that the read limit is reached.
var ErrLimitReached = errors.New("the read limit is reached")
// ConsistentRead repeatedly reads a file until it gets the same content twice.
// This is useful when reading files in /proc that are larger than page size
// and kernel may modify them between individual read() syscalls.
@ -53,3 +58,17 @@ func consistentReadSync(filename string, attempts int, sync func(int)) ([]byte,
}
return nil, fmt.Errorf("could not get consistent content of %s after %d attempts", filename, attempts)
}
// ReadAtMost reads up to `limit` bytes from `r`, and reports an error
// when `limit` bytes are read.
func ReadAtMost(r io.Reader, limit int64) ([]byte, error) {
limitedReader := &io.LimitedReader{R: r, N: limit}
data, err := ioutil.ReadAll(limitedReader)
if err != nil {
return data, err
}
if limitedReader.N <= 0 {
return data, ErrLimitReached
}
return data, nil
}
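For illustration (not part of this diff), a minimal sketch of ReadAtMost and its ErrLimitReached sentinel, using an in-memory reader rather than a real file.

package main

import (
	"fmt"
	"strings"

	utilio "k8s.io/utils/io"
)

func main() {
	r := strings.NewReader("0123456789")
	// With a 4-byte limit the limited reader is exhausted at the limit, so
	// ReadAtMost returns the data it read plus ErrLimitReached.
	data, err := utilio.ReadAtMost(r, 4)
	if err == utilio.ErrLimitReached {
		fmt.Printf("read %q, more data may remain\n", data)
		return
	}
	if err != nil {
		panic(err)
	}
	fmt.Printf("read everything: %q\n", data)
}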

14
vendor/k8s.io/utils/mount/OWNERS generated vendored Normal file
View File

@ -0,0 +1,14 @@
# See the OWNERS docs at https://go.k8s.io/owners
reviewers:
- jingxu97
- saad-ali
- jsafrane
- msau42
- andyzhangx
- gnufied
approvers:
- jingxu97
- saad-ali
- jsafrane

18
vendor/k8s.io/utils/mount/doc.go generated vendored Normal file
View File

@ -0,0 +1,18 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package mount defines an interface to mounting filesystems.
package mount // import "k8s.io/utils/mount"

207
vendor/k8s.io/utils/mount/fake_mounter.go generated vendored Normal file
View File

@ -0,0 +1,207 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mount
import (
"os"
"path/filepath"
"sync"
"k8s.io/klog"
)
// FakeMounter implements mount.Interface for tests.
type FakeMounter struct {
MountPoints []MountPoint
log []FakeAction
// Error to return for a path when calling IsLikelyNotMountPoint
MountCheckErrors map[string]error
// Some tests run things in parallel; make sure the mounter does not produce
// any of Go's DATA RACE warnings.
mutex sync.Mutex
UnmountFunc UnmountFunc
}
// UnmountFunc is a function callback to be executed during the Unmount() call.
type UnmountFunc func(path string) error
var _ Interface = &FakeMounter{}
const (
// FakeActionMount is the string for specifying mount as FakeAction.Action
FakeActionMount = "mount"
// FakeActionUnmount is the string for specifying unmount as FakeAction.Action
FakeActionUnmount = "unmount"
)
// FakeAction objects are logged every time a fake mount or unmount is called.
type FakeAction struct {
Action string // "mount" or "unmount"
Target string // applies to both mount and unmount actions
Source string // applies only to "mount" actions
FSType string // applies only to "mount" actions
}
// NewFakeMounter returns a FakeMounter struct that implements Interface and is
// suitable for testing purposes.
func NewFakeMounter(mps []MountPoint) *FakeMounter {
return &FakeMounter{
MountPoints: mps,
}
}
// ResetLog clears all the log entries in FakeMounter
func (f *FakeMounter) ResetLog() {
f.mutex.Lock()
defer f.mutex.Unlock()
f.log = []FakeAction{}
}
// GetLog returns the slice of FakeActions taken by the mounter
func (f *FakeMounter) GetLog() []FakeAction {
f.mutex.Lock()
defer f.mutex.Unlock()
return f.log
}
// Mount records the mount event and updates the in-memory mount points for FakeMounter
func (f *FakeMounter) Mount(source string, target string, fstype string, options []string) error {
f.mutex.Lock()
defer f.mutex.Unlock()
opts := []string{}
for _, option := range options {
// find 'bind' option
if option == "bind" {
// This is a bind-mount. In order to mimic linux behaviour, we must
// use the original device of the bind-mount as the real source.
// E.g. when mounted /dev/sda like this:
// $ mount /dev/sda /mnt/test
// $ mount -o bind /mnt/test /mnt/bound
// then /proc/mount contains:
// /dev/sda /mnt/test
// /dev/sda /mnt/bound
// (and not /mnt/test /mnt/bound)
// I.e. we must use /dev/sda as source instead of /mnt/test in the
// bind mount.
for _, mnt := range f.MountPoints {
if source == mnt.Path {
source = mnt.Device
break
}
}
}
// reuse MountPoint.Opts field to mark mount as readonly
opts = append(opts, option)
}
// If target is a symlink, get its absolute path
absTarget, err := filepath.EvalSymlinks(target)
if err != nil {
absTarget = target
}
f.MountPoints = append(f.MountPoints, MountPoint{Device: source, Path: absTarget, Type: fstype, Opts: opts})
klog.V(5).Infof("Fake mounter: mounted %s to %s", source, absTarget)
f.log = append(f.log, FakeAction{Action: FakeActionMount, Target: absTarget, Source: source, FSType: fstype})
return nil
}
// Unmount records the unmount event and updates the in-memory mount points for FakeMounter
func (f *FakeMounter) Unmount(target string) error {
f.mutex.Lock()
defer f.mutex.Unlock()
// If target is a symlink, get its absolute path
absTarget, err := filepath.EvalSymlinks(target)
if err != nil {
absTarget = target
}
newMountpoints := []MountPoint{}
for _, mp := range f.MountPoints {
if mp.Path == absTarget {
if f.UnmountFunc != nil {
err := f.UnmountFunc(absTarget)
if err != nil {
return err
}
}
klog.V(5).Infof("Fake mounter: unmounted %s from %s", mp.Device, absTarget)
// Don't copy it to newMountpoints
continue
}
newMountpoints = append(newMountpoints, MountPoint{Device: mp.Device, Path: mp.Path, Type: mp.Type})
}
f.MountPoints = newMountpoints
f.log = append(f.log, FakeAction{Action: FakeActionUnmount, Target: absTarget})
delete(f.MountCheckErrors, target)
return nil
}
// List returns all the in-memory mountpoints for FakeMounter
func (f *FakeMounter) List() ([]MountPoint, error) {
f.mutex.Lock()
defer f.mutex.Unlock()
return f.MountPoints, nil
}
// IsLikelyNotMountPoint determines whether a path is a mountpoint by checking
// if the absolute path to file is in the in-memory mountpoints
func (f *FakeMounter) IsLikelyNotMountPoint(file string) (bool, error) {
f.mutex.Lock()
defer f.mutex.Unlock()
err := f.MountCheckErrors[file]
if err != nil {
return false, err
}
_, err = os.Stat(file)
if err != nil {
return true, err
}
// If file is a symlink, get its absolute path
absFile, err := filepath.EvalSymlinks(file)
if err != nil {
absFile = file
}
for _, mp := range f.MountPoints {
if mp.Path == absFile {
klog.V(5).Infof("isLikelyNotMountPoint for %s: mounted %s, false", file, mp.Path)
return false, nil
}
}
klog.V(5).Infof("isLikelyNotMountPoint for %s: true", file)
return true, nil
}
// GetMountRefs finds all mount references to the path, returns a
// list of paths.
func (f *FakeMounter) GetMountRefs(pathname string) ([]string, error) {
realpath, err := filepath.EvalSymlinks(pathname)
if err != nil {
// Ignore error in FakeMounter, because we actually didn't create files.
realpath = pathname
}
return getMountRefsByDev(f, realpath)
}
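For illustration (not part of this diff), a minimal sketch of driving FakeMounter the way a test would; the device and paths are made up.

package main

import (
	"fmt"

	"k8s.io/utils/mount"
)

func main() {
	fake := mount.NewFakeMounter([]mount.MountPoint{
		{Device: "/dev/sdb1", Path: "/mnt/data"},
	})
	// Only in-memory state and the action log change; the real system is untouched.
	if err := fake.Mount("/mnt/data", "/mnt/bound", "", []string{"bind"}); err != nil {
		panic(err)
	}
	if err := fake.Unmount("/mnt/bound"); err != nil {
		panic(err)
	}
	for _, a := range fake.GetLog() {
		fmt.Printf("%s %s\n", a.Action, a.Target) // "mount /mnt/bound", then "unmount /mnt/bound"
	}
}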

300
vendor/k8s.io/utils/mount/mount.go generated vendored Normal file
View File

@ -0,0 +1,300 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// TODO(thockin): This whole pkg is pretty linux-centric. As soon as we have
// an alternate platform, we will need to abstract further.
package mount
import (
"fmt"
"os"
"path/filepath"
"strings"
utilexec "k8s.io/utils/exec"
)
const (
// Default mount command if mounter path is not specified.
defaultMountCommand = "mount"
)
// Interface defines the set of methods to allow for mount operations on a system.
type Interface interface {
// Mount mounts source to target as fstype with given options.
Mount(source string, target string, fstype string, options []string) error
// Unmount unmounts given target.
Unmount(target string) error
// List returns a list of all mounted filesystems. This can be large.
// On some platforms, reading mounts directly from the OS is not guaranteed
// consistent (i.e. it could change between chunked reads). This is guaranteed
// to be consistent.
List() ([]MountPoint, error)
// IsLikelyNotMountPoint uses heuristics to determine if a directory
// is not a mountpoint.
// It should return ErrNotExist when the directory does not exist.
// IsLikelyNotMountPoint does NOT properly detect all mountpoint types
// most notably Linux bind mounts and symbolic links. For callers that do not
// care about such situations, this is a faster alternative to calling List()
// and scanning that output.
IsLikelyNotMountPoint(file string) (bool, error)
// GetMountRefs finds all mount references to pathname, returning a slice of
// paths. Pathname can be a mountpoint path or a normal directory
// (for bind mount). On Linux, pathname is excluded from the slice.
// For example, if /dev/sdc was mounted at /path/a and /path/b,
// GetMountRefs("/path/a") would return ["/path/b"]
// GetMountRefs("/path/b") would return ["/path/a"]
// On Windows there is no way to query all mount points; as long as pathname is
// a valid mount, it will be returned.
GetMountRefs(pathname string) ([]string, error)
}
// Compile-time check to ensure all Mounter implementations satisfy
// the mount interface.
var _ Interface = &Mounter{}
// MountPoint represents a single line in /proc/mounts or /etc/fstab.
type MountPoint struct {
Device string
Path string
Type string
Opts []string
Freq int
Pass int
}
type MountErrorType string
const (
FilesystemMismatch MountErrorType = "FilesystemMismatch"
HasFilesystemErrors MountErrorType = "HasFilesystemErrors"
UnformattedReadOnly MountErrorType = "UnformattedReadOnly"
FormatFailed MountErrorType = "FormatFailed"
GetDiskFormatFailed MountErrorType = "GetDiskFormatFailed"
UnknownMountError MountErrorType = "UnknownMountError"
)
type MountError struct {
Type MountErrorType
Message string
}
func (mountError MountError) String() string {
return mountError.Message
}
func (mountError MountError) Error() string {
return mountError.Message
}
func NewMountError(mountErrorValue MountErrorType, format string, args ...interface{}) error {
mountError := MountError{
Type: mountErrorValue,
Message: fmt.Sprintf(format, args...),
}
return mountError
}
// SafeFormatAndMount probes a device to see if it is formatted.
// Namely it checks to see if a file system is present. If so it
// mounts it otherwise the device is formatted first then mounted.
type SafeFormatAndMount struct {
Interface
Exec utilexec.Interface
}
// FormatAndMount formats the given disk, if needed, and mounts it.
// That is if the disk is not formatted and it is not being mounted as
// read-only it will format it first then mount it. Otherwise, if the
// disk is already formatted or it is being mounted as read-only, it
// will be mounted without formatting.
func (mounter *SafeFormatAndMount) FormatAndMount(source string, target string, fstype string, options []string) error {
return mounter.formatAndMount(source, target, fstype, options)
}
// getMountRefsByDev finds all references to the device provided
// by mountPath; returns a list of paths.
// Note that mountPath should be the path after the evaluation of any symbolic links.
func getMountRefsByDev(mounter Interface, mountPath string) ([]string, error) {
mps, err := mounter.List()
if err != nil {
return nil, err
}
// Finding the device mounted to mountPath.
diskDev := ""
for i := range mps {
if mountPath == mps[i].Path {
diskDev = mps[i].Device
break
}
}
// Find all references to the device.
var refs []string
for i := range mps {
if mps[i].Device == diskDev || mps[i].Device == mountPath {
if mps[i].Path != mountPath {
refs = append(refs, mps[i].Path)
}
}
}
return refs, nil
}
// GetDeviceNameFromMount, given a mount point, finds the device from /proc/mounts
// and returns the device name, reference count, and any error encountered.
func GetDeviceNameFromMount(mounter Interface, mountPath string) (string, int, error) {
mps, err := mounter.List()
if err != nil {
return "", 0, err
}
// Find the device name.
// FIXME if multiple devices mounted on the same mount path, only the first one is returned.
device := ""
// If mountPath is a symlink, we need to get its target path.
slTarget, err := filepath.EvalSymlinks(mountPath)
if err != nil {
slTarget = mountPath
}
for i := range mps {
if mps[i].Path == slTarget {
device = mps[i].Device
break
}
}
// Find all references to the device.
refCount := 0
for i := range mps {
if mps[i].Device == device {
refCount++
}
}
return device, refCount, nil
}
// IsNotMountPoint determines if a directory is a mountpoint.
// It should return ErrNotExist when the directory does not exist.
// IsNotMountPoint is more expensive than IsLikelyNotMountPoint.
// IsNotMountPoint detects bind mounts in linux.
// IsNotMountPoint enumerates all the mountpoints using List() (the list of
// mountpoints may be large) and then uses isMountPointMatch to evaluate
// whether the directory is a mountpoint.
func IsNotMountPoint(mounter Interface, file string) (bool, error) {
// IsLikelyNotMountPoint provides a quick check
// to determine whether file IS A mountpoint.
notMnt, notMntErr := mounter.IsLikelyNotMountPoint(file)
if notMntErr != nil && os.IsPermission(notMntErr) {
// We were not allowed to do the simple stat() check, e.g. on NFS with
// root_squash. Fall back to /proc/mounts check below.
notMnt = true
notMntErr = nil
}
if notMntErr != nil {
return notMnt, notMntErr
}
// identified as mountpoint, so return this fact.
if notMnt == false {
return notMnt, nil
}
// Resolve any symlinks in file, kernel would do the same and use the resolved path in /proc/mounts.
resolvedFile, err := filepath.EvalSymlinks(file)
if err != nil {
return true, err
}
// check all mountpoints since IsLikelyNotMountPoint
// is not reliable for some mountpoint types.
mountPoints, mountPointsErr := mounter.List()
if mountPointsErr != nil {
return notMnt, mountPointsErr
}
for _, mp := range mountPoints {
if isMountPointMatch(mp, resolvedFile) {
notMnt = false
break
}
}
return notMnt, nil
}
// MakeBindOpts detects whether a bind mount is being requested and builds the remount options to
// use for it, because a bind mount doesn't respect the regular mount options.
// The resulting list equals:
// options - 'bind' + 'remount' (no duplicates)
func MakeBindOpts(options []string) (bool, []string, []string) {
// Because we have an FD opened on the subpath bind mount, the "bind" option
// needs to be included, otherwise the mount target will error as busy if you
// remount as readonly.
//
// As a consequence, all read only bind mounts will no longer change the underlying
// volume mount to be read only.
bindRemountOpts := []string{"bind", "remount"}
bind := false
bindOpts := []string{"bind"}
// _netdev is a userspace mount option and does not automatically get added when
// bind mount is created and hence we must carry it over.
if checkForNetDev(options) {
bindOpts = append(bindOpts, "_netdev")
}
for _, option := range options {
switch option {
case "bind":
bind = true
break
case "remount":
break
default:
bindRemountOpts = append(bindRemountOpts, option)
}
}
return bind, bindOpts, bindRemountOpts
}
func checkForNetDev(options []string) bool {
for _, option := range options {
if option == "_netdev" {
return true
}
}
return false
}
// PathWithinBase checks if give path is within given base directory.
func PathWithinBase(fullPath, basePath string) bool {
rel, err := filepath.Rel(basePath, fullPath)
if err != nil {
return false
}
if StartsWithBackstep(rel) {
// Needed to escape the base path.
return false
}
return true
}
// StartsWithBackstep checks if the given path starts with a backstep segment.
func StartsWithBackstep(rel string) bool {
// normalize to / and check for ../
return rel == ".." || strings.HasPrefix(filepath.ToSlash(rel), "../")
}
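For illustration (not part of this diff), a minimal sketch of the MakeBindOpts and PathWithinBase helpers defined above; the option lists and paths are made up.

package main

import (
	"fmt"

	"k8s.io/utils/mount"
)

func main() {
	// Split caller options into the initial bind mount and the remount that follows it.
	bind, bindOpts, bindRemountOpts := mount.MakeBindOpts([]string{"bind", "ro", "_netdev"})
	fmt.Println(bind)            // true
	fmt.Println(bindOpts)        // [bind _netdev]
	fmt.Println(bindRemountOpts) // [bind remount ro _netdev]

	fmt.Println(mount.PathWithinBase("/var/lib/kubelet/pods/x", "/var/lib/kubelet")) // true
	fmt.Println(mount.PathWithinBase("/etc", "/var/lib/kubelet"))                    // false
}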

99
vendor/k8s.io/utils/mount/mount_helper_common.go generated vendored Normal file
View File

@ -0,0 +1,99 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mount
import (
"fmt"
"os"
"k8s.io/klog"
)
// CleanupMountPoint unmounts the given path and deletes the remaining directory
// if successful. If extensiveMountPointCheck is true IsNotMountPoint will be
// called instead of IsLikelyNotMountPoint. IsNotMountPoint is more expensive
// but properly handles bind mounts within the same fs.
func CleanupMountPoint(mountPath string, mounter Interface, extensiveMountPointCheck bool) error {
pathExists, pathErr := PathExists(mountPath)
if !pathExists {
klog.Warningf("Warning: Unmount skipped because path does not exist: %v", mountPath)
return nil
}
corruptedMnt := IsCorruptedMnt(pathErr)
if pathErr != nil && !corruptedMnt {
return fmt.Errorf("Error checking path: %v", pathErr)
}
return doCleanupMountPoint(mountPath, mounter, extensiveMountPointCheck, corruptedMnt)
}
// doCleanupMountPoint unmounts the given path and
// deletes the remaining directory if successful.
// if extensiveMountPointCheck is true
// IsNotMountPoint will be called instead of IsLikelyNotMountPoint.
// IsNotMountPoint is more expensive but properly handles bind mounts within the same fs.
// if corruptedMnt is true, it means that the mountPath is a corrupted mountpoint, and the mount point check
// will be skipped
func doCleanupMountPoint(mountPath string, mounter Interface, extensiveMountPointCheck bool, corruptedMnt bool) error {
if !corruptedMnt {
var notMnt bool
var err error
if extensiveMountPointCheck {
notMnt, err = IsNotMountPoint(mounter, mountPath)
} else {
notMnt, err = mounter.IsLikelyNotMountPoint(mountPath)
}
if err != nil {
return err
}
if notMnt {
klog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath)
return os.Remove(mountPath)
}
}
// Unmount the mount path
klog.V(4).Infof("%q is a mountpoint, unmounting", mountPath)
if err := mounter.Unmount(mountPath); err != nil {
return err
}
notMnt, mntErr := mounter.IsLikelyNotMountPoint(mountPath)
if mntErr != nil {
return mntErr
}
if notMnt {
klog.V(4).Infof("%q is unmounted, deleting the directory", mountPath)
return os.Remove(mountPath)
}
return fmt.Errorf("Failed to unmount path %v", mountPath)
}
// PathExists returns true if the specified path exists.
// TODO: clean this up to use pkg/util/file/FileExists
func PathExists(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil {
return true, nil
} else if os.IsNotExist(err) {
return false, nil
} else if IsCorruptedMnt(err) {
return true, err
}
return false, err
}
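For illustration (not part of this diff), a minimal sketch of CleanupMountPoint backed by the FakeMounter from earlier in this change; a temporary directory stands in for a real mount point.

package main

import (
	"io/ioutil"

	"k8s.io/utils/mount"
)

func main() {
	dir, err := ioutil.TempDir("", "cleanup-example")
	if err != nil {
		panic(err)
	}
	// The fake mounter treats dir as mounted, so CleanupMountPoint "unmounts"
	// it and then removes the now-empty directory.
	fake := mount.NewFakeMounter([]mount.MountPoint{{Device: "/dev/sdb1", Path: dir}})
	if err := mount.CleanupMountPoint(dir, fake, false); err != nil {
		panic(err)
	}
}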

158
vendor/k8s.io/utils/mount/mount_helper_unix.go generated vendored Normal file
View File

@ -0,0 +1,158 @@
// +build !windows
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mount
import (
"fmt"
"os"
"strconv"
"strings"
"syscall"
utilio "k8s.io/utils/io"
)
const (
// Minimum number of fields per line in /proc/<pid>/mountinfo.
expectedAtLeastNumFieldsPerMountInfo = 10
// How many times to retry for a consistent read of /proc/mounts.
maxListTries = 3
)
// IsCorruptedMnt return true if err is about corrupted mount point
func IsCorruptedMnt(err error) bool {
if err == nil {
return false
}
var underlyingError error
switch pe := err.(type) {
case nil:
return false
case *os.PathError:
underlyingError = pe.Err
case *os.LinkError:
underlyingError = pe.Err
case *os.SyscallError:
underlyingError = pe.Err
}
return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE || underlyingError == syscall.EIO || underlyingError == syscall.EACCES
}
// MountInfo represents a single line in /proc/<pid>/mountinfo.
type MountInfo struct {
// Unique ID for the mount (may be reused after umount).
ID int
// The ID of the parent mount (or of self for the root of this mount namespace's mount tree).
ParentID int
// Major indicates one half of the device ID which identifies the device class
// (parsed from `st_dev` for files on this filesystem).
Major int
// Minor indicates one half of the device ID which identifies a specific
// instance of device (parsed from `st_dev` for files on this filesystem).
Minor int
// The pathname of the directory in the filesystem which forms the root of this mount.
Root string
// Mount source, filesystem-specific information. e.g. device, tmpfs name.
Source string
// Mount point, the pathname of the mount point.
MountPoint string
// Optional fields, zero or more fields of the form "tag[:value]".
OptionalFields []string
// The filesystem type in the form "type[.subtype]".
FsType string
// Per-mount options.
MountOptions []string
// Per-superblock options.
SuperOptions []string
}
// ParseMountInfo parses /proc/xxx/mountinfo.
func ParseMountInfo(filename string) ([]MountInfo, error) {
content, err := utilio.ConsistentRead(filename, maxListTries)
if err != nil {
return []MountInfo{}, err
}
contentStr := string(content)
infos := []MountInfo{}
for _, line := range strings.Split(contentStr, "\n") {
if line == "" {
// the last split() item is empty string following the last \n
continue
}
// See `man proc` for authoritative description of format of the file.
fields := strings.Fields(line)
if len(fields) < expectedAtLeastNumFieldsPerMountInfo {
return nil, fmt.Errorf("wrong number of fields in (expected at least %d, got %d): %s", expectedAtLeastNumFieldsPerMountInfo, len(fields), line)
}
id, err := strconv.Atoi(fields[0])
if err != nil {
return nil, err
}
parentID, err := strconv.Atoi(fields[1])
if err != nil {
return nil, err
}
mm := strings.Split(fields[2], ":")
if len(mm) != 2 {
return nil, fmt.Errorf("parsing '%s' failed: unexpected minor:major pair %s", line, mm)
}
major, err := strconv.Atoi(mm[0])
if err != nil {
return nil, fmt.Errorf("parsing '%s' failed: unable to parse major device id, err:%v", mm[0], err)
}
minor, err := strconv.Atoi(mm[1])
if err != nil {
return nil, fmt.Errorf("parsing '%s' failed: unable to parse minor device id, err:%v", mm[1], err)
}
info := MountInfo{
ID: id,
ParentID: parentID,
Major: major,
Minor: minor,
Root: fields[3],
MountPoint: fields[4],
MountOptions: strings.Split(fields[5], ","),
}
// All fields until "-" are "optional fields".
i := 6
for ; i < len(fields) && fields[i] != "-"; i++ {
info.OptionalFields = append(info.OptionalFields, fields[i])
}
// Parse the remaining 3 fields.
i++
if len(fields)-i < 3 {
return nil, fmt.Errorf("expect 3 fields in %s, got %d", line, len(fields)-i)
}
info.FsType = fields[i]
info.Source = fields[i+1]
info.SuperOptions = strings.Split(fields[i+2], ",")
infos = append(infos, info)
}
return infos, nil
}
// isMountPointMatch returns true if the path in mp is the same as dir.
// Handles case where mountpoint dir has been renamed due to stale NFS mount.
func isMountPointMatch(mp MountPoint, dir string) bool {
deletedDir := fmt.Sprintf("%s\\040(deleted)", dir)
return ((mp.Path == dir) || (mp.Path == deletedDir))
}
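For illustration (not part of this diff), a minimal sketch of ParseMountInfo on a non-Windows host; filtering on tmpfs is just an arbitrary example.

package main

import (
	"fmt"

	"k8s.io/utils/mount"
)

func main() {
	infos, err := mount.ParseMountInfo("/proc/self/mountinfo")
	if err != nil {
		panic(err)
	}
	for _, mi := range infos {
		if mi.FsType == "tmpfs" {
			fmt.Printf("%s mounted at %s with options %v\n", mi.Source, mi.MountPoint, mi.MountOptions)
		}
	}
}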

101
vendor/k8s.io/utils/mount/mount_helper_windows.go generated vendored Normal file
View File

@ -0,0 +1,101 @@
// +build windows
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mount
import (
"fmt"
"os"
"strconv"
"strings"
"syscall"
"k8s.io/klog"
)
// following failure codes are from https://docs.microsoft.com/en-us/windows/desktop/debug/system-error-codes--1300-1699-
// ERROR_BAD_NETPATH = 53
// ERROR_NETWORK_BUSY = 54
// ERROR_UNEXP_NET_ERR = 59
// ERROR_NETNAME_DELETED = 64
// ERROR_NETWORK_ACCESS_DENIED = 65
// ERROR_BAD_DEV_TYPE = 66
// ERROR_BAD_NET_NAME = 67
// ERROR_SESSION_CREDENTIAL_CONFLICT = 1219
// ERROR_LOGON_FAILURE = 1326
var errorNoList = [...]int{53, 54, 59, 64, 65, 66, 67, 1219, 1326}
// IsCorruptedMnt return true if err is about corrupted mount point
func IsCorruptedMnt(err error) bool {
if err == nil {
return false
}
var underlyingError error
switch pe := err.(type) {
case nil:
return false
case *os.PathError:
underlyingError = pe.Err
case *os.LinkError:
underlyingError = pe.Err
case *os.SyscallError:
underlyingError = pe.Err
}
if ee, ok := underlyingError.(syscall.Errno); ok {
for _, errno := range errorNoList {
if int(ee) == errno {
klog.Warningf("IsCorruptedMnt failed with error: %v, error code: %v", err, errno)
return true
}
}
}
return false
}
// NormalizeWindowsPath makes sure the given path is a valid path on Windows
// systems by replacing all instances of `/` with `\\` and ensuring the
// path begins with `c:`.
func NormalizeWindowsPath(path string) string {
normalizedPath := strings.Replace(path, "/", "\\", -1)
if strings.HasPrefix(normalizedPath, "\\") {
normalizedPath = "c:" + normalizedPath
}
return normalizedPath
}
// ValidateDiskNumber : disk number should be a number in [0, 99]
func ValidateDiskNumber(disk string) error {
diskNum, err := strconv.Atoi(disk)
if err != nil {
return fmt.Errorf("wrong disk number format: %q, err:%v", disk, err)
}
if diskNum < 0 || diskNum > 99 {
return fmt.Errorf("disk number out of range: %q", disk)
}
return nil
}
// isMountPointMatch determines if the mountpoint matches the dir
func isMountPointMatch(mp MountPoint, dir string) bool {
return mp.Path == dir
}
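For illustration (not part of this diff), a minimal sketch of the Windows-only helpers above; it only builds under the windows build tag, and the paths and disk numbers are made up.

package main

import (
	"fmt"

	"k8s.io/utils/mount"
)

func main() {
	fmt.Println(mount.NormalizeWindowsPath("/var/lib/kubelet/pods")) // c:\var\lib\kubelet\pods
	if err := mount.ValidateDiskNumber("7"); err != nil {
		panic(err)
	}
	if err := mount.ValidateDiskNumber("123"); err != nil {
		fmt.Println("rejected:", err) // outside the [0, 99] range
	}
}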

536
vendor/k8s.io/utils/mount/mount_linux.go generated vendored Normal file
View File

@ -0,0 +1,536 @@
// +build linux
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mount
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"syscall"
"k8s.io/klog"
utilexec "k8s.io/utils/exec"
utilio "k8s.io/utils/io"
)
const (
// Number of fields per line in /proc/mounts as per the fstab man page.
expectedNumFieldsPerLine = 6
// Location of the mount file to use
procMountsPath = "/proc/mounts"
// Location of the mountinfo file
procMountInfoPath = "/proc/self/mountinfo"
// 'fsck' found errors and corrected them
fsckErrorsCorrected = 1
// 'fsck' found errors but exited without correcting them
fsckErrorsUncorrected = 4
)
// Mounter provides the default implementation of mount.Interface
// for the linux platform. This implementation assumes that the
// kubelet is running in the host's root mount namespace.
type Mounter struct {
mounterPath string
withSystemd bool
}
// New returns a mount.Interface for the current system.
// It provides options to override the default mounter behavior.
// mounterPath allows using an alternative to `/bin/mount` for mounting.
func New(mounterPath string) Interface {
return &Mounter{
mounterPath: mounterPath,
withSystemd: detectSystemd(),
}
}
// Mount mounts source to target as fstype with given options. 'source' and 'fstype' must
// be an empty string in case it's not required, e.g. for remount, or for auto filesystem
// type, where kernel handles fstype for you. The mount 'options' is a list of options,
// currently come from mount(8), e.g. "ro", "remount", "bind", etc. If no more option is
// required, call Mount with an empty string list or nil.
func (mounter *Mounter) Mount(source string, target string, fstype string, options []string) error {
// Path to mounter binary if containerized mounter is needed. Otherwise, it is set to empty.
// All Linux distros are expected to be shipped with a mount utility that supports bind mounts.
mounterPath := ""
bind, bindOpts, bindRemountOpts := MakeBindOpts(options)
if bind {
err := mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindOpts)
if err != nil {
return err
}
return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindRemountOpts)
}
// The list of filesystems that require containerized mounter on GCI image cluster
fsTypesNeedMounter := map[string]struct{}{
"nfs": {},
"glusterfs": {},
"ceph": {},
"cifs": {},
}
if _, ok := fsTypesNeedMounter[fstype]; ok {
mounterPath = mounter.mounterPath
}
return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, options)
}
// doMount runs the mount command. mounterPath is the path to mounter binary if containerized mounter is used.
func (mounter *Mounter) doMount(mounterPath string, mountCmd string, source string, target string, fstype string, options []string) error {
mountArgs := MakeMountArgs(source, target, fstype, options)
if len(mounterPath) > 0 {
mountArgs = append([]string{mountCmd}, mountArgs...)
mountCmd = mounterPath
}
if mounter.withSystemd {
// Try to run mount via systemd-run --scope. This will escape the
// service where kubelet runs and any fuse daemons will be started in a
// specific scope. The kubelet service can then be restarted without killing
// these fuse daemons.
//
// Complete command line (when mounterPath is not used):
// systemd-run --description=... --scope -- mount -t <type> <what> <where>
//
// Expected flow:
// * systemd-run creates a transient scope (=~ cgroup) and executes its
// argument (/bin/mount) there.
// * mount does its job, forks a fuse daemon if necessary and finishes.
// (systemd-run --scope finishes at this point, returning mount's exit
// code and stdout/stderr - that's one of --scope's benefits).
// * systemd keeps the fuse daemon running in the scope (i.e. in its own
// cgroup) until the fuse daemon dies (another --scope benefit).
// Kubelet service can be restarted and the fuse daemon survives.
// * When the fuse daemon dies (e.g. during unmount) systemd removes the
// scope automatically.
//
// systemd-mount is not used because it's too new for older distros
// (CentOS 7, Debian Jessie).
mountCmd, mountArgs = AddSystemdScope("systemd-run", target, mountCmd, mountArgs)
} else {
// No systemd-run on the host (or we failed to check it), assume kubelet
// does not run as a systemd service.
// No code here, mountCmd and mountArgs are already populated.
}
klog.V(4).Infof("Mounting cmd (%s) with arguments (%s)", mountCmd, mountArgs)
command := exec.Command(mountCmd, mountArgs...)
output, err := command.CombinedOutput()
if err != nil {
args := strings.Join(mountArgs, " ")
klog.Errorf("Mount failed: %v\nMounting command: %s\nMounting arguments: %s\nOutput: %s\n", err, mountCmd, args, string(output))
return fmt.Errorf("mount failed: %v\nMounting command: %s\nMounting arguments: %s\nOutput: %s",
err, mountCmd, args, string(output))
}
return err
}
// detectSystemd returns true if OS runs with systemd as init. When not sure
// (permission errors, ...), it returns false.
// There may be different ways to detect systemd; this one makes sure that
// systemd-run (needed by Mount()) works.
func detectSystemd() bool {
if _, err := exec.LookPath("systemd-run"); err != nil {
klog.V(2).Infof("Detected OS without systemd")
return false
}
// Try to run systemd-run --scope /bin/true, that should be enough
// to make sure that systemd is really running and not just installed,
// which happens when running in a container with a systemd-based image
// but with different pid 1.
cmd := exec.Command("systemd-run", "--description=Kubernetes systemd probe", "--scope", "true")
output, err := cmd.CombinedOutput()
if err != nil {
klog.V(2).Infof("Cannot run systemd-run, assuming non-systemd OS")
klog.V(4).Infof("systemd-run failed with: %v", err)
klog.V(4).Infof("systemd-run output: %s", string(output))
return false
}
klog.V(2).Infof("Detected OS with systemd")
return true
}
// MakeMountArgs makes the arguments to the mount(8) command.
// Implementation is shared with NsEnterMounter
func MakeMountArgs(source, target, fstype string, options []string) []string {
// Build mount command as follows:
// mount [-t $fstype] [-o $options] [$source] $target
mountArgs := []string{}
if len(fstype) > 0 {
mountArgs = append(mountArgs, "-t", fstype)
}
if len(options) > 0 {
mountArgs = append(mountArgs, "-o", strings.Join(options, ","))
}
if len(source) > 0 {
mountArgs = append(mountArgs, source)
}
mountArgs = append(mountArgs, target)
return mountArgs
}
// AddSystemdScope adds "system-run --scope" to given command line
// implementation is shared with NsEnterMounter
func AddSystemdScope(systemdRunPath, mountName, command string, args []string) (string, []string) {
descriptionArg := fmt.Sprintf("--description=Kubernetes transient mount for %s", mountName)
systemdRunArgs := []string{descriptionArg, "--scope", "--", command}
return systemdRunPath, append(systemdRunArgs, args...)
}
// Unmount unmounts the target.
func (mounter *Mounter) Unmount(target string) error {
klog.V(4).Infof("Unmounting %s", target)
command := exec.Command("umount", target)
output, err := command.CombinedOutput()
if err != nil {
return fmt.Errorf("unmount failed: %v\nUnmounting arguments: %s\nOutput: %s", err, target, string(output))
}
return nil
}
// List returns a list of all mounted filesystems.
func (*Mounter) List() ([]MountPoint, error) {
return ListProcMounts(procMountsPath)
}
// IsLikelyNotMountPoint determines if a directory is not a mountpoint.
// It is fast but not necessarily ALWAYS correct. If the path is in fact
// a bind mount from one part of a mount to another it will not be detected.
// It also can not distinguish between mountpoints and symbolic links.
// mkdir /tmp/a /tmp/b; mount --bind /tmp/a /tmp/b; IsLikelyNotMountPoint("/tmp/b")
// will return true, even though /tmp/b is in fact a mount point. If this situation
// is of interest to you, don't use this function...
func (mounter *Mounter) IsLikelyNotMountPoint(file string) (bool, error) {
stat, err := os.Stat(file)
if err != nil {
return true, err
}
rootStat, err := os.Stat(filepath.Dir(strings.TrimSuffix(file, "/")))
if err != nil {
return true, err
}
// If the directory has a different device than its parent, then it is a mountpoint.
if stat.Sys().(*syscall.Stat_t).Dev != rootStat.Sys().(*syscall.Stat_t).Dev {
return false, nil
}
return true, nil
}
// GetMountRefs finds all mount references to pathname, returns a
// list of paths. Path could be a mountpoint or a normal
// directory (for bind mount).
func (mounter *Mounter) GetMountRefs(pathname string) ([]string, error) {
pathExists, pathErr := PathExists(pathname)
if !pathExists {
return []string{}, nil
} else if IsCorruptedMnt(pathErr) {
klog.Warningf("GetMountRefs found corrupted mount at %s, treating as unmounted path", pathname)
return []string{}, nil
} else if pathErr != nil {
return nil, fmt.Errorf("error checking path %s: %v", pathname, pathErr)
}
realpath, err := filepath.EvalSymlinks(pathname)
if err != nil {
return nil, err
}
return SearchMountPoints(realpath, procMountInfoPath)
}
// checkAndRepairFilesystem checks and repairs filesystems using the fsck command.
func (mounter *SafeFormatAndMount) checkAndRepairFilesystem(source string) error {
klog.V(4).Infof("Checking for issues with fsck on disk: %s", source)
args := []string{"-a", source}
out, err := mounter.Exec.Command("fsck", args...).CombinedOutput()
if err != nil {
ee, isExitError := err.(utilexec.ExitError)
switch {
case err == utilexec.ErrExecutableNotFound:
klog.Warningf("'fsck' not found on system; continuing mount without running 'fsck'.")
case isExitError && ee.ExitStatus() == fsckErrorsCorrected:
klog.Infof("Device %s has errors which were corrected by fsck.", source)
case isExitError && ee.ExitStatus() == fsckErrorsUncorrected:
return NewMountError(HasFilesystemErrors, "'fsck' found errors on device %s but could not correct them: %s", source, string(out))
case isExitError && ee.ExitStatus() > fsckErrorsUncorrected:
klog.Infof("`fsck` error %s", string(out))
}
}
return nil
}
// checkAndRepairXfsFilesystem checks and repairs xfs filesystem using command xfs_repair.
func (mounter *SafeFormatAndMount) checkAndRepairXfsFilesystem(source string) error {
klog.V(4).Infof("Checking for issues with xfs_repair on disk: %s", source)
args := []string{source}
checkArgs := []string{"-n", source}
// check-only using "xfs_repair -n", if the exit status is not 0, perform a "xfs_repair"
_, err := mounter.Exec.Command("xfs_repair", checkArgs...).CombinedOutput()
if err != nil {
if err == utilexec.ErrExecutableNotFound {
klog.Warningf("'xfs_repair' not found on system; continuing mount without running 'xfs_repair'.")
return nil
} else {
klog.Warningf("Filesystem corruption was detected for %s, running xfs_repair to repair", source)
out, err := mounter.Exec.Command("xfs_repair", args...).CombinedOutput()
if err != nil {
return NewMountError(HasFilesystemErrors, "'xfs_repair' found errors on device %s but could not correct them: %s\n", source, out)
} else {
klog.Infof("Device %s has errors which were corrected by xfs_repair.", source)
return nil
}
}
}
return nil
}
// formatAndMount uses unix utils to format and mount the given disk
func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, fstype string, options []string) error {
readOnly := false
for _, option := range options {
if option == "ro" {
readOnly = true
break
}
}
options = append(options, "defaults")
mountErrorValue := UnknownMountError
// Check if the disk is already formatted
existingFormat, err := mounter.GetDiskFormat(source)
if err != nil {
return NewMountError(GetDiskFormatFailed, "failed to get disk format of disk %s: %v", source, err)
}
// Use 'ext4' as the default
if len(fstype) == 0 {
fstype = "ext4"
}
if existingFormat == "" {
// Do not attempt to format the disk if mounting as readonly, return an error to reflect this.
if readOnly {
return NewMountError(UnformattedReadOnly, "cannot mount unformatted disk %s as we are manipulating it in read-only mode", source)
}
// Disk is unformatted so format it.
args := []string{source}
if fstype == "ext4" || fstype == "ext3" {
args = []string{
"-F", // Force flag
"-m0", // Zero blocks reserved for super-user
source,
}
}
klog.Infof("Disk %q appears to be unformatted, attempting to format as type: %q with options: %v", source, fstype, args)
output, err := mounter.Exec.Command("mkfs."+fstype, args...).CombinedOutput()
if err != nil {
detailedErr := fmt.Sprintf("format of disk %q failed: type:(%q) target:(%q) options:(%q) errcode:(%v) output:(%v) ", source, fstype, target, options, err, string(output))
klog.Error(detailedErr)
return NewMountError(FormatFailed, detailedErr)
}
klog.Infof("Disk successfully formatted (mkfs): %s - %s %s", fstype, source, target)
} else {
if fstype != existingFormat {
// Verify that the disk is formatted with filesystem type we are expecting
mountErrorValue = FilesystemMismatch
klog.Warningf("Configured to mount disk %s as %s but current format is %s, things might break", source, existingFormat, fstype)
}
if !readOnly {
// Run check tools on the disk to fix repairable issues, only do this for formatted volumes requested as rw.
var err error
switch existingFormat {
case "xfs":
err = mounter.checkAndRepairXfsFilesystem(source)
default:
err = mounter.checkAndRepairFilesystem(source)
}
if err != nil {
return err
}
}
}
// Mount the disk
klog.V(4).Infof("Attempting to mount disk %s in %s format at %s", source, fstype, target)
if err := mounter.Interface.Mount(source, target, fstype, options); err != nil {
return NewMountError(mountErrorValue, err.Error())
}
return nil
}
// GetDiskFormat uses 'blkid' to see if the given disk is unformatted
func (mounter *SafeFormatAndMount) GetDiskFormat(disk string) (string, error) {
args := []string{"-p", "-s", "TYPE", "-s", "PTTYPE", "-o", "export", disk}
klog.V(4).Infof("Attempting to determine if disk %q is formatted using blkid with args: (%v)", disk, args)
dataOut, err := mounter.Exec.Command("blkid", args...).CombinedOutput()
output := string(dataOut)
klog.V(4).Infof("Output: %q, err: %v", output, err)
if err != nil {
if exit, ok := err.(utilexec.ExitError); ok {
if exit.ExitStatus() == 2 {
// Disk device is unformatted.
// For `blkid`, if the specified token (TYPE/PTTYPE, etc) was
// not found, or no (specified) devices could be identified, an
// exit code of 2 is returned.
return "", nil
}
}
klog.Errorf("Could not determine if disk %q is formatted (%v)", disk, err)
return "", err
}
var fstype, pttype string
lines := strings.Split(output, "\n")
for _, l := range lines {
if len(l) <= 0 {
// Ignore empty line.
continue
}
cs := strings.Split(l, "=")
if len(cs) != 2 {
return "", fmt.Errorf("blkid returns invalid output: %s", output)
}
// TYPE is filesystem type, and PTTYPE is partition table type, according
// to https://www.kernel.org/pub/linux/utils/util-linux/v2.21/libblkid-docs/.
if cs[0] == "TYPE" {
fstype = cs[1]
} else if cs[0] == "PTTYPE" {
pttype = cs[1]
}
}
if len(pttype) > 0 {
klog.V(4).Infof("Disk %s detected partition table type: %s", disk, pttype)
// Returns a special non-empty string as filesystem type, then kubelet
// will not format it.
return "unknown data, probably partitions", nil
}
return fstype, nil
}
// ListProcMounts is shared with NsEnterMounter
func ListProcMounts(mountFilePath string) ([]MountPoint, error) {
content, err := utilio.ConsistentRead(mountFilePath, maxListTries)
if err != nil {
return nil, err
}
return parseProcMounts(content)
}
func parseProcMounts(content []byte) ([]MountPoint, error) {
out := []MountPoint{}
lines := strings.Split(string(content), "\n")
for _, line := range lines {
if line == "" {
// the last split() item is empty string following the last \n
continue
}
fields := strings.Fields(line)
if len(fields) != expectedNumFieldsPerLine {
return nil, fmt.Errorf("wrong number of fields (expected %d, got %d): %s", expectedNumFieldsPerLine, len(fields), line)
}
mp := MountPoint{
Device: fields[0],
Path: fields[1],
Type: fields[2],
Opts: strings.Split(fields[3], ","),
}
freq, err := strconv.Atoi(fields[4])
if err != nil {
return nil, err
}
mp.Freq = freq
pass, err := strconv.Atoi(fields[5])
if err != nil {
return nil, err
}
mp.Pass = pass
out = append(out, mp)
}
return out, nil
}
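// Editor's sketch (not part of the vendored source): a typical /proc/mounts
// line such as
//
//	/dev/sda1 /boot ext4 rw,relatime 0 0
//
// parses into MountPoint{Device: "/dev/sda1", Path: "/boot", Type: "ext4",
// Opts: []string{"rw", "relatime"}, Freq: 0, Pass: 0}; a line whose field
// count differs from expectedNumFieldsPerLine (6) makes parseProcMounts fail.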
// SearchMountPoints finds all mount references to the source, returns a list of
// mountpoints.
// The source can be a mount point or a normal directory (bind mount). We
// don't support a device as the source because there is no use case for it yet.
// Some filesystems may share a source name, e.g. tmpfs. And for bind mounting,
// it's possible to mount a non-root path of a filesystem, so we need to use
// the root path and major:minor to represent the mount source uniquely.
// This implementation is shared between Linux and NsEnterMounter.
func SearchMountPoints(hostSource, mountInfoPath string) ([]string, error) {
mis, err := ParseMountInfo(mountInfoPath)
if err != nil {
return nil, err
}
mountID := 0
rootPath := ""
major := -1
minor := -1
// Finding the underlying root path and major:minor if possible.
// We need to search in reverse order because it's possible for later mounts
// to overlap earlier mounts.
for i := len(mis) - 1; i >= 0; i-- {
if hostSource == mis[i].MountPoint || PathWithinBase(hostSource, mis[i].MountPoint) {
// If it's a mount point or path under a mount point.
mountID = mis[i].ID
rootPath = filepath.Join(mis[i].Root, strings.TrimPrefix(hostSource, mis[i].MountPoint))
major = mis[i].Major
minor = mis[i].Minor
break
}
}
if rootPath == "" || major == -1 || minor == -1 {
return nil, fmt.Errorf("failed to get root path and major:minor for %s", hostSource)
}
var refs []string
for i := range mis {
if mis[i].ID == mountID {
// Ignore mount entry for mount source itself.
continue
}
if mis[i].Root == rootPath && mis[i].Major == major && mis[i].Minor == minor {
refs = append(refs, mis[i].MountPoint)
}
}
return refs, nil
}
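// Editor's sketch (not part of the vendored source): if /dev/sdb is mounted at
// /mnt/disk and that path is bind-mounted into two pod volume directories,
//
//	refs, err := SearchMountPoints("/mnt/disk", "/proc/self/mountinfo")
//
// returns those two bind-mount targets: the source's own entry is matched
// first (scanning backwards) to learn its root path and major:minor, its own
// mount ID is then skipped, and every other entry with the same root and
// device numbers is reported as a reference.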

72
vendor/k8s.io/utils/mount/mount_unsupported.go generated vendored Normal file
View File

@ -0,0 +1,72 @@
// +build !linux,!windows
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mount
import (
"errors"
)
// Mounter implements mount.Interface for unsupported platforms
type Mounter struct {
mounterPath string
}
var errUnsupported = errors.New("util/mount on this platform is not supported")
// New returns a mount.Interface for the current system.
// It provides options to override the default mounter behavior.
// mounterPath allows using an alternative to `/bin/mount` for mounting.
func New(mounterPath string) Interface {
return &Mounter{
mounterPath: mounterPath,
}
}
// Mount always returns an error on unsupported platforms
func (mounter *Mounter) Mount(source string, target string, fstype string, options []string) error {
return errUnsupported
}
// Unmount always returns an error on unsupported platforms
func (mounter *Mounter) Unmount(target string) error {
return errUnsupported
}
// List always returns an error on unsupported platforms
func (mounter *Mounter) List() ([]MountPoint, error) {
return []MountPoint{}, errUnsupported
}
// IsLikelyNotMountPoint always returns an error on unsupported platforms
func (mounter *Mounter) IsLikelyNotMountPoint(file string) (bool, error) {
return true, errUnsupported
}
// GetMountRefs always returns an error on unsupported platforms
func (mounter *Mounter) GetMountRefs(pathname string) ([]string, error) {
return nil, errUnsupported
}
func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, fstype string, options []string) error {
return mounter.Interface.Mount(source, target, fstype, options)
}
func (mounter *SafeFormatAndMount) diskLooksUnformatted(disk string) (bool, error) {
return true, errUnsupported
}

280
vendor/k8s.io/utils/mount/mount_windows.go generated vendored Normal file
View File

@ -0,0 +1,280 @@
// +build windows
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mount
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"k8s.io/klog"
utilexec "k8s.io/utils/exec"
"k8s.io/utils/keymutex"
utilpath "k8s.io/utils/path"
)
// Mounter provides the default implementation of mount.Interface
// for the windows platform. This implementation assumes that the
// kubelet is running in the host's root mount namespace.
type Mounter struct {
mounterPath string
}
// New returns a mount.Interface for the current system.
// It provides options to override the default mounter behavior.
// mounterPath allows using an alternative to `/bin/mount` for mounting.
func New(mounterPath string) Interface {
return &Mounter{
mounterPath: mounterPath,
}
}
// acquire lock for smb mount
var getSMBMountMutex = keymutex.NewHashed(0)
// Mount : mounts source to target with given options.
// currently only supports cifs(smb), bind mount(for disk)
func (mounter *Mounter) Mount(source string, target string, fstype string, options []string) error {
target = NormalizeWindowsPath(target)
if source == "tmpfs" {
klog.V(3).Infof("mounting source (%q), target (%q), with options (%q)", source, target, options)
return os.MkdirAll(target, 0755)
}
parentDir := filepath.Dir(target)
if err := os.MkdirAll(parentDir, 0755); err != nil {
return err
}
klog.V(4).Infof("mount options(%q) source:%q, target:%q, fstype:%q, begin to mount",
options, source, target, fstype)
bindSource := source
// tell whether it's going to mount an azure disk or an azure file according to the options
if bind, _, _ := MakeBindOpts(options); bind {
// mount azure disk
bindSource = NormalizeWindowsPath(source)
} else {
if len(options) < 2 {
klog.Warningf("mount options(%q) command number(%d) less than 2, source:%q, target:%q, skip mounting",
options, len(options), source, target)
return nil
}
// currently only cifs mount is supported
if strings.ToLower(fstype) != "cifs" {
return fmt.Errorf("only cifs mount is supported now, fstype: %q, mounting source (%q), target (%q), with options (%q)", fstype, source, target, options)
}
// lock smb mount for the same source
getSMBMountMutex.LockKey(source)
defer getSMBMountMutex.UnlockKey(source)
if output, err := newSMBMapping(options[0], options[1], source); err != nil {
if isSMBMappingExist(source) {
klog.V(2).Infof("SMB Mapping(%s) already exists, now begin to remove and remount", source)
if output, err := removeSMBMapping(source); err != nil {
return fmt.Errorf("Remove-SmbGlobalMapping failed: %v, output: %q", err, output)
}
if output, err := newSMBMapping(options[0], options[1], source); err != nil {
return fmt.Errorf("New-SmbGlobalMapping remount failed: %v, output: %q", err, output)
}
} else {
return fmt.Errorf("New-SmbGlobalMapping failed: %v, output: %q", err, output)
}
}
}
if output, err := exec.Command("cmd", "/c", "mklink", "/D", target, bindSource).CombinedOutput(); err != nil {
klog.Errorf("mklink failed: %v, source(%q) target(%q) output: %q", err, bindSource, target, string(output))
return err
}
return nil
}
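// Editor's sketch (not part of the vendored source): for a cifs mount the
// caller is expected to pass the credentials as the first two options, so a
// hypothetical call looks like
//
//	m := New("")
//	err := m.Mount(`\\smb-server\share`, `C:\mnt\smb`, "cifs",
//		[]string{`DOMAIN\user`, "secret"})
//
// which first establishes an SMB global mapping for the share and then exposes
// the target as a directory symlink via "mklink /D"; a bind-style mount (e.g.
// an attached azure disk) instead carries the "bind" option and a local source
// path, and "tmpfs" only creates the target directory.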
// do the SMB mount with username, password, remotepath
// return (output, error)
func newSMBMapping(username, password, remotepath string) (string, error) {
if username == "" || password == "" || remotepath == "" {
return "", fmt.Errorf("invalid parameter (username: %s, password: %s, remotepath: %s)", username, password, remotepath)
}
// use PowerShell Environment Variables to store user input string to prevent command line injection
// https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_environment_variables?view=powershell-5.1
cmdLine := `$PWord = ConvertTo-SecureString -String $Env:smbpassword -AsPlainText -Force` +
`;$Credential = New-Object -TypeName System.Management.Automation.PSCredential -ArgumentList $Env:smbuser, $PWord` +
`;New-SmbGlobalMapping -RemotePath $Env:smbremotepath -Credential $Credential`
cmd := exec.Command("powershell", "/c", cmdLine)
cmd.Env = append(os.Environ(),
fmt.Sprintf("smbuser=%s", username),
fmt.Sprintf("smbpassword=%s", password),
fmt.Sprintf("smbremotepath=%s", remotepath))
output, err := cmd.CombinedOutput()
return string(output), err
}
// check whether remotepath is already mounted
func isSMBMappingExist(remotepath string) bool {
cmd := exec.Command("powershell", "/c", `Get-SmbGlobalMapping -RemotePath $Env:smbremotepath`)
cmd.Env = append(os.Environ(), fmt.Sprintf("smbremotepath=%s", remotepath))
_, err := cmd.CombinedOutput()
return err == nil
}
// remove SMB mapping
func removeSMBMapping(remotepath string) (string, error) {
cmd := exec.Command("powershell", "/c", `Remove-SmbGlobalMapping -RemotePath $Env:smbremotepath -Force`)
cmd.Env = append(os.Environ(), fmt.Sprintf("smbremotepath=%s", remotepath))
output, err := cmd.CombinedOutput()
return string(output), err
}
// Unmount unmounts the target.
func (mounter *Mounter) Unmount(target string) error {
klog.V(4).Infof("azureMount: Unmount target (%q)", target)
target = NormalizeWindowsPath(target)
if output, err := exec.Command("cmd", "/c", "rmdir", target).CombinedOutput(); err != nil {
klog.Errorf("rmdir failed: %v, output: %q", err, string(output))
return err
}
return nil
}
// List returns a list of all mounted filesystems. TODO: it currently returns an empty list on Windows.
func (mounter *Mounter) List() ([]MountPoint, error) {
return []MountPoint{}, nil
}
// IsLikelyNotMountPoint determines if a directory is not a mountpoint.
func (mounter *Mounter) IsLikelyNotMountPoint(file string) (bool, error) {
stat, err := os.Lstat(file)
if err != nil {
return true, err
}
// If current file is a symlink, then it is a mountpoint.
if stat.Mode()&os.ModeSymlink != 0 {
target, err := os.Readlink(file)
if err != nil {
return true, fmt.Errorf("readlink error: %v", err)
}
exists, err := utilpath.Exists(utilpath.CheckFollowSymlink, target)
if err != nil {
return true, err
}
return !exists, nil
}
return true, nil
}
// GetMountRefs : empty implementation here since there is no place to query all mount points on Windows
func (mounter *Mounter) GetMountRefs(pathname string) ([]string, error) {
windowsPath := NormalizeWindowsPath(pathname)
pathExists, pathErr := PathExists(windowsPath)
if !pathExists {
return []string{}, nil
} else if IsCorruptedMnt(pathErr) {
klog.Warningf("GetMountRefs found corrupted mount at %s, treating as unmounted path", windowsPath)
return []string{}, nil
} else if pathErr != nil {
return nil, fmt.Errorf("error checking path %s: %v", windowsPath, pathErr)
}
return []string{pathname}, nil
}
func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, fstype string, options []string) error {
// Try to mount the disk
klog.V(4).Infof("Attempting to formatAndMount disk: %s %s %s", fstype, source, target)
if err := ValidateDiskNumber(source); err != nil {
klog.Errorf("diskMount: formatAndMount failed, err: %v", err)
return err
}
if len(fstype) == 0 {
// Use 'NTFS' as the default
fstype = "NTFS"
}
// format the disk if it is unformatted (raw)
cmd := fmt.Sprintf("Get-Disk -Number %s | Where partitionstyle -eq 'raw' | Initialize-Disk -PartitionStyle MBR -PassThru"+
" | New-Partition -AssignDriveLetter -UseMaximumSize | Format-Volume -FileSystem %s -Confirm:$false", source, fstype)
if output, err := mounter.Exec.Command("powershell", "/c", cmd).CombinedOutput(); err != nil {
return fmt.Errorf("diskMount: format disk failed, error: %v, output: %q", err, string(output))
}
klog.V(4).Infof("diskMount: Disk successfully formatted, disk: %q, fstype: %q", source, fstype)
driveLetter, err := getDriveLetterByDiskNumber(source, mounter.Exec)
if err != nil {
return err
}
driverPath := driveLetter + ":"
target = NormalizeWindowsPath(target)
klog.V(4).Infof("Attempting to formatAndMount disk: %s %s %s", fstype, driverPath, target)
if output, err := mounter.Exec.Command("cmd", "/c", "mklink", "/D", target, driverPath).CombinedOutput(); err != nil {
klog.Errorf("mklink failed: %v, output: %q", err, string(output))
return err
}
return nil
}
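// Editor's sketch (not part of the vendored source): with source "1" and the
// default filesystem, the PowerShell pipeline built above expands to
//
//	Get-Disk -Number 1 | Where partitionstyle -eq 'raw' | Initialize-Disk -PartitionStyle MBR -PassThru | New-Partition -AssignDriveLetter -UseMaximumSize | Format-Volume -FileSystem NTFS -Confirm:$false
//
// after which the assigned drive letter (say "E") is looked up with
// getDriveLetterByDiskNumber and the target directory is created as a symlink
// to "E:" via "mklink /D".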
// Get drive letter according to windows disk number
func getDriveLetterByDiskNumber(diskNum string, exec utilexec.Interface) (string, error) {
cmd := fmt.Sprintf("(Get-Partition -DiskNumber %s).DriveLetter", diskNum)
output, err := exec.Command("powershell", "/c", cmd).CombinedOutput()
if err != nil {
return "", fmt.Errorf("azureMount: Get Drive Letter failed: %v, output: %q", err, string(output))
}
if len(string(output)) < 1 {
return "", fmt.Errorf("azureMount: Get Drive Letter failed, output is empty")
}
return string(output)[:1], nil
}
// getAllParentLinks walks all symbolic links and return all the parent targets recursively
func getAllParentLinks(path string) ([]string, error) {
const maxIter = 255
links := []string{}
for {
links = append(links, path)
if len(links) > maxIter {
return links, fmt.Errorf("unexpected length of parent links: %v", links)
}
fi, err := os.Lstat(path)
if err != nil {
return links, fmt.Errorf("Lstat: %v", err)
}
if fi.Mode()&os.ModeSymlink == 0 {
break
}
path, err = os.Readlink(path)
if err != nil {
return links, fmt.Errorf("Readlink error: %v", err)
}
}
return links, nil
}
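// Editor's sketch (not part of the vendored source): for a chain of symlinks
// C:\link1 -> C:\link2 -> C:\target, a hypothetical call
//
//	links, err := getAllParentLinks(`C:\link1`)
//
// returns []string{`C:\link1`, `C:\link2`, `C:\target`}: each hop is appended
// before it is inspected, the walk stops at the first non-symlink entry, and
// more than maxIter (255) hops is reported as an error.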

100
vendor/k8s.io/utils/net/ipnet.go generated vendored
View File

@ -17,6 +17,7 @@ limitations under the License.
package net
import (
"fmt"
"net"
"strings"
)
@ -119,3 +120,102 @@ func (s IPNetSet) Equal(s2 IPNetSet) bool {
func (s IPNetSet) Len() int {
return len(s)
}
// IPSet maps string to net.IP
type IPSet map[string]net.IP
// ParseIPSet parses string slice to IPSet
func ParseIPSet(items ...string) (IPSet, error) {
ipset := make(IPSet)
for _, item := range items {
ip := net.ParseIP(strings.TrimSpace(item))
if ip == nil {
return nil, fmt.Errorf("error parsing IP %q", item)
}
ipset[ip.String()] = ip
}
return ipset, nil
}
// Insert adds items to the set.
func (s IPSet) Insert(items ...net.IP) {
for _, item := range items {
s[item.String()] = item
}
}
// Delete removes all items from the set.
func (s IPSet) Delete(items ...net.IP) {
for _, item := range items {
delete(s, item.String())
}
}
// Has returns true if and only if item is contained in the set.
func (s IPSet) Has(item net.IP) bool {
_, contained := s[item.String()]
return contained
}
// HasAll returns true if and only if all items are contained in the set.
func (s IPSet) HasAll(items ...net.IP) bool {
for _, item := range items {
if !s.Has(item) {
return false
}
}
return true
}
// Difference returns a set of objects that are not in s2
// For example:
// s1 = {a1, a2, a3}
// s2 = {a1, a2, a4, a5}
// s1.Difference(s2) = {a3}
// s2.Difference(s1) = {a4, a5}
func (s IPSet) Difference(s2 IPSet) IPSet {
result := make(IPSet)
for k, i := range s {
_, found := s2[k]
if found {
continue
}
result[k] = i
}
return result
}
// StringSlice returns a []string with the String representation of each element in the set.
// Order is undefined.
func (s IPSet) StringSlice() []string {
a := make([]string, 0, len(s))
for k := range s {
a = append(a, k)
}
return a
}
// IsSuperset returns true if and only if s1 is a superset of s2.
func (s IPSet) IsSuperset(s2 IPSet) bool {
for k := range s2 {
_, found := s[k]
if !found {
return false
}
}
return true
}
// Equal returns true if and only if s1 is equal (as a set) to s2.
// Two sets are equal if their membership is identical.
// (In practice, this means same elements, order doesn't matter)
func (s IPSet) Equal(s2 IPSet) bool {
return len(s) == len(s2) && s.IsSuperset(s2)
}
// Len returns the size of the set.
func (s IPSet) Len() int {
return len(s)
}
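// Editor's sketch (not part of the vendored source): IPSet keys entries by the
// String() form of each address, so duplicates collapse and membership checks
// are plain map lookups, e.g.
//
//	s, _ := ParseIPSet("10.0.0.1", "10.0.0.2")
//	s.Insert(net.ParseIP("10.0.0.1"))   // no-op, already present
//	other, _ := ParseIPSet("10.0.0.2")
//	diff := s.Difference(other)         // contains only 10.0.0.1
//	_ = diff.StringSlice()              // []string{"10.0.0.1"}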

154
vendor/k8s.io/utils/net/net.go generated vendored
View File

@ -16,7 +16,102 @@ limitations under the License.
package net
import "net"
import (
"errors"
"fmt"
"math"
"math/big"
"net"
"strconv"
)
// ParseCIDRs parses a list of cidrs and returns an error if any is invalid.
// Order is maintained.
func ParseCIDRs(cidrsString []string) ([]*net.IPNet, error) {
cidrs := make([]*net.IPNet, 0, len(cidrsString))
for _, cidrString := range cidrsString {
_, cidr, err := net.ParseCIDR(cidrString)
if err != nil {
return nil, fmt.Errorf("failed to parse cidr value:%q with error:%v", cidrString, err)
}
cidrs = append(cidrs, cidr)
}
return cidrs, nil
}
// IsDualStackIPs returns true if:
// - all IPs in the slice are valid
// - there is at least one IP from each family (v4 and v6)
func IsDualStackIPs(ips []net.IP) (bool, error) {
v4Found := false
v6Found := false
for _, ip := range ips {
if ip == nil {
return false, fmt.Errorf("ip %v is invalid", ip)
}
if v4Found && v6Found {
continue
}
if IsIPv6(ip) {
v6Found = true
continue
}
v4Found = true
}
return (v4Found && v6Found), nil
}
// IsDualStackIPStrings returns true if:
// - all strings parse as valid IPs
// - there is at least one IP from each family (v4 and v6)
func IsDualStackIPStrings(ips []string) (bool, error) {
parsedIPs := make([]net.IP, 0, len(ips))
for _, ip := range ips {
parsedIP := net.ParseIP(ip)
parsedIPs = append(parsedIPs, parsedIP)
}
return IsDualStackIPs(parsedIPs)
}
// IsDualStackCIDRs returns true if:
// - all CIDRs are valid
// - there is at least one CIDR from each family (v4 and v6)
func IsDualStackCIDRs(cidrs []*net.IPNet) (bool, error) {
v4Found := false
v6Found := false
for _, cidr := range cidrs {
if cidr == nil {
return false, fmt.Errorf("cidr %v is invalid", cidr)
}
if v4Found && v6Found {
continue
}
if IsIPv6(cidr.IP) {
v6Found = true
continue
}
v4Found = true
}
return v4Found && v6Found, nil
}
// IsDualStackCIDRStrings returns true if:
// - all strings parse as valid CIDRs
// - there is at least one CIDR from each family (v4 and v6)
func IsDualStackCIDRStrings(cidrs []string) (bool, error) {
parsedCIDRs, err := ParseCIDRs(cidrs)
if err != nil {
return false, err
}
return IsDualStackCIDRs(parsedCIDRs)
}
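// Editor's sketch (not part of the vendored source): the dual-stack helpers
// only require one address (or CIDR) from each family, e.g.
//
//	ok1, _ := IsDualStackIPStrings([]string{"10.0.0.1", "fd00::1"})    // true
//	ok2, _ := IsDualStackIPStrings([]string{"10.0.0.1", "10.0.0.2"})   // false
//	_, err := IsDualStackIPStrings([]string{"10.0.0.1", "not-an-ip"})  // non-nil error
//
// the CIDR variants behave the same way for "10.0.0.0/24" plus "fd00::/64".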
// IsIPv6 returns if netIP is IPv6.
func IsIPv6(netIP net.IP) bool {
@ -35,3 +130,60 @@ func IsIPv6CIDRString(cidr string) bool {
ip, _, _ := net.ParseCIDR(cidr)
return IsIPv6(ip)
}
// IsIPv6CIDR returns if a cidr is ipv6
func IsIPv6CIDR(cidr *net.IPNet) bool {
ip := cidr.IP
return IsIPv6(ip)
}
// ParsePort parses a string representing an IP port. If the string is not a
// valid port number, this returns an error.
func ParsePort(port string, allowZero bool) (int, error) {
portInt, err := strconv.ParseUint(port, 10, 16)
if err != nil {
return 0, err
}
if portInt == 0 && !allowZero {
return 0, errors.New("0 is not a valid port number")
}
return int(portInt), nil
}
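// Editor's sketch (not part of the vendored source):
//
//	p, _ := ParsePort("8080", false)    // p == 8080
//	_, err1 := ParsePort("0", false)    // error: 0 is not a valid port number
//	_, err2 := ParsePort("70000", true) // error: outside the uint16 range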
// BigForIP creates a big.Int based on the provided net.IP
func BigForIP(ip net.IP) *big.Int {
b := ip.To4()
if b == nil {
b = ip.To16()
}
return big.NewInt(0).SetBytes(b)
}
// AddIPOffset adds the provided integer offset to a base big.Int representing a
// net.IP
func AddIPOffset(base *big.Int, offset int) net.IP {
return net.IP(big.NewInt(0).Add(base, big.NewInt(int64(offset))).Bytes())
}
// RangeSize returns the size of a range in valid addresses.
// returns the size of the subnet (or math.MaxInt64 if the range size would overflow int64)
func RangeSize(subnet *net.IPNet) int64 {
ones, bits := subnet.Mask.Size()
if bits == 32 && (bits-ones) >= 31 || bits == 128 && (bits-ones) >= 127 {
return 0
}
// this checks that we are not overflowing an int64
if bits-ones >= 63 {
return math.MaxInt64
}
return int64(1) << uint(bits-ones)
}
// GetIndexedIP returns a net.IP that is subnet.IP + index in the contiguous IP space.
func GetIndexedIP(subnet *net.IPNet, index int) (net.IP, error) {
ip := AddIPOffset(BigForIP(subnet.IP), index)
if !subnet.Contains(ip) {
return nil, fmt.Errorf("can't generate IP with index %d from subnet. subnet too small. subnet: %q", index, subnet)
}
return ip, nil
}
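// Editor's sketch (not part of the vendored source): the helpers above do
// subnet arithmetic on big.Int values, e.g. for 10.0.0.0/24:
//
//	_, subnet, _ := net.ParseCIDR("10.0.0.0/24")
//	size := RangeSize(subnet)           // 256
//	ip, _ := GetIndexedIP(subnet, 5)    // 10.0.0.5
//	_, err := GetIndexedIP(subnet, 300) // error: subnet too small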

View File

@ -56,7 +56,7 @@ func Int64Ptr(i int64) *int64 {
return &i
}
// Int32PtrDerefOr dereference the int32 ptr and returns it i not nil,
// Int32PtrDerefOr dereference the int32 ptr and returns it if not nil,
// else returns def.
func Int32PtrDerefOr(ptr *int32, def int32) int32 {
if ptr != nil {

51
vendor/k8s.io/utils/trace/trace.go generated vendored
View File

@ -25,31 +25,55 @@ import (
"k8s.io/klog"
)
// Field is a key value pair that provides additional details about the trace.
type Field struct {
Key string
Value interface{}
}
func (f Field) format() string {
return fmt.Sprintf("%s:%v", f.Key, f.Value)
}
func writeFields(b *bytes.Buffer, l []Field) {
for i, f := range l {
b.WriteString(f.format())
if i < len(l)-1 {
b.WriteString(",")
}
}
}
type traceStep struct {
stepTime time.Time
msg string
fields []Field
}
// Trace keeps track of a set of "steps" and allows us to log a specific
// step if it took longer than its share of the total allowed time
type Trace struct {
name string
fields []Field
startTime time.Time
steps []traceStep
}
// New creates a Trace with the specified name
func New(name string) *Trace {
return &Trace{name, time.Now(), nil}
// New creates a Trace with the specified name. The name identifies the operation to be traced. The
// Fields add key value pairs to provide additional details about the trace, such as operation inputs.
func New(name string, fields ...Field) *Trace {
return &Trace{name: name, startTime: time.Now(), fields: fields}
}
// Step adds a new step with a specific message
func (t *Trace) Step(msg string) {
// Step adds a new step with a specific message. Call this at the end of an execution step to record
// how long it took. The Fields add key value pairs to provide additional details about the trace
// step.
func (t *Trace) Step(msg string, fields ...Field) {
if t.steps == nil {
// traces almost always have less than 6 steps, do this to avoid more than a single allocation
t.steps = make([]traceStep, 0, 6)
}
t.steps = append(t.steps, traceStep{time.Now(), msg})
t.steps = append(t.steps, traceStep{stepTime: time.Now(), msg: msg, fields: fields})
}
// Log is used to dump all the steps in the Trace
@ -64,12 +88,23 @@ func (t *Trace) logWithStepThreshold(stepThreshold time.Duration) {
endTime := time.Now()
totalTime := endTime.Sub(t.startTime)
buffer.WriteString(fmt.Sprintf("Trace[%d]: %q (started: %v) (total time: %v):\n", tracenum, t.name, t.startTime, totalTime))
buffer.WriteString(fmt.Sprintf("Trace[%d]: %q ", tracenum, t.name))
if len(t.fields) > 0 {
writeFields(&buffer, t.fields)
buffer.WriteString(" ")
}
buffer.WriteString(fmt.Sprintf("(started: %v) (total time: %v):\n", t.startTime, totalTime))
lastStepTime := t.startTime
for _, step := range t.steps {
stepDuration := step.stepTime.Sub(lastStepTime)
if stepThreshold == 0 || stepDuration > stepThreshold || klog.V(4) {
buffer.WriteString(fmt.Sprintf("Trace[%d]: [%v] [%v] %v\n", tracenum, step.stepTime.Sub(t.startTime), stepDuration, step.msg))
buffer.WriteString(fmt.Sprintf("Trace[%d]: [%v] [%v] ", tracenum, step.stepTime.Sub(t.startTime), stepDuration))
buffer.WriteString(step.msg)
if len(step.fields) > 0 {
buffer.WriteString(" ")
writeFields(&buffer, step.fields)
}
buffer.WriteString("\n")
}
lastStepTime = step.stepTime
}

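// Editor's sketch (not part of the vendored diff): with the Field support added
// above, a caller importing k8s.io/utils/trace can annotate both the trace and
// its steps; LogIfLong and the "time" import are assumed from the surrounding
// upstream package, since only New and Step appear in this hunk.
//
//	t := trace.New("list pods", trace.Field{Key: "namespace", Value: "default"})
//	defer t.LogIfLong(500 * time.Millisecond)
//	t.Step("listed from etcd", trace.Field{Key: "count", Value: 42})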
20
vendor/modules.txt vendored
View File

@ -21,7 +21,7 @@ github.com/emicklei/go-restful/log
github.com/evanphx/json-patch
# github.com/fatih/camelcase v1.0.0
github.com/fatih/camelcase
# github.com/ghodss/yaml v1.0.0
# github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4
github.com/ghodss/yaml
# github.com/go-openapi/jsonpointer v0.19.0
github.com/go-openapi/jsonpointer
@ -127,26 +127,29 @@ github.com/opencontainers/go-digest
github.com/pborman/uuid
# github.com/peterbourgon/diskv v2.0.1+incompatible
github.com/peterbourgon/diskv
# github.com/pkg/errors v0.8.1
# github.com/pkg/errors v0.8.0
github.com/pkg/errors
# github.com/pmezard/go-difflib v1.0.0
github.com/pmezard/go-difflib/difflib
# github.com/prometheus/client_golang v0.9.2
github.com/prometheus/client_golang/prometheus
github.com/prometheus/client_golang/prometheus/internal
# github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90
# github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910
github.com/prometheus/client_model/go
# github.com/prometheus/common v0.3.0
# github.com/prometheus/common v0.0.0-20181126121408-4724e9255275
github.com/prometheus/common/expfmt
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
github.com/prometheus/common/model
# github.com/prometheus/procfs v0.0.0-20190412120340-e22ddced7142
# github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/util
github.com/prometheus/procfs/nfs
github.com/prometheus/procfs/xfs
# github.com/spf13/cobra v0.0.3
github.com/spf13/cobra
# github.com/spf13/pflag v1.0.1
github.com/spf13/pflag
# github.com/stretchr/testify v1.2.2
# github.com/stretchr/testify v1.3.0
github.com/stretchr/testify/assert
github.com/stretchr/testify/require
# golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
@ -564,7 +567,7 @@ k8s.io/component-base/config
# k8s.io/csi-translation-lib v0.0.0-20190415134207-82f1dfd98d10
k8s.io/csi-translation-lib
k8s.io/csi-translation-lib/plugins
# k8s.io/klog v0.0.0-20190306015804-8e90cee79f82
# k8s.io/klog v0.3.0
k8s.io/klog
# k8s.io/kube-aggregator v0.0.0-20190415133304-80ce4e5a0cbc
k8s.io/kube-aggregator/pkg/apis/apiregistration
@ -779,12 +782,13 @@ k8s.io/kubernetes/test/e2e/storage/utils
k8s.io/kubernetes/test/utils
k8s.io/kubernetes/test/utils/image
k8s.io/kubernetes/third_party/forked/golang/expansion
# k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7
# k8s.io/utils v0.0.0-20200124190032-861946025e34
k8s.io/utils/buffer
k8s.io/utils/exec
k8s.io/utils/integer
k8s.io/utils/io
k8s.io/utils/keymutex
k8s.io/utils/mount
k8s.io/utils/net
k8s.io/utils/nsenter
k8s.io/utils/path