Merge pull request #622 from kubernetes-csi/dependabot/go_modules/github.com/onsi/ginkgo/v2-2.16.0
chore(deps): bump github.com/onsi/ginkgo/v2 from 2.15.0 to 2.16.0
commit 13ac8012d8

go.mod (4 changed lines)

@@ -6,7 +6,7 @@ require (
 	github.com/container-storage-interface/spec v1.8.0
 	github.com/golang/protobuf v1.5.3
 	github.com/kubernetes-csi/csi-lib-utils v0.9.0
-	github.com/onsi/ginkgo/v2 v2.15.0
+	github.com/onsi/ginkgo/v2 v2.16.0
 	github.com/onsi/gomega v1.31.1
 	github.com/pborman/uuid v1.2.1
 	github.com/stretchr/testify v1.9.0
@@ -102,7 +102,7 @@ require (
 	golang.org/x/term v0.17.0 // indirect
 	golang.org/x/text v0.14.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
-	golang.org/x/tools v0.16.1 // indirect
+	golang.org/x/tools v0.17.0 // indirect
 	google.golang.org/appengine v1.6.8 // indirect
 	google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect
 	google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect
go.sum (8 changed lines)

@@ -293,8 +293,8 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+
 github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY=
-github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM=
+github.com/onsi/ginkgo/v2 v2.16.0 h1:7q1w9frJDzninhXxjZd+Y/x54XNjG/UlRLIYPZafsPM=
+github.com/onsi/ginkgo/v2 v2.16.0/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.31.1 h1:KYppCUK+bUgAZwHOu7EXVBKyQA6ILvOESHkn/tgoqvo=
@@ -568,8 +568,8 @@ golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapK
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA=
-golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
+golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
+golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md (19 changed lines, generated, vendored)

@@ -1,3 +1,22 @@
+## 2.16.0
+
+### Features
+- add SpecContext to reporting nodes
+
+### Fixes
+- merge coverages instead of combining them (#1329) (#1340) [23f0cc5]
+- core_dsl: disable Getwd() with environment variable (#1357) [cd418b7]
+
+### Maintenance
+- docs/index.md: Typo [2cebe8d]
+- fix docs [06de431]
+- chore: test with Go 1.22 (#1352) [898cba9]
+- Bump golang.org/x/tools from 0.16.1 to 0.17.0 (#1336) [17ae120]
+- Bump golang.org/x/sys from 0.15.0 to 0.16.0 (#1327) [5a179ed]
+- Bump github.com/go-logr/logr from 1.3.0 to 1.4.1 (#1321) [a1e6b69]
+- Bump github-pages and jekyll-feed in /docs (#1351) [d52951d]
+- Fix docs for handling failures in goroutines (#1339) [4471b2e]
+
 ## 2.15.0
 
 ### Features
vendor/github.com/onsi/ginkgo/v2/core_dsl.go (13 changed lines, generated, vendored)

@@ -292,7 +292,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {

 	err = global.Suite.BuildTree()
 	exitIfErr(err)
-	suitePath, err := os.Getwd()
+	suitePath, err := getwd()
 	exitIfErr(err)
 	suitePath, err = filepath.Abs(suitePath)
 	exitIfErr(err)
@@ -345,6 +345,15 @@ func extractSuiteConfiguration(args []interface{}) Labels {
 	return suiteLabels
 }

+func getwd() (string, error) {
+	if !strings.EqualFold(os.Getenv("GINKGO_PRESERVE_CACHE"), "true") {
+		// Getwd calls os.Getenv("PWD"), which breaks test caching if the cache
+		// is shared between two different directories with the same test code.
+		return os.Getwd()
+	}
+	return "", nil
+}
+
 /*
 PreviewSpecs walks the testing tree and produces a report without actually invoking the specs.
 See http://onsi.github.io/ginkgo/#previewing-specs for more information.
@@ -369,7 +378,7 @@ func PreviewSpecs(description string, args ...any) Report {

 	err = global.Suite.BuildTree()
 	exitIfErr(err)
-	suitePath, err := os.Getwd()
+	suitePath, err := getwd()
 	exitIfErr(err)
 	suitePath, err = filepath.Abs(suitePath)
 	exitIfErr(err)
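Aside (not part of this diff): the getwd helper above is the hook for the "core_dsl: disable Getwd() with environment variable" fix in the changelog. A minimal sketch of how a suite might opt in; the package and suite names are illustrative, and the usual approach is to export GINKGO_PRESERVE_CACHE=true in the shell before running go test rather than calling os.Setenv as shown here.

package mysuite_test // hypothetical package name

import (
	"os"
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

func TestMySuite(t *testing.T) {
	// Opt in to cache-friendly behavior: with GINKGO_PRESERVE_CACHE=true,
	// RunSpecs skips os.Getwd() (which reads $PWD), so `go test` result
	// caching is not invalidated by running the same suite from different
	// directories. The Setenv call is purely illustrative.
	os.Setenv("GINKGO_PRESERVE_CACHE", "true")

	RegisterFailHandler(Fail)
	RunSpecs(t, "My Suite")
}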
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go (new file, 129 lines, generated, vendored)

@@ -0,0 +1,129 @@
// Copyright (c) 2015, Wade Simmons
// All rights reserved.

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:

// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.

// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Package gocovmerge takes the results from multiple `go test -coverprofile`
// runs and merges them into one profile

// this file was originally taken from the gocovmerge project
// see also: https://go.shabbyrobe.org/gocovmerge
package internal

import (
	"fmt"
	"io"
	"sort"

	"golang.org/x/tools/cover"
)

func AddCoverProfile(profiles []*cover.Profile, p *cover.Profile) []*cover.Profile {
	i := sort.Search(len(profiles), func(i int) bool { return profiles[i].FileName >= p.FileName })
	if i < len(profiles) && profiles[i].FileName == p.FileName {
		MergeCoverProfiles(profiles[i], p)
	} else {
		profiles = append(profiles, nil)
		copy(profiles[i+1:], profiles[i:])
		profiles[i] = p
	}
	return profiles
}

func DumpCoverProfiles(profiles []*cover.Profile, out io.Writer) error {
	if len(profiles) == 0 {
		return nil
	}
	if _, err := fmt.Fprintf(out, "mode: %s\n", profiles[0].Mode); err != nil {
		return err
	}
	for _, p := range profiles {
		for _, b := range p.Blocks {
			if _, err := fmt.Fprintf(out, "%s:%d.%d,%d.%d %d %d\n", p.FileName, b.StartLine, b.StartCol, b.EndLine, b.EndCol, b.NumStmt, b.Count); err != nil {
				return err
			}
		}
	}
	return nil
}

func MergeCoverProfiles(into *cover.Profile, merge *cover.Profile) error {
	if into.Mode != merge.Mode {
		return fmt.Errorf("cannot merge profiles with different modes")
	}
	// Since the blocks are sorted, we can keep track of where the last block
	// was inserted and only look at the blocks after that as targets for merge
	startIndex := 0
	for _, b := range merge.Blocks {
		var err error
		startIndex, err = mergeProfileBlock(into, b, startIndex)
		if err != nil {
			return err
		}
	}
	return nil
}

func mergeProfileBlock(p *cover.Profile, pb cover.ProfileBlock, startIndex int) (int, error) {
	sortFunc := func(i int) bool {
		pi := p.Blocks[i+startIndex]
		return pi.StartLine >= pb.StartLine && (pi.StartLine != pb.StartLine || pi.StartCol >= pb.StartCol)
	}

	i := 0
	if sortFunc(i) != true {
		i = sort.Search(len(p.Blocks)-startIndex, sortFunc)
	}

	i += startIndex
	if i < len(p.Blocks) && p.Blocks[i].StartLine == pb.StartLine && p.Blocks[i].StartCol == pb.StartCol {
		if p.Blocks[i].EndLine != pb.EndLine || p.Blocks[i].EndCol != pb.EndCol {
			return i, fmt.Errorf("gocovmerge: overlapping merge %v %v %v", p.FileName, p.Blocks[i], pb)
		}
		switch p.Mode {
		case "set":
			p.Blocks[i].Count |= pb.Count
		case "count", "atomic":
			p.Blocks[i].Count += pb.Count
		default:
			return i, fmt.Errorf("gocovmerge: unsupported covermode '%s'", p.Mode)
		}

	} else {
		if i > 0 {
			pa := p.Blocks[i-1]
			if pa.EndLine >= pb.EndLine && (pa.EndLine != pb.EndLine || pa.EndCol > pb.EndCol) {
				return i, fmt.Errorf("gocovmerge: overlap before %v %v %v", p.FileName, pa, pb)
			}
		}
		if i < len(p.Blocks)-1 {
			pa := p.Blocks[i+1]
			if pa.StartLine <= pb.StartLine && (pa.StartLine != pb.StartLine || pa.StartCol < pb.StartCol) {
				return i, fmt.Errorf("gocovmerge: overlap after %v %v %v", p.FileName, pa, pb)
			}
		}
		p.Blocks = append(p.Blocks, cover.ProfileBlock{})
		copy(p.Blocks[i+1:], p.Blocks[i:])
		p.Blocks[i] = pb
	}

	return i + 1, nil
}
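Aside (not part of this diff): to make the merge semantics concrete, here is a small, hypothetical illustration of MergeCoverProfiles in "count" mode. It assumes it sits alongside gocovmerge.go in the same internal package and imports "fmt" and "golang.org/x/tools/cover"; the file name and block coordinates are made up.

// Two profiles that cover the same block end up with their counts summed,
// rather than the block being written twice as the old concatenation did.
func mergeExample() {
	a := &cover.Profile{
		FileName: "example.go", // hypothetical source file
		Mode:     "count",
		Blocks:   []cover.ProfileBlock{{StartLine: 10, StartCol: 2, EndLine: 12, EndCol: 3, NumStmt: 2, Count: 1}},
	}
	b := &cover.Profile{
		FileName: "example.go",
		Mode:     "count",
		Blocks:   []cover.ProfileBlock{{StartLine: 10, StartCol: 2, EndLine: 12, EndCol: 3, NumStmt: 2, Count: 4}},
	}
	_ = MergeCoverProfiles(a, b)
	fmt.Println(a.Blocks[0].Count) // prints 5: counts are summed, the block is not duplicated
}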
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go (42 changed lines, generated, vendored)

@@ -1,7 +1,6 @@
 package internal

 import (
-	"bytes"
 	"fmt"
 	"os"
 	"os/exec"
@@ -12,6 +11,7 @@ import (
 	"github.com/google/pprof/profile"
 	"github.com/onsi/ginkgo/v2/reporters"
 	"github.com/onsi/ginkgo/v2/types"
+	"golang.org/x/tools/cover"
 )

 func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string {
@@ -144,38 +144,26 @@ func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIC
 	return messages, nil
 }

-// loads each profile, combines them, deletes them, stores them in destination
+// loads each profile, merges them, deletes them, stores them in destination
 func MergeAndCleanupCoverProfiles(profiles []string, destination string) error {
-	combined := &bytes.Buffer{}
-	modeRegex := regexp.MustCompile(`^mode: .*\n`)
-	for i, profile := range profiles {
-		contents, err := os.ReadFile(profile)
+	var merged []*cover.Profile
+	for _, file := range profiles {
+		parsedProfiles, err := cover.ParseProfiles(file)
 		if err != nil {
-			return fmt.Errorf("Unable to read coverage file %s:\n%s", profile, err.Error())
+			return err
 		}
-		os.Remove(profile)
-
-		// remove the cover mode line from every file
-		// except the first one
-		if i > 0 {
-			contents = modeRegex.ReplaceAll(contents, []byte{})
-		}
-
-		_, err = combined.Write(contents)
-
-		// Add a newline to the end of every file if missing.
-		if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' {
-			_, err = combined.Write([]byte("\n"))
-		}
-
-		if err != nil {
-			return fmt.Errorf("Unable to append to coverprofile:\n%s", err.Error())
+		os.Remove(file)
+		for _, p := range parsedProfiles {
+			merged = AddCoverProfile(merged, p)
 		}
 	}

-	err := os.WriteFile(destination, combined.Bytes(), 0666)
+	dst, err := os.OpenFile(destination, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
 	if err != nil {
-		return fmt.Errorf("Unable to create combined cover profile:\n%s", err.Error())
+		return err
 	}
+	err = DumpCoverProfiles(merged, dst)
+	if err != nil {
+		return err
+	}
 	return nil
 }
vendor/github.com/onsi/ginkgo/v2/internal/node.go (33 changed lines, generated, vendored)

@@ -5,9 +5,8 @@ import (
 	"fmt"
 	"reflect"
 	"sort"
-	"time"
-
 	"sync"
+	"time"

 	"github.com/onsi/ginkgo/v2/types"
 )
@@ -16,8 +15,8 @@ var _global_node_id_counter = uint(0)
 var _global_id_mutex = &sync.Mutex{}

 func UniqueNodeID() uint {
-	//There's a reace in the internal integration tests if we don't make
-	//accessing _global_node_id_counter safe across goroutines.
+	// There's a reace in the internal integration tests if we don't make
+	// accessing _global_node_id_counter safe across goroutines.
 	_global_id_mutex.Lock()
 	defer _global_id_mutex.Unlock()
 	_global_node_id_counter += 1
@@ -44,8 +43,8 @@ type Node struct {
 	SynchronizedAfterSuiteProc1Body           func(SpecContext)
 	SynchronizedAfterSuiteProc1BodyHasContext bool

-	ReportEachBody  func(types.SpecReport)
-	ReportSuiteBody func(types.Report)
+	ReportEachBody  func(SpecContext, types.SpecReport)
+	ReportSuiteBody func(SpecContext, types.Report)

 	MarkedFocus   bool
 	MarkedPending bool
@@ -209,7 +208,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
 	args = unrollInterfaceSlice(args)

 	remainingArgs := []interface{}{}
-	//First get the CodeLocation up-to-date
+	// First get the CodeLocation up-to-date
 	for _, arg := range args {
 		switch v := arg.(type) {
 		case Offset:
@@ -225,11 +224,11 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
 	trackedFunctionError := false
 	args = remainingArgs
 	remainingArgs = []interface{}{}
-	//now process the rest of the args
+	// now process the rest of the args
 	for _, arg := range args {
 		switch t := reflect.TypeOf(arg); {
 		case t == reflect.TypeOf(float64(0)):
-			break //ignore deprecated timeouts
+			break // ignore deprecated timeouts
 		case t == reflect.TypeOf(Focus):
 			node.MarkedFocus = bool(arg.(focusType))
 			if !nodeType.Is(types.NodeTypesForContainerAndIt) {
@@ -325,7 +324,12 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
 				node.Body = func(SpecContext) { body() }
 			} else if nodeType.Is(types.NodeTypeReportBeforeEach | types.NodeTypeReportAfterEach) {
 				if node.ReportEachBody == nil {
-					node.ReportEachBody = arg.(func(types.SpecReport))
+					if fn, ok := arg.(func(types.SpecReport)); ok {
+						node.ReportEachBody = func(_ SpecContext, r types.SpecReport) { fn(r) }
+					} else {
+						node.ReportEachBody = arg.(func(SpecContext, types.SpecReport))
+						node.HasContext = true
+					}
 				} else {
 					appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
 					trackedFunctionError = true
@@ -333,7 +337,12 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
 				}
 			} else if nodeType.Is(types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) {
 				if node.ReportSuiteBody == nil {
-					node.ReportSuiteBody = arg.(func(types.Report))
+					if fn, ok := arg.(func(types.Report)); ok {
+						node.ReportSuiteBody = func(_ SpecContext, r types.Report) { fn(r) }
+					} else {
+						node.ReportSuiteBody = arg.(func(SpecContext, types.Report))
+						node.HasContext = true
+					}
 				} else {
 					appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
 					trackedFunctionError = true
@@ -395,7 +404,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
 		}
 	}

-	//validations
+	// validations
 	if node.MarkedPending && node.MarkedFocus {
 		appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType))
 	}
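Aside (not part of this diff): the type-assertion wrapping above is how a single any-typed argument can accept either the old or the new reporting signature. A minimal, standalone sketch of the same pattern; the type and function names are illustrative, not ginkgo's.

package main

import "fmt"

type Report struct{ Name string }

type Ctx struct{} // stand-in for ginkgo's SpecContext

// adapt wraps whichever signature the caller provided into the
// context-aware form, mirroring the wrapping done in NewNode.
func adapt(body any) (wrapped func(Ctx, Report), hasContext bool) {
	if fn, ok := body.(func(Report)); ok {
		return func(_ Ctx, r Report) { fn(r) }, false // old signature: context ignored
	}
	return body.(func(Ctx, Report)), true // new signature: body receives the context
}

func main() {
	oldStyle, _ := adapt(func(r Report) { fmt.Println("old-style:", r.Name) })
	newStyle, hasCtx := adapt(func(_ Ctx, r Report) { fmt.Println("new-style:", r.Name) })
	oldStyle(Ctx{}, Report{Name: "a"})
	newStyle(Ctx{}, Report{Name: "b"})
	fmt.Println("context-aware:", hasCtx)
}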
vendor/github.com/onsi/ginkgo/v2/internal/suite.go (14 changed lines, generated, vendored)

@@ -594,8 +594,8 @@ func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) {
 		suite.writer.Truncate()
 		suite.outputInterceptor.StartInterceptingOutput()
 		report := suite.currentSpecReport
-		nodes[i].Body = func(SpecContext) {
-			nodes[i].ReportEachBody(report)
+		nodes[i].Body = func(ctx SpecContext) {
+			nodes[i].ReportEachBody(ctx, report)
 		}
 		state, failure := suite.runNode(nodes[i], time.Time{}, spec.Nodes.BestTextFor(nodes[i]))
@@ -762,7 +762,7 @@ func (suite *Suite) runReportSuiteNode(node Node, report types.Report) {
 		report = report.Add(aggregatedReport)
 	}

-	node.Body = func(SpecContext) { node.ReportSuiteBody(report) }
+	node.Body = func(ctx SpecContext) { node.ReportSuiteBody(ctx, report) }
 	suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")

 	suite.currentSpecReport.EndTime = time.Now()
@@ -840,7 +840,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
 		timeoutInPlay = "node"
 	}
 	if (!deadline.IsZero() && deadline.Before(now)) || interruptStatus.Interrupted() {
-		//we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't
+		// we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't
 		if node.NodeTimeout > 0 {
 			deadline = now.Add(node.NodeTimeout)
 			timeoutInPlay = "node"
@@ -918,9 +918,9 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
 			if outcomeFromRun != types.SpecStatePassed {
 				additionalFailure := types.AdditionalFailure{
 					State:   outcomeFromRun,
-					Failure: failure, //we make a copy - this will include all the configuration set up above...
+					Failure: failure, // we make a copy - this will include all the configuration set up above...
 				}
-				//...and then we update the failure with the details from failureFromRun
+				// ...and then we update the failure with the details from failureFromRun
 				additionalFailure.Failure.Location, additionalFailure.Failure.ForwardedPanic, additionalFailure.Failure.TimelineLocation = failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation
 				additionalFailure.Failure.ProgressReport = types.ProgressReport{}
 				if outcome == types.SpecStateTimedout {
@@ -959,7 +959,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
 			// tell the spec to stop. it's important we generate the progress report first to make sure we capture where
 			// the spec is actually stuck
 			sc.cancel(fmt.Errorf("%s timeout occurred", timeoutInPlay))
-			//and now we wait for the grace period
+			// and now we wait for the grace period
 			gracePeriodChannel = time.After(gracePeriod)
 		case <-interruptStatus.Channel:
 			interruptStatus = suite.interruptHandler.Status()
vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go (59 changed lines, generated, vendored)

@@ -74,12 +74,21 @@ func AddReportEntry(name string, args ...interface{}) {

 /*
 ReportBeforeEach nodes are run for each spec, even if the spec is skipped or pending. ReportBeforeEach nodes take a function that
-receives a SpecReport. They are called before the spec starts.
+receives a SpecReport or both SpecContext and Report for interruptible behavior. They are called before the spec starts.
+
+Example:
+
+	ReportBeforeEach(func(report SpecReport) { // process report })
+	ReportBeforeEach(func(ctx SpecContext, report SpecReport) {
+		// process report
+	}), NodeTimeout(1 * time.Minute))

 You cannot nest any other Ginkgo nodes within a ReportBeforeEach node's closure.
 You can learn more about ReportBeforeEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
 */
-func ReportBeforeEach(body func(SpecReport), args ...interface{}) bool {
+func ReportBeforeEach(body any, args ...any) bool {
 	combinedArgs := []interface{}{body}
 	combinedArgs = append(combinedArgs, args...)
@@ -87,13 +96,23 @@ func ReportBeforeEach(body func(SpecReport), args ...interface{}) bool {
 }

 /*
-ReportAfterEach nodes are run for each spec, even if the spec is skipped or pending. ReportAfterEach nodes take a function that
-receives a SpecReport. They are called after the spec has completed and receive the final report for the spec.
+ReportAfterEach nodes are run for each spec, even if the spec is skipped or pending.
+ReportAfterEach nodes take a function that receives a SpecReport or both SpecContext and Report for interruptible behavior.
+They are called after the spec has completed and receive the final report for the spec.
+
+Example:
+
+	ReportAfterEach(func(report SpecReport) { // process report })
+	ReportAfterEach(func(ctx SpecContext, report SpecReport) {
+		// process report
+	}), NodeTimeout(1 * time.Minute))

 You cannot nest any other Ginkgo nodes within a ReportAfterEach node's closure.
 You can learn more about ReportAfterEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
 */
-func ReportAfterEach(body func(SpecReport), args ...interface{}) bool {
+func ReportAfterEach(body any, args ...any) bool {
 	combinedArgs := []interface{}{body}
 	combinedArgs = append(combinedArgs, args...)
@@ -101,7 +120,15 @@ func ReportAfterEach(body func(SpecReport), args ...interface{}) bool {
 }

 /*
-ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function that receives a suite Report.
+ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function
+that can either receive Report or both SpecContext and Report for interruptible behavior.
+
+Example Usage:
+
+	ReportBeforeSuite(func(r Report) { // process report })
+	ReportBeforeSuite(func(ctx SpecContext, r Report) {
+		// process report
+	}, NodeTimeout(1 * time.Minute))

 They are called at the beginning of the suite, before any specs have run and any BeforeSuite or SynchronizedBeforeSuite nodes, and are passed in the initial report for the suite.
 ReportBeforeSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node)
@@ -112,18 +139,28 @@ You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure.
 You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically

 You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports
+
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
 */
-func ReportBeforeSuite(body func(Report), args ...interface{}) bool {
+func ReportBeforeSuite(body any, args ...any) bool {
 	combinedArgs := []interface{}{body}
 	combinedArgs = append(combinedArgs, args...)
 	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...))
 }

 /*
-ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes take a function that receives a suite Report.
+ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes execute at the suite's conclusion,
+and accept a function that can either receive Report or both SpecContext and Report for interruptible behavior.
+
+Example Usage:
+
+	ReportAfterSuite("Non-interruptible ReportAfterSuite", func(r Report) { // process report })
+	ReportAfterSuite("Interruptible ReportAfterSuite", func(ctx SpecContext, r Report) {
+		// process report
+	}, NodeTimeout(1 * time.Minute))

 They are called at the end of the suite, after all specs have run and any AfterSuite or SynchronizedAfterSuite nodes, and are passed in the final report for the suite.
-ReportAftersuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node)
+ReportAfterSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node)

 When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportAfterSuite and that it is passed a report that is aggregated across
 all parallel nodes
@@ -134,8 +171,10 @@ You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure.
 You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically

 You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports
+
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
 */
-func ReportAfterSuite(text string, body func(Report), args ...interface{}) bool {
+func ReportAfterSuite(text string, body any, args ...interface{}) bool {
 	combinedArgs := []interface{}{body}
 	combinedArgs = append(combinedArgs, args...)
 	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...))
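Aside (not part of this diff): taken together, these DSL changes let report nodes accept a SpecContext plus a NodeTimeout decorator. A minimal sketch of a suite that summarizes its final report with a bounded, interruptible reporter; the package name, suite description, and the artificial delay are all illustrative.

package books_test // hypothetical package name

import (
	"fmt"
	"testing"
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

func TestBooks(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Books Suite")
}

var _ = Describe("a book", func() {
	It("has pages", func() {
		Expect(1).To(BeNumerically(">", 0))
	})
})

// Interruptible reporter: if the body runs longer than the NodeTimeout,
// ctx is cancelled and Ginkgo interrupts the node instead of hanging.
var _ = ReportAfterSuite("summarize", func(ctx SpecContext, r Report) {
	select {
	case <-time.After(10 * time.Millisecond): // stand-in for a slow report upload
		fmt.Printf("suite %q passed: %v\n", r.SuiteDescription, r.SuiteSucceeded)
	case <-ctx.Done():
		fmt.Println("report publication interrupted:", ctx.Err())
	}
}, NodeTimeout(30*time.Second))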
vendor/github.com/onsi/ginkgo/v2/types/version.go (2 changed lines, generated, vendored)

@@ -1,3 +1,3 @@
 package types

-const VERSION = "2.15.0"
+const VERSION = "2.16.0"
vendor/golang.org/x/tools/cover/profile.go (new file, 266 lines, generated, vendored)

@@ -0,0 +1,266 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package cover provides support for parsing coverage profiles
// generated by "go test -coverprofile=cover.out".
package cover // import "golang.org/x/tools/cover"

import (
	"bufio"
	"errors"
	"fmt"
	"io"
	"math"
	"os"
	"sort"
	"strconv"
	"strings"
)

// Profile represents the profiling data for a specific file.
type Profile struct {
	FileName string
	Mode     string
	Blocks   []ProfileBlock
}

// ProfileBlock represents a single block of profiling data.
type ProfileBlock struct {
	StartLine, StartCol int
	EndLine, EndCol     int
	NumStmt, Count      int
}

type byFileName []*Profile

func (p byFileName) Len() int           { return len(p) }
func (p byFileName) Less(i, j int) bool { return p[i].FileName < p[j].FileName }
func (p byFileName) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

// ParseProfiles parses profile data in the specified file and returns a
// Profile for each source file described therein.
func ParseProfiles(fileName string) ([]*Profile, error) {
	pf, err := os.Open(fileName)
	if err != nil {
		return nil, err
	}
	defer pf.Close()
	return ParseProfilesFromReader(pf)
}

// ParseProfilesFromReader parses profile data from the Reader and
// returns a Profile for each source file described therein.
func ParseProfilesFromReader(rd io.Reader) ([]*Profile, error) {
	// First line is "mode: foo", where foo is "set", "count", or "atomic".
	// Rest of file is in the format
	//	encoding/base64/base64.go:34.44,37.40 3 1
	// where the fields are: name.go:line.column,line.column numberOfStatements count
	files := make(map[string]*Profile)
	s := bufio.NewScanner(rd)
	mode := ""
	for s.Scan() {
		line := s.Text()
		if mode == "" {
			const p = "mode: "
			if !strings.HasPrefix(line, p) || line == p {
				return nil, fmt.Errorf("bad mode line: %v", line)
			}
			mode = line[len(p):]
			continue
		}
		fn, b, err := parseLine(line)
		if err != nil {
			return nil, fmt.Errorf("line %q doesn't match expected format: %v", line, err)
		}
		p := files[fn]
		if p == nil {
			p = &Profile{
				FileName: fn,
				Mode:     mode,
			}
			files[fn] = p
		}
		p.Blocks = append(p.Blocks, b)
	}
	if err := s.Err(); err != nil {
		return nil, err
	}
	for _, p := range files {
		sort.Sort(blocksByStart(p.Blocks))
		// Merge samples from the same location.
		j := 1
		for i := 1; i < len(p.Blocks); i++ {
			b := p.Blocks[i]
			last := p.Blocks[j-1]
			if b.StartLine == last.StartLine &&
				b.StartCol == last.StartCol &&
				b.EndLine == last.EndLine &&
				b.EndCol == last.EndCol {
				if b.NumStmt != last.NumStmt {
					return nil, fmt.Errorf("inconsistent NumStmt: changed from %d to %d", last.NumStmt, b.NumStmt)
				}
				if mode == "set" {
					p.Blocks[j-1].Count |= b.Count
				} else {
					p.Blocks[j-1].Count += b.Count
				}
				continue
			}
			p.Blocks[j] = b
			j++
		}
		p.Blocks = p.Blocks[:j]
	}
	// Generate a sorted slice.
	profiles := make([]*Profile, 0, len(files))
	for _, profile := range files {
		profiles = append(profiles, profile)
	}
	sort.Sort(byFileName(profiles))
	return profiles, nil
}

// parseLine parses a line from a coverage file.
// It is equivalent to the regex
//	^(.+):([0-9]+)\.([0-9]+),([0-9]+)\.([0-9]+) ([0-9]+) ([0-9]+)$
//
// However, it is much faster: https://golang.org/cl/179377
func parseLine(l string) (fileName string, block ProfileBlock, err error) {
	end := len(l)

	b := ProfileBlock{}
	b.Count, end, err = seekBack(l, ' ', end, "Count")
	if err != nil {
		return "", b, err
	}
	b.NumStmt, end, err = seekBack(l, ' ', end, "NumStmt")
	if err != nil {
		return "", b, err
	}
	b.EndCol, end, err = seekBack(l, '.', end, "EndCol")
	if err != nil {
		return "", b, err
	}
	b.EndLine, end, err = seekBack(l, ',', end, "EndLine")
	if err != nil {
		return "", b, err
	}
	b.StartCol, end, err = seekBack(l, '.', end, "StartCol")
	if err != nil {
		return "", b, err
	}
	b.StartLine, end, err = seekBack(l, ':', end, "StartLine")
	if err != nil {
		return "", b, err
	}
	fn := l[0:end]
	if fn == "" {
		return "", b, errors.New("a FileName cannot be blank")
	}
	return fn, b, nil
}

// seekBack searches backwards from end to find sep in l, then returns the
// value between sep and end as an integer.
// If seekBack fails, the returned error will reference what.
func seekBack(l string, sep byte, end int, what string) (value int, nextSep int, err error) {
	// Since we're seeking backwards and we know only ASCII is legal for these values,
	// we can ignore the possibility of non-ASCII characters.
	for start := end - 1; start >= 0; start-- {
		if l[start] == sep {
			i, err := strconv.Atoi(l[start+1 : end])
			if err != nil {
				return 0, 0, fmt.Errorf("couldn't parse %q: %v", what, err)
			}
			if i < 0 {
				return 0, 0, fmt.Errorf("negative values are not allowed for %s, found %d", what, i)
			}
			return i, start, nil
		}
	}
	return 0, 0, fmt.Errorf("couldn't find a %s before %s", string(sep), what)
}

type blocksByStart []ProfileBlock

func (b blocksByStart) Len() int      { return len(b) }
func (b blocksByStart) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b blocksByStart) Less(i, j int) bool {
	bi, bj := b[i], b[j]
	return bi.StartLine < bj.StartLine || bi.StartLine == bj.StartLine && bi.StartCol < bj.StartCol
}

// Boundary represents the position in a source file of the beginning or end of a
// block as reported by the coverage profile. In HTML mode, it will correspond to
// the opening or closing of a <span> tag and will be used to colorize the source
type Boundary struct {
	Offset int     // Location as a byte offset in the source file.
	Start  bool    // Is this the start of a block?
	Count  int     // Event count from the cover profile.
	Norm   float64 // Count normalized to [0..1].
	Index  int     // Order in input file.
}

// Boundaries returns a Profile as a set of Boundary objects within the provided src.
func (p *Profile) Boundaries(src []byte) (boundaries []Boundary) {
	// Find maximum count.
	max := 0
	for _, b := range p.Blocks {
		if b.Count > max {
			max = b.Count
		}
	}
	// Divisor for normalization.
	divisor := math.Log(float64(max))

	// boundary returns a Boundary, populating the Norm field with a normalized Count.
	index := 0
	boundary := func(offset int, start bool, count int) Boundary {
		b := Boundary{Offset: offset, Start: start, Count: count, Index: index}
		index++
		if !start || count == 0 {
			return b
		}
		if max <= 1 {
			b.Norm = 0.8 // Profile is in"set" mode; we want a heat map. Use cov8 in the CSS.
		} else if count > 0 {
			b.Norm = math.Log(float64(count)) / divisor
		}
		return b
	}

	line, col := 1, 2 // TODO: Why is this 2?
	for si, bi := 0, 0; si < len(src) && bi < len(p.Blocks); {
		b := p.Blocks[bi]
		if b.StartLine == line && b.StartCol == col {
			boundaries = append(boundaries, boundary(si, true, b.Count))
		}
		if b.EndLine == line && b.EndCol == col || line > b.EndLine {
			boundaries = append(boundaries, boundary(si, false, 0))
			bi++
			continue // Don't advance through src; maybe the next block starts here.
		}
		if src[si] == '\n' {
			line++
			col = 0
		}
		col++
		si++
	}
	sort.Sort(boundariesByPos(boundaries))
	return
}

type boundariesByPos []Boundary

func (b boundariesByPos) Len() int      { return len(b) }
func (b boundariesByPos) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b boundariesByPos) Less(i, j int) bool {
	if b[i].Offset == b[j].Offset {
		// Boundaries at the same offset should be ordered according to
		// their original position.
		return b[i].Index < b[j].Index
	}
	return b[i].Offset < b[j].Offset
}
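Aside (not part of this diff): since golang.org/x/tools/cover is now vendored, here is a quick, self-contained sketch of what its parser exposes; "cover.out" is a placeholder for any `go test -coverprofile` output.

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/cover"
)

func main() {
	// Parse a coverage profile into one Profile per source file.
	profiles, err := cover.ParseProfiles("cover.out")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range profiles {
		total, covered := 0, 0
		for _, b := range p.Blocks {
			total += b.NumStmt
			if b.Count > 0 {
				covered += b.NumStmt
			}
		}
		if total > 0 {
			fmt.Printf("%s: %.1f%% of statements covered\n", p.FileName, 100*float64(covered)/float64(total))
		}
	}
}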
vendor/modules.txt (5 changed lines, vendored)

@@ -179,7 +179,7 @@ github.com/modern-go/reflect2
 # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
 ## explicit
 github.com/munnerz/goautoneg
-# github.com/onsi/ginkgo/v2 v2.15.0
+# github.com/onsi/ginkgo/v2 v2.16.0
 ## explicit; go 1.20
 github.com/onsi/ginkgo/v2
 github.com/onsi/ginkgo/v2/config
@@ -444,8 +444,9 @@ golang.org/x/text/width
 # golang.org/x/time v0.3.0
 ## explicit
 golang.org/x/time/rate
-# golang.org/x/tools v0.16.1
+# golang.org/x/tools v0.17.0
 ## explicit; go 1.18
+golang.org/x/tools/cover
 golang.org/x/tools/go/ast/inspector
 # google.golang.org/appengine v1.6.8
 ## explicit; go 1.11