mirror of
https://github.com/cloudflare/cloudflared.git
synced 2025-07-28 00:59:58 +00:00
TUN-7227: Migrate to devincarr/quic-go
The lucas-clemente/quic-go package moved namespaces and our branch went stale, this new fork provides support for the new quic-go repo and applies the max datagram frame size change. Until the max datagram frame size support gets upstreamed into quic-go, this can be used to unblock go 1.20 support as the old lucas-clemente/quic-go will not get go 1.20 support.
This commit is contained in:
20
vendor/github.com/onsi/ginkgo/v2/LICENSE
generated
vendored
Normal file
20
vendor/github.com/onsi/ginkgo/v2/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
Copyright (c) 2013-2014 Onsi Fakhouri
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
69
vendor/github.com/onsi/ginkgo/v2/config/deprecated.go
generated
vendored
Normal file
69
vendor/github.com/onsi/ginkgo/v2/config/deprecated.go
generated
vendored
Normal file
@@ -0,0 +1,69 @@
|
||||
package config
|
||||
|
||||
// GinkgoConfigType has been deprecated and its equivalent now lives in
|
||||
// the types package. You can no longer access Ginkgo configuration from the config
|
||||
// package. Instead use the DSL's GinkgoConfiguration() function to get copies of the
|
||||
// current configuration
|
||||
//
|
||||
// GinkgoConfigType is still here so custom V1 reporters do not result in a compilation error
|
||||
// It will be removed in a future minor release of Ginkgo
|
||||
type GinkgoConfigType = DeprecatedGinkgoConfigType
|
||||
type DeprecatedGinkgoConfigType struct {
|
||||
RandomSeed int64
|
||||
RandomizeAllSpecs bool
|
||||
RegexScansFilePath bool
|
||||
FocusStrings []string
|
||||
SkipStrings []string
|
||||
SkipMeasurements bool
|
||||
FailOnPending bool
|
||||
FailFast bool
|
||||
FlakeAttempts int
|
||||
EmitSpecProgress bool
|
||||
DryRun bool
|
||||
DebugParallel bool
|
||||
|
||||
ParallelNode int
|
||||
ParallelTotal int
|
||||
SyncHost string
|
||||
StreamHost string
|
||||
}
|
||||
|
||||
// DefaultReporterConfigType has been deprecated and its equivalent now lives in
|
||||
// the types package. You can no longer access Ginkgo configuration from the config
|
||||
// package. Instead use the DSL's GinkgoConfiguration() function to get copies of the
|
||||
// current configuration
|
||||
//
|
||||
// DefaultReporterConfigType is still here so custom V1 reporters do not result in a compilation error
|
||||
// It will be removed in a future minor release of Ginkgo
|
||||
type DefaultReporterConfigType = DeprecatedDefaultReporterConfigType
|
||||
type DeprecatedDefaultReporterConfigType struct {
|
||||
NoColor bool
|
||||
SlowSpecThreshold float64
|
||||
NoisyPendings bool
|
||||
NoisySkippings bool
|
||||
Succinct bool
|
||||
Verbose bool
|
||||
FullTrace bool
|
||||
ReportPassed bool
|
||||
ReportFile string
|
||||
}
|
||||
|
||||
// Sadly there is no way to gracefully deprecate access to these global config variables.
|
||||
// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
|
||||
// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
|
||||
type GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{}
|
||||
|
||||
// Sadly there is no way to gracefully deprecate access to these global config variables.
|
||||
// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
|
||||
// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
|
||||
var GinkgoConfig = GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{}
|
||||
|
||||
// Sadly there is no way to gracefully deprecate access to these global config variables.
|
||||
// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
|
||||
// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
|
||||
type DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{}
|
||||
|
||||
// Sadly there is no way to gracefully deprecate access to these global config variables.
|
||||
// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
|
||||
// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
|
||||
var DefaultReporterConfig = DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{}
|
41
vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go
generated
vendored
Normal file
41
vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
// +build !windows
|
||||
|
||||
/*
|
||||
These packages are used for colorize on Windows and contributed by mattn.jp@gmail.com
|
||||
|
||||
* go-colorable: <https://github.com/mattn/go-colorable>
|
||||
* go-isatty: <https://github.com/mattn/go-isatty>
|
||||
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2016 Yasuhiro Matsumoto
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
package formatter
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
func newColorable(file *os.File) io.Writer {
|
||||
return file
|
||||
}
|
809
vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go
generated
vendored
Normal file
809
vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go
generated
vendored
Normal file
@@ -0,0 +1,809 @@
|
||||
/*
|
||||
These packages are used for colorize on Windows and contributed by mattn.jp@gmail.com
|
||||
|
||||
* go-colorable: <https://github.com/mattn/go-colorable>
|
||||
* go-isatty: <https://github.com/mattn/go-isatty>
|
||||
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2016 Yasuhiro Matsumoto
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
package formatter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var (
|
||||
kernel32 = syscall.NewLazyDLL("kernel32.dll")
|
||||
procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
|
||||
procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
|
||||
procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
|
||||
procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
|
||||
procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute")
|
||||
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
|
||||
)
|
||||
|
||||
func isTerminal(fd uintptr) bool {
|
||||
var st uint32
|
||||
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
|
||||
return r != 0 && e == 0
|
||||
}
|
||||
|
||||
const (
|
||||
foregroundBlue = 0x1
|
||||
foregroundGreen = 0x2
|
||||
foregroundRed = 0x4
|
||||
foregroundIntensity = 0x8
|
||||
foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity)
|
||||
backgroundBlue = 0x10
|
||||
backgroundGreen = 0x20
|
||||
backgroundRed = 0x40
|
||||
backgroundIntensity = 0x80
|
||||
backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity)
|
||||
)
|
||||
|
||||
type wchar uint16
|
||||
type short int16
|
||||
type dword uint32
|
||||
type word uint16
|
||||
|
||||
type coord struct {
|
||||
x short
|
||||
y short
|
||||
}
|
||||
|
||||
type smallRect struct {
|
||||
left short
|
||||
top short
|
||||
right short
|
||||
bottom short
|
||||
}
|
||||
|
||||
type consoleScreenBufferInfo struct {
|
||||
size coord
|
||||
cursorPosition coord
|
||||
attributes word
|
||||
window smallRect
|
||||
maximumWindowSize coord
|
||||
}
|
||||
|
||||
type writer struct {
|
||||
out io.Writer
|
||||
handle syscall.Handle
|
||||
lastbuf bytes.Buffer
|
||||
oldattr word
|
||||
}
|
||||
|
||||
func newColorable(file *os.File) io.Writer {
|
||||
if file == nil {
|
||||
panic("nil passed instead of *os.File to NewColorable()")
|
||||
}
|
||||
|
||||
if isTerminal(file.Fd()) {
|
||||
var csbi consoleScreenBufferInfo
|
||||
handle := syscall.Handle(file.Fd())
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
return &writer{out: file, handle: handle, oldattr: csbi.attributes}
|
||||
} else {
|
||||
return file
|
||||
}
|
||||
}
|
||||
|
||||
var color256 = map[int]int{
|
||||
0: 0x000000,
|
||||
1: 0x800000,
|
||||
2: 0x008000,
|
||||
3: 0x808000,
|
||||
4: 0x000080,
|
||||
5: 0x800080,
|
||||
6: 0x008080,
|
||||
7: 0xc0c0c0,
|
||||
8: 0x808080,
|
||||
9: 0xff0000,
|
||||
10: 0x00ff00,
|
||||
11: 0xffff00,
|
||||
12: 0x0000ff,
|
||||
13: 0xff00ff,
|
||||
14: 0x00ffff,
|
||||
15: 0xffffff,
|
||||
16: 0x000000,
|
||||
17: 0x00005f,
|
||||
18: 0x000087,
|
||||
19: 0x0000af,
|
||||
20: 0x0000d7,
|
||||
21: 0x0000ff,
|
||||
22: 0x005f00,
|
||||
23: 0x005f5f,
|
||||
24: 0x005f87,
|
||||
25: 0x005faf,
|
||||
26: 0x005fd7,
|
||||
27: 0x005fff,
|
||||
28: 0x008700,
|
||||
29: 0x00875f,
|
||||
30: 0x008787,
|
||||
31: 0x0087af,
|
||||
32: 0x0087d7,
|
||||
33: 0x0087ff,
|
||||
34: 0x00af00,
|
||||
35: 0x00af5f,
|
||||
36: 0x00af87,
|
||||
37: 0x00afaf,
|
||||
38: 0x00afd7,
|
||||
39: 0x00afff,
|
||||
40: 0x00d700,
|
||||
41: 0x00d75f,
|
||||
42: 0x00d787,
|
||||
43: 0x00d7af,
|
||||
44: 0x00d7d7,
|
||||
45: 0x00d7ff,
|
||||
46: 0x00ff00,
|
||||
47: 0x00ff5f,
|
||||
48: 0x00ff87,
|
||||
49: 0x00ffaf,
|
||||
50: 0x00ffd7,
|
||||
51: 0x00ffff,
|
||||
52: 0x5f0000,
|
||||
53: 0x5f005f,
|
||||
54: 0x5f0087,
|
||||
55: 0x5f00af,
|
||||
56: 0x5f00d7,
|
||||
57: 0x5f00ff,
|
||||
58: 0x5f5f00,
|
||||
59: 0x5f5f5f,
|
||||
60: 0x5f5f87,
|
||||
61: 0x5f5faf,
|
||||
62: 0x5f5fd7,
|
||||
63: 0x5f5fff,
|
||||
64: 0x5f8700,
|
||||
65: 0x5f875f,
|
||||
66: 0x5f8787,
|
||||
67: 0x5f87af,
|
||||
68: 0x5f87d7,
|
||||
69: 0x5f87ff,
|
||||
70: 0x5faf00,
|
||||
71: 0x5faf5f,
|
||||
72: 0x5faf87,
|
||||
73: 0x5fafaf,
|
||||
74: 0x5fafd7,
|
||||
75: 0x5fafff,
|
||||
76: 0x5fd700,
|
||||
77: 0x5fd75f,
|
||||
78: 0x5fd787,
|
||||
79: 0x5fd7af,
|
||||
80: 0x5fd7d7,
|
||||
81: 0x5fd7ff,
|
||||
82: 0x5fff00,
|
||||
83: 0x5fff5f,
|
||||
84: 0x5fff87,
|
||||
85: 0x5fffaf,
|
||||
86: 0x5fffd7,
|
||||
87: 0x5fffff,
|
||||
88: 0x870000,
|
||||
89: 0x87005f,
|
||||
90: 0x870087,
|
||||
91: 0x8700af,
|
||||
92: 0x8700d7,
|
||||
93: 0x8700ff,
|
||||
94: 0x875f00,
|
||||
95: 0x875f5f,
|
||||
96: 0x875f87,
|
||||
97: 0x875faf,
|
||||
98: 0x875fd7,
|
||||
99: 0x875fff,
|
||||
100: 0x878700,
|
||||
101: 0x87875f,
|
||||
102: 0x878787,
|
||||
103: 0x8787af,
|
||||
104: 0x8787d7,
|
||||
105: 0x8787ff,
|
||||
106: 0x87af00,
|
||||
107: 0x87af5f,
|
||||
108: 0x87af87,
|
||||
109: 0x87afaf,
|
||||
110: 0x87afd7,
|
||||
111: 0x87afff,
|
||||
112: 0x87d700,
|
||||
113: 0x87d75f,
|
||||
114: 0x87d787,
|
||||
115: 0x87d7af,
|
||||
116: 0x87d7d7,
|
||||
117: 0x87d7ff,
|
||||
118: 0x87ff00,
|
||||
119: 0x87ff5f,
|
||||
120: 0x87ff87,
|
||||
121: 0x87ffaf,
|
||||
122: 0x87ffd7,
|
||||
123: 0x87ffff,
|
||||
124: 0xaf0000,
|
||||
125: 0xaf005f,
|
||||
126: 0xaf0087,
|
||||
127: 0xaf00af,
|
||||
128: 0xaf00d7,
|
||||
129: 0xaf00ff,
|
||||
130: 0xaf5f00,
|
||||
131: 0xaf5f5f,
|
||||
132: 0xaf5f87,
|
||||
133: 0xaf5faf,
|
||||
134: 0xaf5fd7,
|
||||
135: 0xaf5fff,
|
||||
136: 0xaf8700,
|
||||
137: 0xaf875f,
|
||||
138: 0xaf8787,
|
||||
139: 0xaf87af,
|
||||
140: 0xaf87d7,
|
||||
141: 0xaf87ff,
|
||||
142: 0xafaf00,
|
||||
143: 0xafaf5f,
|
||||
144: 0xafaf87,
|
||||
145: 0xafafaf,
|
||||
146: 0xafafd7,
|
||||
147: 0xafafff,
|
||||
148: 0xafd700,
|
||||
149: 0xafd75f,
|
||||
150: 0xafd787,
|
||||
151: 0xafd7af,
|
||||
152: 0xafd7d7,
|
||||
153: 0xafd7ff,
|
||||
154: 0xafff00,
|
||||
155: 0xafff5f,
|
||||
156: 0xafff87,
|
||||
157: 0xafffaf,
|
||||
158: 0xafffd7,
|
||||
159: 0xafffff,
|
||||
160: 0xd70000,
|
||||
161: 0xd7005f,
|
||||
162: 0xd70087,
|
||||
163: 0xd700af,
|
||||
164: 0xd700d7,
|
||||
165: 0xd700ff,
|
||||
166: 0xd75f00,
|
||||
167: 0xd75f5f,
|
||||
168: 0xd75f87,
|
||||
169: 0xd75faf,
|
||||
170: 0xd75fd7,
|
||||
171: 0xd75fff,
|
||||
172: 0xd78700,
|
||||
173: 0xd7875f,
|
||||
174: 0xd78787,
|
||||
175: 0xd787af,
|
||||
176: 0xd787d7,
|
||||
177: 0xd787ff,
|
||||
178: 0xd7af00,
|
||||
179: 0xd7af5f,
|
||||
180: 0xd7af87,
|
||||
181: 0xd7afaf,
|
||||
182: 0xd7afd7,
|
||||
183: 0xd7afff,
|
||||
184: 0xd7d700,
|
||||
185: 0xd7d75f,
|
||||
186: 0xd7d787,
|
||||
187: 0xd7d7af,
|
||||
188: 0xd7d7d7,
|
||||
189: 0xd7d7ff,
|
||||
190: 0xd7ff00,
|
||||
191: 0xd7ff5f,
|
||||
192: 0xd7ff87,
|
||||
193: 0xd7ffaf,
|
||||
194: 0xd7ffd7,
|
||||
195: 0xd7ffff,
|
||||
196: 0xff0000,
|
||||
197: 0xff005f,
|
||||
198: 0xff0087,
|
||||
199: 0xff00af,
|
||||
200: 0xff00d7,
|
||||
201: 0xff00ff,
|
||||
202: 0xff5f00,
|
||||
203: 0xff5f5f,
|
||||
204: 0xff5f87,
|
||||
205: 0xff5faf,
|
||||
206: 0xff5fd7,
|
||||
207: 0xff5fff,
|
||||
208: 0xff8700,
|
||||
209: 0xff875f,
|
||||
210: 0xff8787,
|
||||
211: 0xff87af,
|
||||
212: 0xff87d7,
|
||||
213: 0xff87ff,
|
||||
214: 0xffaf00,
|
||||
215: 0xffaf5f,
|
||||
216: 0xffaf87,
|
||||
217: 0xffafaf,
|
||||
218: 0xffafd7,
|
||||
219: 0xffafff,
|
||||
220: 0xffd700,
|
||||
221: 0xffd75f,
|
||||
222: 0xffd787,
|
||||
223: 0xffd7af,
|
||||
224: 0xffd7d7,
|
||||
225: 0xffd7ff,
|
||||
226: 0xffff00,
|
||||
227: 0xffff5f,
|
||||
228: 0xffff87,
|
||||
229: 0xffffaf,
|
||||
230: 0xffffd7,
|
||||
231: 0xffffff,
|
||||
232: 0x080808,
|
||||
233: 0x121212,
|
||||
234: 0x1c1c1c,
|
||||
235: 0x262626,
|
||||
236: 0x303030,
|
||||
237: 0x3a3a3a,
|
||||
238: 0x444444,
|
||||
239: 0x4e4e4e,
|
||||
240: 0x585858,
|
||||
241: 0x626262,
|
||||
242: 0x6c6c6c,
|
||||
243: 0x767676,
|
||||
244: 0x808080,
|
||||
245: 0x8a8a8a,
|
||||
246: 0x949494,
|
||||
247: 0x9e9e9e,
|
||||
248: 0xa8a8a8,
|
||||
249: 0xb2b2b2,
|
||||
250: 0xbcbcbc,
|
||||
251: 0xc6c6c6,
|
||||
252: 0xd0d0d0,
|
||||
253: 0xdadada,
|
||||
254: 0xe4e4e4,
|
||||
255: 0xeeeeee,
|
||||
}
|
||||
|
||||
func (w *writer) Write(data []byte) (n int, err error) {
|
||||
var csbi consoleScreenBufferInfo
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
|
||||
er := bytes.NewBuffer(data)
|
||||
loop:
|
||||
for {
|
||||
r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
if r1 == 0 {
|
||||
break loop
|
||||
}
|
||||
|
||||
c1, _, err := er.ReadRune()
|
||||
if err != nil {
|
||||
break loop
|
||||
}
|
||||
if c1 != 0x1b {
|
||||
fmt.Fprint(w.out, string(c1))
|
||||
continue
|
||||
}
|
||||
c2, _, err := er.ReadRune()
|
||||
if err != nil {
|
||||
w.lastbuf.WriteRune(c1)
|
||||
break loop
|
||||
}
|
||||
if c2 != 0x5b {
|
||||
w.lastbuf.WriteRune(c1)
|
||||
w.lastbuf.WriteRune(c2)
|
||||
continue
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
var m rune
|
||||
for {
|
||||
c, _, err := er.ReadRune()
|
||||
if err != nil {
|
||||
w.lastbuf.WriteRune(c1)
|
||||
w.lastbuf.WriteRune(c2)
|
||||
w.lastbuf.Write(buf.Bytes())
|
||||
break loop
|
||||
}
|
||||
if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
|
||||
m = c
|
||||
break
|
||||
}
|
||||
buf.Write([]byte(string(c)))
|
||||
}
|
||||
|
||||
var csbi consoleScreenBufferInfo
|
||||
switch m {
|
||||
case 'A':
|
||||
n, err = strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
csbi.cursorPosition.y -= short(n)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
case 'B':
|
||||
n, err = strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
csbi.cursorPosition.y += short(n)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
case 'C':
|
||||
n, err = strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
csbi.cursorPosition.x -= short(n)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
case 'D':
|
||||
n, err = strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if n, err = strconv.Atoi(buf.String()); err == nil {
|
||||
var csbi consoleScreenBufferInfo
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
csbi.cursorPosition.x += short(n)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
}
|
||||
case 'E':
|
||||
n, err = strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
csbi.cursorPosition.x = 0
|
||||
csbi.cursorPosition.y += short(n)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
case 'F':
|
||||
n, err = strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
csbi.cursorPosition.x = 0
|
||||
csbi.cursorPosition.y -= short(n)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
case 'G':
|
||||
n, err = strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
csbi.cursorPosition.x = short(n)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
case 'H':
|
||||
token := strings.Split(buf.String(), ";")
|
||||
if len(token) != 2 {
|
||||
continue
|
||||
}
|
||||
n1, err := strconv.Atoi(token[0])
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
n2, err := strconv.Atoi(token[1])
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
csbi.cursorPosition.x = short(n2)
|
||||
csbi.cursorPosition.x = short(n1)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
case 'J':
|
||||
n, err := strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
var cursor coord
|
||||
switch n {
|
||||
case 0:
|
||||
cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
|
||||
case 1:
|
||||
cursor = coord{x: csbi.window.left, y: csbi.window.top}
|
||||
case 2:
|
||||
cursor = coord{x: csbi.window.left, y: csbi.window.top}
|
||||
}
|
||||
var count, written dword
|
||||
count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x)
|
||||
procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
|
||||
procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
|
||||
case 'K':
|
||||
n, err := strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
var cursor coord
|
||||
switch n {
|
||||
case 0:
|
||||
cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
|
||||
case 1:
|
||||
cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
|
||||
case 2:
|
||||
cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
|
||||
}
|
||||
var count, written dword
|
||||
count = dword(csbi.size.x - csbi.cursorPosition.x)
|
||||
procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
|
||||
procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
|
||||
case 'm':
|
||||
attr := csbi.attributes
|
||||
cs := buf.String()
|
||||
if cs == "" {
|
||||
procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr))
|
||||
continue
|
||||
}
|
||||
token := strings.Split(cs, ";")
|
||||
for i := 0; i < len(token); i += 1 {
|
||||
ns := token[i]
|
||||
if n, err = strconv.Atoi(ns); err == nil {
|
||||
switch {
|
||||
case n == 0 || n == 100:
|
||||
attr = w.oldattr
|
||||
case 1 <= n && n <= 5:
|
||||
attr |= foregroundIntensity
|
||||
case n == 7:
|
||||
attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
|
||||
case 22 == n || n == 25 || n == 25:
|
||||
attr |= foregroundIntensity
|
||||
case n == 27:
|
||||
attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
|
||||
case 30 <= n && n <= 37:
|
||||
attr = (attr & backgroundMask)
|
||||
if (n-30)&1 != 0 {
|
||||
attr |= foregroundRed
|
||||
}
|
||||
if (n-30)&2 != 0 {
|
||||
attr |= foregroundGreen
|
||||
}
|
||||
if (n-30)&4 != 0 {
|
||||
attr |= foregroundBlue
|
||||
}
|
||||
case n == 38: // set foreground color.
|
||||
if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") {
|
||||
if n256, err := strconv.Atoi(token[i+2]); err == nil {
|
||||
if n256foreAttr == nil {
|
||||
n256setup()
|
||||
}
|
||||
attr &= backgroundMask
|
||||
attr |= n256foreAttr[n256]
|
||||
i += 2
|
||||
}
|
||||
} else {
|
||||
attr = attr & (w.oldattr & backgroundMask)
|
||||
}
|
||||
case n == 39: // reset foreground color.
|
||||
attr &= backgroundMask
|
||||
attr |= w.oldattr & foregroundMask
|
||||
case 40 <= n && n <= 47:
|
||||
attr = (attr & foregroundMask)
|
||||
if (n-40)&1 != 0 {
|
||||
attr |= backgroundRed
|
||||
}
|
||||
if (n-40)&2 != 0 {
|
||||
attr |= backgroundGreen
|
||||
}
|
||||
if (n-40)&4 != 0 {
|
||||
attr |= backgroundBlue
|
||||
}
|
||||
case n == 48: // set background color.
|
||||
if i < len(token)-2 && token[i+1] == "5" {
|
||||
if n256, err := strconv.Atoi(token[i+2]); err == nil {
|
||||
if n256backAttr == nil {
|
||||
n256setup()
|
||||
}
|
||||
attr &= foregroundMask
|
||||
attr |= n256backAttr[n256]
|
||||
i += 2
|
||||
}
|
||||
} else {
|
||||
attr = attr & (w.oldattr & foregroundMask)
|
||||
}
|
||||
case n == 49: // reset foreground color.
|
||||
attr &= foregroundMask
|
||||
attr |= w.oldattr & backgroundMask
|
||||
case 90 <= n && n <= 97:
|
||||
attr = (attr & backgroundMask)
|
||||
attr |= foregroundIntensity
|
||||
if (n-90)&1 != 0 {
|
||||
attr |= foregroundRed
|
||||
}
|
||||
if (n-90)&2 != 0 {
|
||||
attr |= foregroundGreen
|
||||
}
|
||||
if (n-90)&4 != 0 {
|
||||
attr |= foregroundBlue
|
||||
}
|
||||
case 100 <= n && n <= 107:
|
||||
attr = (attr & foregroundMask)
|
||||
attr |= backgroundIntensity
|
||||
if (n-100)&1 != 0 {
|
||||
attr |= backgroundRed
|
||||
}
|
||||
if (n-100)&2 != 0 {
|
||||
attr |= backgroundGreen
|
||||
}
|
||||
if (n-100)&4 != 0 {
|
||||
attr |= backgroundBlue
|
||||
}
|
||||
}
|
||||
procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return len(data) - w.lastbuf.Len(), nil
|
||||
}
|
||||
|
||||
type consoleColor struct {
|
||||
rgb int
|
||||
red bool
|
||||
green bool
|
||||
blue bool
|
||||
intensity bool
|
||||
}
|
||||
|
||||
func (c consoleColor) foregroundAttr() (attr word) {
|
||||
if c.red {
|
||||
attr |= foregroundRed
|
||||
}
|
||||
if c.green {
|
||||
attr |= foregroundGreen
|
||||
}
|
||||
if c.blue {
|
||||
attr |= foregroundBlue
|
||||
}
|
||||
if c.intensity {
|
||||
attr |= foregroundIntensity
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (c consoleColor) backgroundAttr() (attr word) {
|
||||
if c.red {
|
||||
attr |= backgroundRed
|
||||
}
|
||||
if c.green {
|
||||
attr |= backgroundGreen
|
||||
}
|
||||
if c.blue {
|
||||
attr |= backgroundBlue
|
||||
}
|
||||
if c.intensity {
|
||||
attr |= backgroundIntensity
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var color16 = []consoleColor{
|
||||
consoleColor{0x000000, false, false, false, false},
|
||||
consoleColor{0x000080, false, false, true, false},
|
||||
consoleColor{0x008000, false, true, false, false},
|
||||
consoleColor{0x008080, false, true, true, false},
|
||||
consoleColor{0x800000, true, false, false, false},
|
||||
consoleColor{0x800080, true, false, true, false},
|
||||
consoleColor{0x808000, true, true, false, false},
|
||||
consoleColor{0xc0c0c0, true, true, true, false},
|
||||
consoleColor{0x808080, false, false, false, true},
|
||||
consoleColor{0x0000ff, false, false, true, true},
|
||||
consoleColor{0x00ff00, false, true, false, true},
|
||||
consoleColor{0x00ffff, false, true, true, true},
|
||||
consoleColor{0xff0000, true, false, false, true},
|
||||
consoleColor{0xff00ff, true, false, true, true},
|
||||
consoleColor{0xffff00, true, true, false, true},
|
||||
consoleColor{0xffffff, true, true, true, true},
|
||||
}
|
||||
|
||||
type hsv struct {
|
||||
h, s, v float32
|
||||
}
|
||||
|
||||
func (a hsv) dist(b hsv) float32 {
|
||||
dh := a.h - b.h
|
||||
switch {
|
||||
case dh > 0.5:
|
||||
dh = 1 - dh
|
||||
case dh < -0.5:
|
||||
dh = -1 - dh
|
||||
}
|
||||
ds := a.s - b.s
|
||||
dv := a.v - b.v
|
||||
return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv)))
|
||||
}
|
||||
|
||||
func toHSV(rgb int) hsv {
|
||||
r, g, b := float32((rgb&0xFF0000)>>16)/256.0,
|
||||
float32((rgb&0x00FF00)>>8)/256.0,
|
||||
float32(rgb&0x0000FF)/256.0
|
||||
min, max := minmax3f(r, g, b)
|
||||
h := max - min
|
||||
if h > 0 {
|
||||
if max == r {
|
||||
h = (g - b) / h
|
||||
if h < 0 {
|
||||
h += 6
|
||||
}
|
||||
} else if max == g {
|
||||
h = 2 + (b-r)/h
|
||||
} else {
|
||||
h = 4 + (r-g)/h
|
||||
}
|
||||
}
|
||||
h /= 6.0
|
||||
s := max - min
|
||||
if max != 0 {
|
||||
s /= max
|
||||
}
|
||||
v := max
|
||||
return hsv{h: h, s: s, v: v}
|
||||
}
|
||||
|
||||
type hsvTable []hsv
|
||||
|
||||
func toHSVTable(rgbTable []consoleColor) hsvTable {
|
||||
t := make(hsvTable, len(rgbTable))
|
||||
for i, c := range rgbTable {
|
||||
t[i] = toHSV(c.rgb)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
func (t hsvTable) find(rgb int) consoleColor {
|
||||
hsv := toHSV(rgb)
|
||||
n := 7
|
||||
l := float32(5.0)
|
||||
for i, p := range t {
|
||||
d := hsv.dist(p)
|
||||
if d < l {
|
||||
l, n = d, i
|
||||
}
|
||||
}
|
||||
return color16[n]
|
||||
}
|
||||
|
||||
func minmax3f(a, b, c float32) (min, max float32) {
|
||||
if a < b {
|
||||
if b < c {
|
||||
return a, c
|
||||
} else if a < c {
|
||||
return a, b
|
||||
} else {
|
||||
return c, b
|
||||
}
|
||||
} else {
|
||||
if a < c {
|
||||
return b, c
|
||||
} else if b < c {
|
||||
return b, a
|
||||
} else {
|
||||
return c, a
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var n256foreAttr []word
|
||||
var n256backAttr []word
|
||||
|
||||
func n256setup() {
|
||||
n256foreAttr = make([]word, 256)
|
||||
n256backAttr = make([]word, 256)
|
||||
t := toHSVTable(color16)
|
||||
for i, rgb := range color256 {
|
||||
c := t.find(rgb)
|
||||
n256foreAttr[i] = c.foregroundAttr()
|
||||
n256backAttr[i] = c.backgroundAttr()
|
||||
}
|
||||
}
|
195
vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go
generated
vendored
Normal file
195
vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go
generated
vendored
Normal file
@@ -0,0 +1,195 @@
|
||||
package formatter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ColorableStdOut and ColorableStdErr enable color output support on Windows
|
||||
var ColorableStdOut = newColorable(os.Stdout)
|
||||
var ColorableStdErr = newColorable(os.Stderr)
|
||||
|
||||
const COLS = 80
|
||||
|
||||
type ColorMode uint8
|
||||
|
||||
const (
|
||||
ColorModeNone ColorMode = iota
|
||||
ColorModeTerminal
|
||||
ColorModePassthrough
|
||||
)
|
||||
|
||||
var SingletonFormatter = New(ColorModeTerminal)
|
||||
|
||||
func F(format string, args ...interface{}) string {
|
||||
return SingletonFormatter.F(format, args...)
|
||||
}
|
||||
|
||||
func Fi(indentation uint, format string, args ...interface{}) string {
|
||||
return SingletonFormatter.Fi(indentation, format, args...)
|
||||
}
|
||||
|
||||
func Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string {
|
||||
return SingletonFormatter.Fiw(indentation, maxWidth, format, args...)
|
||||
}
|
||||
|
||||
type Formatter struct {
|
||||
ColorMode ColorMode
|
||||
colors map[string]string
|
||||
styleRe *regexp.Regexp
|
||||
preserveColorStylingTags bool
|
||||
}
|
||||
|
||||
func NewWithNoColorBool(noColor bool) Formatter {
|
||||
if noColor {
|
||||
return New(ColorModeNone)
|
||||
}
|
||||
return New(ColorModeTerminal)
|
||||
}
|
||||
|
||||
func New(colorMode ColorMode) Formatter {
|
||||
f := Formatter{
|
||||
ColorMode: colorMode,
|
||||
colors: map[string]string{
|
||||
"/": "\x1b[0m",
|
||||
"bold": "\x1b[1m",
|
||||
"underline": "\x1b[4m",
|
||||
|
||||
"red": "\x1b[38;5;9m",
|
||||
"orange": "\x1b[38;5;214m",
|
||||
"coral": "\x1b[38;5;204m",
|
||||
"magenta": "\x1b[38;5;13m",
|
||||
"green": "\x1b[38;5;10m",
|
||||
"dark-green": "\x1b[38;5;28m",
|
||||
"yellow": "\x1b[38;5;11m",
|
||||
"light-yellow": "\x1b[38;5;228m",
|
||||
"cyan": "\x1b[38;5;14m",
|
||||
"gray": "\x1b[38;5;243m",
|
||||
"light-gray": "\x1b[38;5;246m",
|
||||
"blue": "\x1b[38;5;12m",
|
||||
},
|
||||
}
|
||||
colors := []string{}
|
||||
for color := range f.colors {
|
||||
colors = append(colors, color)
|
||||
}
|
||||
f.styleRe = regexp.MustCompile("{{(" + strings.Join(colors, "|") + ")}}")
|
||||
return f
|
||||
}
|
||||
|
||||
func (f Formatter) F(format string, args ...interface{}) string {
|
||||
return f.Fi(0, format, args...)
|
||||
}
|
||||
|
||||
func (f Formatter) Fi(indentation uint, format string, args ...interface{}) string {
|
||||
return f.Fiw(indentation, 0, format, args...)
|
||||
}
|
||||
|
||||
func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string {
|
||||
out := fmt.Sprintf(f.style(format), args...)
|
||||
|
||||
if indentation == 0 && maxWidth == 0 {
|
||||
return out
|
||||
}
|
||||
|
||||
lines := strings.Split(out, "\n")
|
||||
|
||||
if maxWidth != 0 {
|
||||
outLines := []string{}
|
||||
|
||||
maxWidth = maxWidth - indentation*2
|
||||
for _, line := range lines {
|
||||
if f.length(line) <= maxWidth {
|
||||
outLines = append(outLines, line)
|
||||
continue
|
||||
}
|
||||
words := strings.Split(line, " ")
|
||||
outWords := []string{words[0]}
|
||||
length := uint(f.length(words[0]))
|
||||
for _, word := range words[1:] {
|
||||
wordLength := f.length(word)
|
||||
if length+wordLength+1 <= maxWidth {
|
||||
length += wordLength + 1
|
||||
outWords = append(outWords, word)
|
||||
continue
|
||||
}
|
||||
outLines = append(outLines, strings.Join(outWords, " "))
|
||||
outWords = []string{word}
|
||||
length = wordLength
|
||||
}
|
||||
if len(outWords) > 0 {
|
||||
outLines = append(outLines, strings.Join(outWords, " "))
|
||||
}
|
||||
}
|
||||
|
||||
lines = outLines
|
||||
}
|
||||
|
||||
if indentation == 0 {
|
||||
return strings.Join(lines, "\n")
|
||||
}
|
||||
|
||||
padding := strings.Repeat(" ", int(indentation))
|
||||
for i := range lines {
|
||||
if lines[i] != "" {
|
||||
lines[i] = padding + lines[i]
|
||||
}
|
||||
}
|
||||
|
||||
return strings.Join(lines, "\n")
|
||||
}
|
||||
|
||||
func (f Formatter) length(styled string) uint {
|
||||
n := uint(0)
|
||||
inStyle := false
|
||||
for _, b := range styled {
|
||||
if inStyle {
|
||||
if b == 'm' {
|
||||
inStyle = false
|
||||
}
|
||||
continue
|
||||
}
|
||||
if b == '\x1b' {
|
||||
inStyle = true
|
||||
continue
|
||||
}
|
||||
n += 1
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (f Formatter) CycleJoin(elements []string, joiner string, cycle []string) string {
|
||||
if len(elements) == 0 {
|
||||
return ""
|
||||
}
|
||||
n := len(cycle)
|
||||
out := ""
|
||||
for i, text := range elements {
|
||||
out += cycle[i%n] + text
|
||||
if i < len(elements)-1 {
|
||||
out += joiner
|
||||
}
|
||||
}
|
||||
out += "{{/}}"
|
||||
return f.style(out)
|
||||
}
|
||||
|
||||
func (f Formatter) style(s string) string {
|
||||
switch f.ColorMode {
|
||||
case ColorModeNone:
|
||||
return f.styleRe.ReplaceAllString(s, "")
|
||||
case ColorModePassthrough:
|
||||
return s
|
||||
case ColorModeTerminal:
|
||||
return f.styleRe.ReplaceAllStringFunc(s, func(match string) string {
|
||||
if out, ok := f.colors[strings.Trim(match, "{}")]; ok {
|
||||
return out
|
||||
}
|
||||
return match
|
||||
})
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
63
vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go
generated
vendored
Normal file
63
vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go
generated
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
package build
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/ginkgo/command"
|
||||
"github.com/onsi/ginkgo/v2/ginkgo/internal"
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
)
|
||||
|
||||
func BuildBuildCommand() command.Command {
|
||||
var cliConfig = types.NewDefaultCLIConfig()
|
||||
var goFlagsConfig = types.NewDefaultGoFlagsConfig()
|
||||
|
||||
flags, err := types.BuildBuildCommandFlagSet(&cliConfig, &goFlagsConfig)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return command.Command{
|
||||
Name: "build",
|
||||
Flags: flags,
|
||||
Usage: "ginkgo build <FLAGS> <PACKAGES>",
|
||||
ShortDoc: "Build the passed in <PACKAGES> (or the package in the current directory if left blank).",
|
||||
DocLink: "precompiling-suites",
|
||||
Command: func(args []string, _ []string) {
|
||||
var errors []error
|
||||
cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig)
|
||||
command.AbortIfErrors("Ginkgo detected configuration issues:", errors)
|
||||
|
||||
buildSpecs(args, cliConfig, goFlagsConfig)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig) {
|
||||
suites := internal.FindSuites(args, cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter)
|
||||
if len(suites) == 0 {
|
||||
command.AbortWith("Found no test suites")
|
||||
}
|
||||
|
||||
internal.VerifyCLIAndFrameworkVersion(suites)
|
||||
|
||||
opc := internal.NewOrderedParallelCompiler(cliConfig.ComputedNumCompilers())
|
||||
opc.StartCompiling(suites, goFlagsConfig)
|
||||
|
||||
for {
|
||||
suiteIdx, suite := opc.Next()
|
||||
if suiteIdx >= len(suites) {
|
||||
break
|
||||
}
|
||||
suites[suiteIdx] = suite
|
||||
if suite.State.Is(internal.TestSuiteStateFailedToCompile) {
|
||||
fmt.Println(suite.CompilationError.Error())
|
||||
} else {
|
||||
fmt.Printf("Compiled %s.test\n", suite.PackageName)
|
||||
}
|
||||
}
|
||||
|
||||
if suites.CountWithState(internal.TestSuiteStateFailedToCompile) > 0 {
|
||||
command.AbortWith("Failed to compile all tests")
|
||||
}
|
||||
}
|
61
vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go
generated
vendored
Normal file
61
vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go
generated
vendored
Normal file
@@ -0,0 +1,61 @@
|
||||
package command
|
||||
|
||||
import "fmt"
|
||||
|
||||
type AbortDetails struct {
|
||||
ExitCode int
|
||||
Error error
|
||||
EmitUsage bool
|
||||
}
|
||||
|
||||
func Abort(details AbortDetails) {
|
||||
panic(details)
|
||||
}
|
||||
|
||||
func AbortGracefullyWith(format string, args ...interface{}) {
|
||||
Abort(AbortDetails{
|
||||
ExitCode: 0,
|
||||
Error: fmt.Errorf(format, args...),
|
||||
EmitUsage: false,
|
||||
})
|
||||
}
|
||||
|
||||
func AbortWith(format string, args ...interface{}) {
|
||||
Abort(AbortDetails{
|
||||
ExitCode: 1,
|
||||
Error: fmt.Errorf(format, args...),
|
||||
EmitUsage: false,
|
||||
})
|
||||
}
|
||||
|
||||
func AbortWithUsage(format string, args ...interface{}) {
|
||||
Abort(AbortDetails{
|
||||
ExitCode: 1,
|
||||
Error: fmt.Errorf(format, args...),
|
||||
EmitUsage: true,
|
||||
})
|
||||
}
|
||||
|
||||
func AbortIfError(preamble string, err error) {
|
||||
if err != nil {
|
||||
Abort(AbortDetails{
|
||||
ExitCode: 1,
|
||||
Error: fmt.Errorf("%s\n%s", preamble, err.Error()),
|
||||
EmitUsage: false,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func AbortIfErrors(preamble string, errors []error) {
|
||||
if len(errors) > 0 {
|
||||
out := ""
|
||||
for _, err := range errors {
|
||||
out += err.Error()
|
||||
}
|
||||
Abort(AbortDetails{
|
||||
ExitCode: 1,
|
||||
Error: fmt.Errorf("%s\n%s", preamble, out),
|
||||
EmitUsage: false,
|
||||
})
|
||||
}
|
||||
}
|
50
vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go
generated
vendored
Normal file
50
vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go
generated
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
package command
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/formatter"
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
)
|
||||
|
||||
type Command struct {
|
||||
Name string
|
||||
Flags types.GinkgoFlagSet
|
||||
Usage string
|
||||
ShortDoc string
|
||||
Documentation string
|
||||
DocLink string
|
||||
Command func(args []string, additionalArgs []string)
|
||||
}
|
||||
|
||||
func (c Command) Run(args []string, additionalArgs []string) {
|
||||
args, err := c.Flags.Parse(args)
|
||||
if err != nil {
|
||||
AbortWithUsage(err.Error())
|
||||
}
|
||||
|
||||
c.Command(args, additionalArgs)
|
||||
}
|
||||
|
||||
func (c Command) EmitUsage(writer io.Writer) {
|
||||
fmt.Fprintln(writer, formatter.F("{{bold}}"+c.Usage+"{{/}}"))
|
||||
fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(c.Usage))))
|
||||
if c.ShortDoc != "" {
|
||||
fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.ShortDoc))
|
||||
fmt.Fprintln(writer, "")
|
||||
}
|
||||
if c.Documentation != "" {
|
||||
fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.Documentation))
|
||||
fmt.Fprintln(writer, "")
|
||||
}
|
||||
if c.DocLink != "" {
|
||||
fmt.Fprintln(writer, formatter.Fi(0, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}http://onsi.github.io/ginkgo/#%s{{/}}", c.DocLink))
|
||||
fmt.Fprintln(writer, "")
|
||||
}
|
||||
flagUsage := c.Flags.Usage()
|
||||
if flagUsage != "" {
|
||||
fmt.Fprintf(writer, formatter.F(flagUsage))
|
||||
}
|
||||
}
|
182
vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go
generated
vendored
Normal file
182
vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go
generated
vendored
Normal file
@@ -0,0 +1,182 @@
|
||||
package command
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/formatter"
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
)
|
||||
|
||||
type Program struct {
|
||||
Name string
|
||||
Heading string
|
||||
Commands []Command
|
||||
DefaultCommand Command
|
||||
DeprecatedCommands []DeprecatedCommand
|
||||
|
||||
//For testing - leave as nil in production
|
||||
OutWriter io.Writer
|
||||
ErrWriter io.Writer
|
||||
Exiter func(code int)
|
||||
}
|
||||
|
||||
type DeprecatedCommand struct {
|
||||
Name string
|
||||
Deprecation types.Deprecation
|
||||
}
|
||||
|
||||
func (p Program) RunAndExit(osArgs []string) {
|
||||
var command Command
|
||||
deprecationTracker := types.NewDeprecationTracker()
|
||||
if p.Exiter == nil {
|
||||
p.Exiter = os.Exit
|
||||
}
|
||||
if p.OutWriter == nil {
|
||||
p.OutWriter = formatter.ColorableStdOut
|
||||
}
|
||||
if p.ErrWriter == nil {
|
||||
p.ErrWriter = formatter.ColorableStdErr
|
||||
}
|
||||
|
||||
defer func() {
|
||||
exitCode := 0
|
||||
|
||||
if r := recover(); r != nil {
|
||||
details, ok := r.(AbortDetails)
|
||||
if !ok {
|
||||
panic(r)
|
||||
}
|
||||
|
||||
if details.Error != nil {
|
||||
fmt.Fprintln(p.ErrWriter, formatter.F("{{red}}{{bold}}%s %s{{/}} {{red}}failed{{/}}", p.Name, command.Name))
|
||||
fmt.Fprintln(p.ErrWriter, formatter.Fi(1, details.Error.Error()))
|
||||
}
|
||||
if details.EmitUsage {
|
||||
if details.Error != nil {
|
||||
fmt.Fprintln(p.ErrWriter, "")
|
||||
}
|
||||
command.EmitUsage(p.ErrWriter)
|
||||
}
|
||||
exitCode = details.ExitCode
|
||||
}
|
||||
|
||||
command.Flags.ValidateDeprecations(deprecationTracker)
|
||||
if deprecationTracker.DidTrackDeprecations() {
|
||||
fmt.Fprintln(p.ErrWriter, deprecationTracker.DeprecationsReport())
|
||||
}
|
||||
p.Exiter(exitCode)
|
||||
return
|
||||
}()
|
||||
|
||||
args, additionalArgs := []string{}, []string{}
|
||||
|
||||
foundDelimiter := false
|
||||
for _, arg := range osArgs[1:] {
|
||||
if !foundDelimiter {
|
||||
if arg == "--" {
|
||||
foundDelimiter = true
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if foundDelimiter {
|
||||
additionalArgs = append(additionalArgs, arg)
|
||||
} else {
|
||||
args = append(args, arg)
|
||||
}
|
||||
}
|
||||
|
||||
command = p.DefaultCommand
|
||||
if len(args) > 0 {
|
||||
p.handleHelpRequestsAndExit(p.OutWriter, args)
|
||||
if command.Name == args[0] {
|
||||
args = args[1:]
|
||||
} else {
|
||||
for _, deprecatedCommand := range p.DeprecatedCommands {
|
||||
if deprecatedCommand.Name == args[0] {
|
||||
deprecationTracker.TrackDeprecation(deprecatedCommand.Deprecation)
|
||||
return
|
||||
}
|
||||
}
|
||||
for _, tryCommand := range p.Commands {
|
||||
if tryCommand.Name == args[0] {
|
||||
command, args = tryCommand, args[1:]
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
command.Run(args, additionalArgs)
|
||||
}
|
||||
|
||||
func (p Program) handleHelpRequestsAndExit(writer io.Writer, args []string) {
|
||||
if len(args) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
matchesHelpFlag := func(args ...string) bool {
|
||||
for _, arg := range args {
|
||||
if arg == "--help" || arg == "-help" || arg == "-h" || arg == "--h" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
if len(args) == 1 {
|
||||
if args[0] == "help" || matchesHelpFlag(args[0]) {
|
||||
p.EmitUsage(writer)
|
||||
Abort(AbortDetails{})
|
||||
}
|
||||
} else {
|
||||
var name string
|
||||
if args[0] == "help" || matchesHelpFlag(args[0]) {
|
||||
name = args[1]
|
||||
} else if matchesHelpFlag(args[1:]...) {
|
||||
name = args[0]
|
||||
} else {
|
||||
return
|
||||
}
|
||||
|
||||
if p.DefaultCommand.Name == name || p.Name == name {
|
||||
p.DefaultCommand.EmitUsage(writer)
|
||||
Abort(AbortDetails{})
|
||||
}
|
||||
for _, command := range p.Commands {
|
||||
if command.Name == name {
|
||||
command.EmitUsage(writer)
|
||||
Abort(AbortDetails{})
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Fprintln(writer, formatter.F("{{red}}Unknown Command: {{bold}}%s{{/}}", name))
|
||||
fmt.Fprintln(writer, "")
|
||||
p.EmitUsage(writer)
|
||||
Abort(AbortDetails{ExitCode: 1})
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p Program) EmitUsage(writer io.Writer) {
|
||||
fmt.Fprintln(writer, formatter.F(p.Heading))
|
||||
fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(p.Heading))))
|
||||
fmt.Fprintln(writer, formatter.F("For usage information for a command, run {{bold}}%s help COMMAND{{/}}.", p.Name))
|
||||
fmt.Fprintln(writer, formatter.F("For usage information for the default command, run {{bold}}%s help %s{{/}} or {{bold}}%s help %s{{/}}.", p.Name, p.Name, p.Name, p.DefaultCommand.Name))
|
||||
fmt.Fprintln(writer, "")
|
||||
fmt.Fprintln(writer, formatter.F("The following commands are available:"))
|
||||
|
||||
fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} or %s {{bold}}%s{{/}} - {{gray}}%s{{/}}", p.Name, p.Name, p.DefaultCommand.Name, p.DefaultCommand.Usage))
|
||||
if p.DefaultCommand.ShortDoc != "" {
|
||||
fmt.Fprintln(writer, formatter.Fi(2, p.DefaultCommand.ShortDoc))
|
||||
}
|
||||
|
||||
for _, command := range p.Commands {
|
||||
fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} - {{gray}}%s{{/}}", command.Name, command.Usage))
|
||||
if command.ShortDoc != "" {
|
||||
fmt.Fprintln(writer, formatter.Fi(2, command.ShortDoc))
|
||||
}
|
||||
}
|
||||
}
|
48
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go
generated
vendored
Normal file
48
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go
generated
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
package generators
|
||||
|
||||
var bootstrapText = `package {{.Package}}
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
{{.GinkgoImport}}
|
||||
{{.GomegaImport}}
|
||||
)
|
||||
|
||||
func Test{{.FormattedName}}(t *testing.T) {
|
||||
{{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail)
|
||||
{{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite")
|
||||
}
|
||||
`
|
||||
|
||||
var agoutiBootstrapText = `package {{.Package}}
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
{{.GinkgoImport}}
|
||||
{{.GomegaImport}}
|
||||
"github.com/sclevine/agouti"
|
||||
)
|
||||
|
||||
func Test{{.FormattedName}}(t *testing.T) {
|
||||
{{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail)
|
||||
{{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite")
|
||||
}
|
||||
|
||||
var agoutiDriver *agouti.WebDriver
|
||||
|
||||
var _ = {{.GinkgoPackage}}BeforeSuite(func() {
|
||||
// Choose a WebDriver:
|
||||
|
||||
agoutiDriver = agouti.PhantomJS()
|
||||
// agoutiDriver = agouti.Selenium()
|
||||
// agoutiDriver = agouti.ChromeDriver()
|
||||
|
||||
{{.GomegaPackage}}Expect(agoutiDriver.Start()).To({{.GomegaPackage}}Succeed())
|
||||
})
|
||||
|
||||
var _ = {{.GinkgoPackage}}AfterSuite(func() {
|
||||
{{.GomegaPackage}}Expect(agoutiDriver.Stop()).To({{.GomegaPackage}}Succeed())
|
||||
})
|
||||
`
|
113
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go
generated
vendored
Normal file
113
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go
generated
vendored
Normal file
@@ -0,0 +1,113 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"text/template"
|
||||
|
||||
sprig "github.com/go-task/slim-sprig"
|
||||
"github.com/onsi/ginkgo/v2/ginkgo/command"
|
||||
"github.com/onsi/ginkgo/v2/ginkgo/internal"
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
)
|
||||
|
||||
func BuildBootstrapCommand() command.Command {
|
||||
conf := GeneratorsConfig{}
|
||||
flags, err := types.NewGinkgoFlagSet(
|
||||
types.GinkgoFlags{
|
||||
{Name: "agouti", KeyPath: "Agouti",
|
||||
Usage: "If set, bootstrap will generate a bootstrap file for writing Agouti tests"},
|
||||
{Name: "nodot", KeyPath: "NoDot",
|
||||
Usage: "If set, bootstrap will generate a bootstrap test file that does not dot-import ginkgo and gomega"},
|
||||
{Name: "internal", KeyPath: "Internal",
|
||||
Usage: "If set, bootstrap will generate a bootstrap test file that uses the regular package name (i.e. `package X`, not `package X_test`)"},
|
||||
{Name: "template", KeyPath: "CustomTemplate",
|
||||
UsageArgument: "template-file",
|
||||
Usage: "If specified, generate will use the contents of the file passed as the bootstrap template"},
|
||||
},
|
||||
&conf,
|
||||
types.GinkgoFlagSections{},
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return command.Command{
|
||||
Name: "bootstrap",
|
||||
Usage: "ginkgo bootstrap",
|
||||
ShortDoc: "Bootstrap a test suite for the current package",
|
||||
Documentation: `Tests written in Ginkgo and Gomega require a small amount of boilerplate to hook into Go's testing infrastructure.
|
||||
|
||||
{{bold}}ginkgo bootstrap{{/}} generates this boilerplate for you in a file named X_suite_test.go where X is the name of the package under test.`,
|
||||
DocLink: "generators",
|
||||
Flags: flags,
|
||||
Command: func(_ []string, _ []string) {
|
||||
generateBootstrap(conf)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type bootstrapData struct {
|
||||
Package string
|
||||
FormattedName string
|
||||
|
||||
GinkgoImport string
|
||||
GomegaImport string
|
||||
GinkgoPackage string
|
||||
GomegaPackage string
|
||||
}
|
||||
|
||||
func generateBootstrap(conf GeneratorsConfig) {
|
||||
packageName, bootstrapFilePrefix, formattedName := getPackageAndFormattedName()
|
||||
|
||||
data := bootstrapData{
|
||||
Package: determinePackageName(packageName, conf.Internal),
|
||||
FormattedName: formattedName,
|
||||
|
||||
GinkgoImport: `. "github.com/onsi/ginkgo/v2"`,
|
||||
GomegaImport: `. "github.com/onsi/gomega"`,
|
||||
GinkgoPackage: "",
|
||||
GomegaPackage: "",
|
||||
}
|
||||
|
||||
if conf.NoDot {
|
||||
data.GinkgoImport = `"github.com/onsi/ginkgo/v2"`
|
||||
data.GomegaImport = `"github.com/onsi/gomega"`
|
||||
data.GinkgoPackage = `ginkgo.`
|
||||
data.GomegaPackage = `gomega.`
|
||||
}
|
||||
|
||||
targetFile := fmt.Sprintf("%s_suite_test.go", bootstrapFilePrefix)
|
||||
if internal.FileExists(targetFile) {
|
||||
command.AbortWith("{{bold}}%s{{/}} already exists", targetFile)
|
||||
} else {
|
||||
fmt.Printf("Generating ginkgo test suite bootstrap for %s in:\n\t%s\n", packageName, targetFile)
|
||||
}
|
||||
|
||||
f, err := os.Create(targetFile)
|
||||
command.AbortIfError("Failed to create file:", err)
|
||||
defer f.Close()
|
||||
|
||||
var templateText string
|
||||
if conf.CustomTemplate != "" {
|
||||
tpl, err := os.ReadFile(conf.CustomTemplate)
|
||||
command.AbortIfError("Failed to read custom bootstrap file:", err)
|
||||
templateText = string(tpl)
|
||||
} else if conf.Agouti {
|
||||
templateText = agoutiBootstrapText
|
||||
} else {
|
||||
templateText = bootstrapText
|
||||
}
|
||||
|
||||
bootstrapTemplate, err := template.New("bootstrap").Funcs(sprig.TxtFuncMap()).Parse(templateText)
|
||||
command.AbortIfError("Failed to parse bootstrap template:", err)
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
bootstrapTemplate.Execute(buf, data)
|
||||
|
||||
buf.WriteTo(f)
|
||||
|
||||
internal.GoFmt(targetFile)
|
||||
}
|
239
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go
generated
vendored
Normal file
239
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go
generated
vendored
Normal file
@@ -0,0 +1,239 @@
|
||||
package generators
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
sprig "github.com/go-task/slim-sprig"
|
||||
"github.com/onsi/ginkgo/v2/ginkgo/command"
|
||||
"github.com/onsi/ginkgo/v2/ginkgo/internal"
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
)
|
||||
|
||||
func BuildGenerateCommand() command.Command {
|
||||
conf := GeneratorsConfig{}
|
||||
flags, err := types.NewGinkgoFlagSet(
|
||||
types.GinkgoFlags{
|
||||
{Name: "agouti", KeyPath: "Agouti",
|
||||
Usage: "If set, generate will create a test file for writing Agouti tests"},
|
||||
{Name: "nodot", KeyPath: "NoDot",
|
||||
Usage: "If set, generate will create a test file that does not dot-import ginkgo and gomega"},
|
||||
{Name: "internal", KeyPath: "Internal",
|
||||
Usage: "If set, generate will create a test file that uses the regular package name (i.e. `package X`, not `package X_test`)"},
|
||||
{Name: "template", KeyPath: "CustomTemplate",
|
||||
UsageArgument: "template-file",
|
||||
Usage: "If specified, generate will use the contents of the file passed as the test file template"},
|
||||
},
|
||||
&conf,
|
||||
types.GinkgoFlagSections{},
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return command.Command{
|
||||
Name: "generate",
|
||||
Usage: "ginkgo generate <filename(s)>",
|
||||
ShortDoc: "Generate a test file named <filename>_test.go",
|
||||
Documentation: `If the optional <filename> argument is omitted, a file named after the package in the current directory will be created.
|
||||
|
||||
You can pass multiple <filename(s)> to generate multiple files simultaneously. The resulting files are named <filename>_test.go.
|
||||
|
||||
You can also pass a <filename> of the form "file.go" and generate will emit "file_test.go".`,
|
||||
DocLink: "generators",
|
||||
Flags: flags,
|
||||
Command: func(args []string, _ []string) {
|
||||
generateTestFiles(conf, args)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type specData struct {
|
||||
Package string
|
||||
Subject string
|
||||
PackageImportPath string
|
||||
ImportPackage bool
|
||||
|
||||
GinkgoImport string
|
||||
GomegaImport string
|
||||
GinkgoPackage string
|
||||
GomegaPackage string
|
||||
}
|
||||
|
||||
func generateTestFiles(conf GeneratorsConfig, args []string) {
|
||||
subjects := args
|
||||
if len(subjects) == 0 {
|
||||
subjects = []string{""}
|
||||
}
|
||||
for _, subject := range subjects {
|
||||
generateTestFileForSubject(subject, conf)
|
||||
}
|
||||
}
|
||||
|
||||
func generateTestFileForSubject(subject string, conf GeneratorsConfig) {
|
||||
packageName, specFilePrefix, formattedName := getPackageAndFormattedName()
|
||||
if subject != "" {
|
||||
specFilePrefix = formatSubject(subject)
|
||||
formattedName = prettifyName(specFilePrefix)
|
||||
}
|
||||
|
||||
if conf.Internal {
|
||||
specFilePrefix = specFilePrefix + "_internal"
|
||||
}
|
||||
|
||||
data := specData{
|
||||
Package: determinePackageName(packageName, conf.Internal),
|
||||
Subject: formattedName,
|
||||
PackageImportPath: getPackageImportPath(),
|
||||
ImportPackage: !conf.Internal,
|
||||
|
||||
GinkgoImport: `. "github.com/onsi/ginkgo/v2"`,
|
||||
GomegaImport: `. "github.com/onsi/gomega"`,
|
||||
GinkgoPackage: "",
|
||||
GomegaPackage: "",
|
||||
}
|
||||
|
||||
if conf.NoDot {
|
||||
data.GinkgoImport = `"github.com/onsi/ginkgo/v2"`
|
||||
data.GomegaImport = `"github.com/onsi/gomega"`
|
||||
data.GinkgoPackage = `ginkgo.`
|
||||
data.GomegaPackage = `gomega.`
|
||||
}
|
||||
|
||||
targetFile := fmt.Sprintf("%s_test.go", specFilePrefix)
|
||||
if internal.FileExists(targetFile) {
|
||||
command.AbortWith("{{bold}}%s{{/}} already exists", targetFile)
|
||||
} else {
|
||||
fmt.Printf("Generating ginkgo test for %s in:\n %s\n", data.Subject, targetFile)
|
||||
}
|
||||
|
||||
f, err := os.Create(targetFile)
|
||||
command.AbortIfError("Failed to create test file:", err)
|
||||
defer f.Close()
|
||||
|
||||
var templateText string
|
||||
if conf.CustomTemplate != "" {
|
||||
tpl, err := os.ReadFile(conf.CustomTemplate)
|
||||
command.AbortIfError("Failed to read custom template file:", err)
|
||||
templateText = string(tpl)
|
||||
} else if conf.Agouti {
|
||||
templateText = agoutiSpecText
|
||||
} else {
|
||||
templateText = specText
|
||||
}
|
||||
|
||||
specTemplate, err := template.New("spec").Funcs(sprig.TxtFuncMap()).Parse(templateText)
|
||||
command.AbortIfError("Failed to read parse test template:", err)
|
||||
|
||||
specTemplate.Execute(f, data)
|
||||
internal.GoFmt(targetFile)
|
||||
}
|
||||
|
||||
func formatSubject(name string) string {
|
||||
name = strings.ReplaceAll(name, "-", "_")
|
||||
name = strings.ReplaceAll(name, " ", "_")
|
||||
name = strings.Split(name, ".go")[0]
|
||||
name = strings.Split(name, "_test")[0]
|
||||
return name
|
||||
}
|
||||
|
||||
// moduleName returns module name from go.mod from given module root directory
|
||||
func moduleName(modRoot string) string {
|
||||
modFile, err := os.Open(filepath.Join(modRoot, "go.mod"))
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
mod := make([]byte, 128)
|
||||
_, err = modFile.Read(mod)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
slashSlash := []byte("//")
|
||||
moduleStr := []byte("module")
|
||||
|
||||
for len(mod) > 0 {
|
||||
line := mod
|
||||
mod = nil
|
||||
if i := bytes.IndexByte(line, '\n'); i >= 0 {
|
||||
line, mod = line[:i], line[i+1:]
|
||||
}
|
||||
if i := bytes.Index(line, slashSlash); i >= 0 {
|
||||
line = line[:i]
|
||||
}
|
||||
line = bytes.TrimSpace(line)
|
||||
if !bytes.HasPrefix(line, moduleStr) {
|
||||
continue
|
||||
}
|
||||
line = line[len(moduleStr):]
|
||||
n := len(line)
|
||||
line = bytes.TrimSpace(line)
|
||||
if len(line) == n || len(line) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
if line[0] == '"' || line[0] == '`' {
|
||||
p, err := strconv.Unquote(string(line))
|
||||
if err != nil {
|
||||
return "" // malformed quoted string or multiline module path
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
return string(line)
|
||||
}
|
||||
|
||||
return "" // missing module path
|
||||
}
|
||||
|
||||
func findModuleRoot(dir string) (root string) {
|
||||
dir = filepath.Clean(dir)
|
||||
|
||||
// Look for enclosing go.mod.
|
||||
for {
|
||||
if fi, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() {
|
||||
return dir
|
||||
}
|
||||
d := filepath.Dir(dir)
|
||||
if d == dir {
|
||||
break
|
||||
}
|
||||
dir = d
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func getPackageImportPath() string {
|
||||
workingDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
|
||||
sep := string(filepath.Separator)
|
||||
|
||||
// Try go.mod file first
|
||||
modRoot := findModuleRoot(workingDir)
|
||||
if modRoot != "" {
|
||||
modName := moduleName(modRoot)
|
||||
if modName != "" {
|
||||
cd := strings.ReplaceAll(workingDir, modRoot, "")
|
||||
cd = strings.ReplaceAll(cd, sep, "/")
|
||||
return modName + cd
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback to GOPATH structure
|
||||
paths := strings.Split(workingDir, sep+"src"+sep)
|
||||
if len(paths) == 1 {
|
||||
fmt.Printf("\nCouldn't identify package import path.\n\n\tginkgo generate\n\nMust be run within a package directory under $GOPATH/src/...\nYou're going to have to change UNKNOWN_PACKAGE_PATH in the generated file...\n\n")
|
||||
return "UNKNOWN_PACKAGE_PATH"
|
||||
}
|
||||
return filepath.ToSlash(paths[len(paths)-1])
|
||||
}
|
41
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go
generated
vendored
Normal file
@@ -0,0 +1,41 @@
package generators

var specText = `package {{.Package}}

import (
	{{.GinkgoImport}}
	{{.GomegaImport}}

	{{if .ImportPackage}}"{{.PackageImportPath}}"{{end}}
)

var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() {

})
`

var agoutiSpecText = `package {{.Package}}

import (
	{{.GinkgoImport}}
	{{.GomegaImport}}
	"github.com/sclevine/agouti"
	. "github.com/sclevine/agouti/matchers"

	{{if .ImportPackage}}"{{.PackageImportPath}}"{{end}}
)

var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() {
	var page *agouti.Page

	{{.GinkgoPackage}}BeforeEach(func() {
		var err error
		page, err = agoutiDriver.NewPage()
		{{.GomegaPackage}}Expect(err).NotTo({{.GomegaPackage}}HaveOccurred())
	})

	{{.GinkgoPackage}}AfterEach(func() {
		{{.GomegaPackage}}Expect(page.Destroy()).To({{.GomegaPackage}}Succeed())
	})
})
`
63
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go
generated
vendored
Normal file
@@ -0,0 +1,63 @@
package generators

import (
	"go/build"
	"os"
	"path/filepath"
	"strconv"
	"strings"

	"github.com/onsi/ginkgo/v2/ginkgo/command"
)

type GeneratorsConfig struct {
	Agouti, NoDot, Internal bool
	CustomTemplate          string
}

func getPackageAndFormattedName() (string, string, string) {
	path, err := os.Getwd()
	command.AbortIfError("Could not get current working directory:", err)

	dirName := strings.ReplaceAll(filepath.Base(path), "-", "_")
	dirName = strings.ReplaceAll(dirName, " ", "_")

	pkg, err := build.ImportDir(path, 0)
	packageName := pkg.Name
	if err != nil {
		packageName = ensureLegalPackageName(dirName)
	}

	formattedName := prettifyName(filepath.Base(path))
	return packageName, dirName, formattedName
}

func ensureLegalPackageName(name string) string {
	if name == "_" {
		return "underscore"
	}
	if len(name) == 0 {
		return "empty"
	}
	n, isDigitErr := strconv.Atoi(string(name[0]))
	if isDigitErr == nil {
		return []string{"zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"}[n] + name[1:]
	}
	return name
}

func prettifyName(name string) string {
	name = strings.ReplaceAll(name, "-", " ")
	name = strings.ReplaceAll(name, "_", " ")
	name = strings.Title(name)
	name = strings.ReplaceAll(name, " ", "")
	return name
}

func determinePackageName(name string, internal bool) string {
	if internal {
		return name
	}

	return name + "_test"
}
152
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go
generated
vendored
Normal file
@@ -0,0 +1,152 @@
package internal

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"sync"

	"github.com/onsi/ginkgo/v2/types"
)

func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite {
	if suite.PathToCompiledTest != "" {
		return suite
	}

	suite.CompilationError = nil

	path, err := filepath.Abs(filepath.Join(suite.Path, suite.PackageName+".test"))
	if err != nil {
		suite.State = TestSuiteStateFailedToCompile
		suite.CompilationError = fmt.Errorf("Failed to compute compilation target path:\n%s", err.Error())
		return suite
	}

	args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, path, "./")
	if err != nil {
		suite.State = TestSuiteStateFailedToCompile
		suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error())
		return suite
	}

	cmd := exec.Command("go", args...)
	cmd.Dir = suite.Path
	output, err := cmd.CombinedOutput()
	if err != nil {
		if len(output) > 0 {
			suite.State = TestSuiteStateFailedToCompile
			suite.CompilationError = fmt.Errorf("Failed to compile %s:\n\n%s", suite.PackageName, output)
		} else {
			suite.State = TestSuiteStateFailedToCompile
			suite.CompilationError = fmt.Errorf("Failed to compile %s\n%s", suite.PackageName, err.Error())
		}
		return suite
	}

	if strings.Contains(string(output), "[no test files]") {
		suite.State = TestSuiteStateSkippedDueToEmptyCompilation
		return suite
	}

	if len(output) > 0 {
		fmt.Println(string(output))
	}

	if !FileExists(path) {
		suite.State = TestSuiteStateFailedToCompile
		suite.CompilationError = fmt.Errorf("Failed to compile %s:\nOutput file %s could not be found", suite.PackageName, path)
		return suite
	}

	suite.State = TestSuiteStateCompiled
	suite.PathToCompiledTest = path
	return suite
}

func Cleanup(goFlagsConfig types.GoFlagsConfig, suites ...TestSuite) {
	if goFlagsConfig.BinaryMustBePreserved() {
		return
	}
	for _, suite := range suites {
		if !suite.Precompiled {
			os.Remove(suite.PathToCompiledTest)
		}
	}
}

type parallelSuiteBundle struct {
	suite    TestSuite
	compiled chan TestSuite
}

type OrderedParallelCompiler struct {
	mutex        *sync.Mutex
	stopped      bool
	numCompilers int

	idx                int
	numSuites          int
	completionChannels []chan TestSuite
}

func NewOrderedParallelCompiler(numCompilers int) *OrderedParallelCompiler {
	return &OrderedParallelCompiler{
		mutex:        &sync.Mutex{},
		numCompilers: numCompilers,
	}
}

func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsConfig types.GoFlagsConfig) {
	opc.stopped = false
	opc.idx = 0
	opc.numSuites = len(suites)
	opc.completionChannels = make([]chan TestSuite, opc.numSuites)

	toCompile := make(chan parallelSuiteBundle, opc.numCompilers)
	for compiler := 0; compiler < opc.numCompilers; compiler++ {
		go func() {
			for bundle := range toCompile {
				c, suite := bundle.compiled, bundle.suite
				opc.mutex.Lock()
				stopped := opc.stopped
				opc.mutex.Unlock()
				if !stopped {
					suite = CompileSuite(suite, goFlagsConfig)
				}
				c <- suite
			}
		}()
	}

	for idx, suite := range suites {
		opc.completionChannels[idx] = make(chan TestSuite, 1)
		toCompile <- parallelSuiteBundle{suite, opc.completionChannels[idx]}
		if idx == 0 { //compile first suite serially
			suite = <-opc.completionChannels[0]
			opc.completionChannels[0] <- suite
		}
	}

	close(toCompile)
}

func (opc *OrderedParallelCompiler) Next() (int, TestSuite) {
	if opc.idx >= opc.numSuites {
		return opc.numSuites, TestSuite{}
	}

	idx := opc.idx
	suite := <-opc.completionChannels[idx]
	opc.idx = opc.idx + 1

	return idx, suite
}

func (opc *OrderedParallelCompiler) StopAndDrain() {
	opc.mutex.Lock()
	opc.stopped = true
	opc.mutex.Unlock()
}
237
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go
generated
vendored
Normal file
@@ -0,0 +1,237 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
|
||||
"github.com/google/pprof/profile"
|
||||
"github.com/onsi/ginkgo/v2/reporters"
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
)
|
||||
|
||||
func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string {
|
||||
suffix := ""
|
||||
if process != 0 {
|
||||
suffix = fmt.Sprintf(".%d", process)
|
||||
}
|
||||
if cliConfig.OutputDir == "" {
|
||||
return filepath.Join(suite.AbsPath(), assetName+suffix)
|
||||
}
|
||||
outputDir, _ := filepath.Abs(cliConfig.OutputDir)
|
||||
return filepath.Join(outputDir, suite.NamespacedName()+"_"+assetName+suffix)
|
||||
}
|
||||
|
||||
func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIConfig, suiteConfig types.SuiteConfig, reporterConfig types.ReporterConfig, goFlagsConfig types.GoFlagsConfig) ([]string, error) {
|
||||
messages := []string{}
|
||||
suitesWithProfiles := suites.WithState(TestSuiteStatePassed, TestSuiteStateFailed) //anything else won't have actually run and generated a profile
|
||||
|
||||
// merge cover profiles if need be
|
||||
if goFlagsConfig.Cover && !cliConfig.KeepSeparateCoverprofiles {
|
||||
coverProfiles := []string{}
|
||||
for _, suite := range suitesWithProfiles {
|
||||
if !suite.HasProgrammaticFocus {
|
||||
coverProfiles = append(coverProfiles, AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0))
|
||||
}
|
||||
}
|
||||
|
||||
if len(coverProfiles) > 0 {
|
||||
dst := goFlagsConfig.CoverProfile
|
||||
if cliConfig.OutputDir != "" {
|
||||
dst = filepath.Join(cliConfig.OutputDir, goFlagsConfig.CoverProfile)
|
||||
}
|
||||
err := MergeAndCleanupCoverProfiles(coverProfiles, dst)
|
||||
if err != nil {
|
||||
return messages, err
|
||||
}
|
||||
coverage, err := GetCoverageFromCoverProfile(dst)
|
||||
if err != nil {
|
||||
return messages, err
|
||||
}
|
||||
if coverage == 0 {
|
||||
messages = append(messages, "composite coverage: [no statements]")
|
||||
} else if suitesWithProfiles.AnyHaveProgrammaticFocus() {
|
||||
messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements however some suites did not contribute because they included programatically focused specs", coverage))
|
||||
} else {
|
||||
messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements", coverage))
|
||||
}
|
||||
} else {
|
||||
messages = append(messages, "no composite coverage computed: all suites included programatically focused specs")
|
||||
}
|
||||
}
|
||||
|
||||
// copy binaries if need be
|
||||
for _, suite := range suitesWithProfiles {
|
||||
if goFlagsConfig.BinaryMustBePreserved() && cliConfig.OutputDir != "" {
|
||||
src := suite.PathToCompiledTest
|
||||
dst := filepath.Join(cliConfig.OutputDir, suite.NamespacedName()+".test")
|
||||
if suite.Precompiled {
|
||||
if err := CopyFile(src, dst); err != nil {
|
||||
return messages, err
|
||||
}
|
||||
} else {
|
||||
if err := os.Rename(src, dst); err != nil {
|
||||
return messages, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type reportFormat struct {
|
||||
ReportName string
|
||||
GenerateFunc func(types.Report, string) error
|
||||
MergeFunc func([]string, string) ([]string, error)
|
||||
}
|
||||
reportFormats := []reportFormat{}
|
||||
if reporterConfig.JSONReport != "" {
|
||||
reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JSONReport, GenerateFunc: reporters.GenerateJSONReport, MergeFunc: reporters.MergeAndCleanupJSONReports})
|
||||
}
|
||||
if reporterConfig.JUnitReport != "" {
|
||||
reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JUnitReport, GenerateFunc: reporters.GenerateJUnitReport, MergeFunc: reporters.MergeAndCleanupJUnitReports})
|
||||
}
|
||||
if reporterConfig.TeamcityReport != "" {
|
||||
reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.TeamcityReport, GenerateFunc: reporters.GenerateTeamcityReport, MergeFunc: reporters.MergeAndCleanupTeamcityReports})
|
||||
}
|
||||
|
||||
// Generate reports for suites that failed to run
|
||||
reportableSuites := suites.ThatAreGinkgoSuites()
|
||||
for _, suite := range reportableSuites.WithState(TestSuiteStateFailedToCompile, TestSuiteStateFailedDueToTimeout, TestSuiteStateSkippedDueToPriorFailures, TestSuiteStateSkippedDueToEmptyCompilation) {
|
||||
report := types.Report{
|
||||
SuitePath: suite.AbsPath(),
|
||||
SuiteConfig: suiteConfig,
|
||||
SuiteSucceeded: false,
|
||||
}
|
||||
switch suite.State {
|
||||
case TestSuiteStateFailedToCompile:
|
||||
report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, suite.CompilationError.Error())
|
||||
case TestSuiteStateFailedDueToTimeout:
|
||||
report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, TIMEOUT_ELAPSED_FAILURE_REASON)
|
||||
case TestSuiteStateSkippedDueToPriorFailures:
|
||||
report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, PRIOR_FAILURES_FAILURE_REASON)
|
||||
case TestSuiteStateSkippedDueToEmptyCompilation:
|
||||
report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, EMPTY_SKIP_FAILURE_REASON)
|
||||
report.SuiteSucceeded = true
|
||||
}
|
||||
|
||||
for _, format := range reportFormats {
|
||||
format.GenerateFunc(report, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0))
|
||||
}
|
||||
}
|
||||
|
||||
// Merge reports unless we've been asked to keep them separate
|
||||
if !cliConfig.KeepSeparateReports {
|
||||
for _, format := range reportFormats {
|
||||
reports := []string{}
|
||||
for _, suite := range reportableSuites {
|
||||
reports = append(reports, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0))
|
||||
}
|
||||
dst := format.ReportName
|
||||
if cliConfig.OutputDir != "" {
|
||||
dst = filepath.Join(cliConfig.OutputDir, format.ReportName)
|
||||
}
|
||||
mergeMessages, err := format.MergeFunc(reports, dst)
|
||||
messages = append(messages, mergeMessages...)
|
||||
if err != nil {
|
||||
return messages, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return messages, nil
|
||||
}
|
||||
|
||||
//loads each profile, combines them, deletes them, stores them in destination
|
||||
func MergeAndCleanupCoverProfiles(profiles []string, destination string) error {
|
||||
combined := &bytes.Buffer{}
|
||||
modeRegex := regexp.MustCompile(`^mode: .*\n`)
|
||||
for i, profile := range profiles {
|
||||
contents, err := os.ReadFile(profile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to read coverage file %s:\n%s", profile, err.Error())
|
||||
}
|
||||
os.Remove(profile)
|
||||
|
||||
// remove the cover mode line from every file
|
||||
// except the first one
|
||||
if i > 0 {
|
||||
contents = modeRegex.ReplaceAll(contents, []byte{})
|
||||
}
|
||||
|
||||
_, err = combined.Write(contents)
|
||||
|
||||
// Add a newline to the end of every file if missing.
|
||||
if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' {
|
||||
_, err = combined.Write([]byte("\n"))
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to append to coverprofile:\n%s", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
err := os.WriteFile(destination, combined.Bytes(), 0666)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to create combined cover profile:\n%s", err.Error())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func GetCoverageFromCoverProfile(profile string) (float64, error) {
|
||||
cmd := exec.Command("go", "tool", "cover", "-func", profile)
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("Could not process Coverprofile %s: %s", profile, err.Error())
|
||||
}
|
||||
re := regexp.MustCompile(`total:\s*\(statements\)\s*(\d*\.\d*)\%`)
|
||||
matches := re.FindStringSubmatch(string(output))
|
||||
if matches == nil {
|
||||
return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage")
|
||||
}
|
||||
coverageString := matches[1]
|
||||
coverage, err := strconv.ParseFloat(coverageString, 64)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage: %s", err.Error())
|
||||
}
|
||||
|
||||
return coverage, nil
|
||||
}
|
||||
|
||||
func MergeProfiles(profilePaths []string, destination string) error {
|
||||
profiles := []*profile.Profile{}
|
||||
for _, profilePath := range profilePaths {
|
||||
proFile, err := os.Open(profilePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Could not open profile: %s\n%s", profilePath, err.Error())
|
||||
}
|
||||
prof, err := profile.Parse(proFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error())
|
||||
}
|
||||
profiles = append(profiles, prof)
|
||||
os.Remove(profilePath)
|
||||
}
|
||||
|
||||
mergedProfile, err := profile.Merge(profiles)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Could not merge profiles:\n%s", err.Error())
|
||||
}
|
||||
|
||||
outFile, err := os.Create(destination)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Could not create merged profile %s:\n%s", destination, err.Error())
|
||||
}
|
||||
err = mergedProfile.Write(outFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Could not write merged profile %s:\n%s", destination, err.Error())
|
||||
}
|
||||
err = outFile.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Could not close merged profile %s:\n%s", destination, err.Error())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
348
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go
generated
vendored
Normal file
@@ -0,0 +1,348 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/formatter"
|
||||
"github.com/onsi/ginkgo/v2/ginkgo/command"
|
||||
"github.com/onsi/ginkgo/v2/internal/parallel_support"
|
||||
"github.com/onsi/ginkgo/v2/reporters"
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
)
|
||||
|
||||
func RunCompiledSuite(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite {
|
||||
suite.State = TestSuiteStateFailed
|
||||
suite.HasProgrammaticFocus = false
|
||||
|
||||
if suite.PathToCompiledTest == "" {
|
||||
return suite
|
||||
}
|
||||
|
||||
if suite.IsGinkgo && cliConfig.ComputedProcs() > 1 {
|
||||
suite = runParallel(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs)
|
||||
} else if suite.IsGinkgo {
|
||||
suite = runSerial(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs)
|
||||
} else {
|
||||
suite = runGoTest(suite, cliConfig, goFlagsConfig)
|
||||
}
|
||||
runAfterRunHook(cliConfig.AfterRunHook, reporterConfig.NoColor, suite)
|
||||
return suite
|
||||
}
|
||||
|
||||
func buildAndStartCommand(suite TestSuite, args []string, pipeToStdout bool) (*exec.Cmd, *bytes.Buffer) {
|
||||
buf := &bytes.Buffer{}
|
||||
cmd := exec.Command(suite.PathToCompiledTest, args...)
|
||||
cmd.Dir = suite.Path
|
||||
if pipeToStdout {
|
||||
cmd.Stderr = io.MultiWriter(os.Stdout, buf)
|
||||
cmd.Stdout = os.Stdout
|
||||
} else {
|
||||
cmd.Stderr = buf
|
||||
cmd.Stdout = buf
|
||||
}
|
||||
err := cmd.Start()
|
||||
command.AbortIfError("Failed to start test suite", err)
|
||||
|
||||
return cmd, buf
|
||||
}
|
||||
|
||||
func checkForNoTestsWarning(buf *bytes.Buffer) bool {
|
||||
if strings.Contains(buf.String(), "warning: no tests to run") {
|
||||
fmt.Fprintf(os.Stderr, `Found no test suites, did you forget to run "ginkgo bootstrap"?`)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func runGoTest(suite TestSuite, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig) TestSuite {
|
||||
args, err := types.GenerateGoTestRunArgs(goFlagsConfig)
|
||||
command.AbortIfError("Failed to generate test run arguments", err)
|
||||
cmd, buf := buildAndStartCommand(suite, args, true)
|
||||
|
||||
cmd.Wait()
|
||||
|
||||
exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
|
||||
passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
|
||||
passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed
|
||||
if passed {
|
||||
suite.State = TestSuiteStatePassed
|
||||
} else {
|
||||
suite.State = TestSuiteStateFailed
|
||||
}
|
||||
|
||||
return suite
|
||||
}
|
||||
|
||||
func runSerial(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite {
|
||||
if goFlagsConfig.Cover {
|
||||
goFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0)
|
||||
}
|
||||
if goFlagsConfig.BlockProfile != "" {
|
||||
goFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0)
|
||||
}
|
||||
if goFlagsConfig.CPUProfile != "" {
|
||||
goFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0)
|
||||
}
|
||||
if goFlagsConfig.MemProfile != "" {
|
||||
goFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0)
|
||||
}
|
||||
if goFlagsConfig.MutexProfile != "" {
|
||||
goFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0)
|
||||
}
|
||||
if reporterConfig.JSONReport != "" {
|
||||
reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0)
|
||||
}
|
||||
if reporterConfig.JUnitReport != "" {
|
||||
reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0)
|
||||
}
|
||||
if reporterConfig.TeamcityReport != "" {
|
||||
reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0)
|
||||
}
|
||||
|
||||
args, err := types.GenerateGinkgoTestRunArgs(ginkgoConfig, reporterConfig, goFlagsConfig)
|
||||
command.AbortIfError("Failed to generate test run arguments", err)
|
||||
args = append([]string{"--test.timeout=0"}, args...)
|
||||
args = append(args, additionalArgs...)
|
||||
|
||||
cmd, buf := buildAndStartCommand(suite, args, true)
|
||||
|
||||
cmd.Wait()
|
||||
|
||||
exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
|
||||
suite.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
|
||||
passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
|
||||
passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed
|
||||
if passed {
|
||||
suite.State = TestSuiteStatePassed
|
||||
} else {
|
||||
suite.State = TestSuiteStateFailed
|
||||
}
|
||||
|
||||
if suite.HasProgrammaticFocus {
|
||||
if goFlagsConfig.Cover {
|
||||
fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused")
|
||||
}
|
||||
if goFlagsConfig.BlockProfile != "" {
|
||||
fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused")
|
||||
}
|
||||
if goFlagsConfig.CPUProfile != "" {
|
||||
fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused")
|
||||
}
|
||||
if goFlagsConfig.MemProfile != "" {
|
||||
fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused")
|
||||
}
|
||||
if goFlagsConfig.MutexProfile != "" {
|
||||
fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused")
|
||||
}
|
||||
}
|
||||
|
||||
return suite
|
||||
}
|
||||
|
||||
func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite {
|
||||
type procResult struct {
|
||||
passed bool
|
||||
hasProgrammaticFocus bool
|
||||
}
|
||||
|
||||
numProcs := cliConfig.ComputedProcs()
|
||||
procOutput := make([]*bytes.Buffer, numProcs)
|
||||
coverProfiles := []string{}
|
||||
|
||||
blockProfiles := []string{}
|
||||
cpuProfiles := []string{}
|
||||
memProfiles := []string{}
|
||||
mutexProfiles := []string{}
|
||||
|
||||
procResults := make(chan procResult)
|
||||
|
||||
server, err := parallel_support.NewServer(numProcs, reporters.NewDefaultReporter(reporterConfig, formatter.ColorableStdOut))
|
||||
command.AbortIfError("Failed to start parallel spec server", err)
|
||||
server.Start()
|
||||
defer server.Close()
|
||||
|
||||
if reporterConfig.JSONReport != "" {
|
||||
reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0)
|
||||
}
|
||||
if reporterConfig.JUnitReport != "" {
|
||||
reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0)
|
||||
}
|
||||
if reporterConfig.TeamcityReport != "" {
|
||||
reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0)
|
||||
}
|
||||
|
||||
for proc := 1; proc <= numProcs; proc++ {
|
||||
procGinkgoConfig := ginkgoConfig
|
||||
procGinkgoConfig.ParallelProcess, procGinkgoConfig.ParallelTotal, procGinkgoConfig.ParallelHost = proc, numProcs, server.Address()
|
||||
|
||||
procGoFlagsConfig := goFlagsConfig
|
||||
if goFlagsConfig.Cover {
|
||||
procGoFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, proc)
|
||||
coverProfiles = append(coverProfiles, procGoFlagsConfig.CoverProfile)
|
||||
}
|
||||
if goFlagsConfig.BlockProfile != "" {
|
||||
procGoFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, proc)
|
||||
blockProfiles = append(blockProfiles, procGoFlagsConfig.BlockProfile)
|
||||
}
|
||||
if goFlagsConfig.CPUProfile != "" {
|
||||
procGoFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, proc)
|
||||
cpuProfiles = append(cpuProfiles, procGoFlagsConfig.CPUProfile)
|
||||
}
|
||||
if goFlagsConfig.MemProfile != "" {
|
||||
procGoFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, proc)
|
||||
memProfiles = append(memProfiles, procGoFlagsConfig.MemProfile)
|
||||
}
|
||||
if goFlagsConfig.MutexProfile != "" {
|
||||
procGoFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, proc)
|
||||
mutexProfiles = append(mutexProfiles, procGoFlagsConfig.MutexProfile)
|
||||
}
|
||||
|
||||
args, err := types.GenerateGinkgoTestRunArgs(procGinkgoConfig, reporterConfig, procGoFlagsConfig)
|
||||
command.AbortIfError("Failed to generate test run arguments", err)
|
||||
args = append([]string{"--test.timeout=0"}, args...)
|
||||
args = append(args, additionalArgs...)
|
||||
|
||||
cmd, buf := buildAndStartCommand(suite, args, false)
|
||||
procOutput[proc-1] = buf
|
||||
server.RegisterAlive(proc, func() bool { return cmd.ProcessState == nil || !cmd.ProcessState.Exited() })
|
||||
|
||||
go func() {
|
||||
cmd.Wait()
|
||||
exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
|
||||
procResults <- procResult{
|
||||
passed: (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE),
|
||||
hasProgrammaticFocus: exitStatus == types.GINKGO_FOCUS_EXIT_CODE,
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
passed := true
|
||||
for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ {
|
||||
result := <-procResults
|
||||
passed = passed && result.passed
|
||||
suite.HasProgrammaticFocus = suite.HasProgrammaticFocus || result.hasProgrammaticFocus
|
||||
}
|
||||
if passed {
|
||||
suite.State = TestSuiteStatePassed
|
||||
} else {
|
||||
suite.State = TestSuiteStateFailed
|
||||
}
|
||||
|
||||
select {
|
||||
case <-server.GetSuiteDone():
|
||||
fmt.Println("")
|
||||
case <-time.After(time.Second):
|
||||
//one of the nodes never finished reporting to the server. Something must have gone wrong.
|
||||
fmt.Fprint(formatter.ColorableStdErr, formatter.F("\n{{bold}}{{red}}Ginkgo timed out waiting for all parallel procs to report back{{/}}\n"))
|
||||
fmt.Fprint(formatter.ColorableStdErr, formatter.F("{{gray}}Test suite:{{/}} %s (%s)\n\n", suite.PackageName, suite.Path))
|
||||
fmt.Fprint(formatter.ColorableStdErr, formatter.Fiw(0, formatter.COLS, "This occurs if a parallel process exits before it reports its results to the Ginkgo CLI. The CLI will now print out all the stdout/stderr output it's collected from the running processes. However you may not see anything useful in these logs because the individual test processes usually intercept output to stdout/stderr in order to capture it in the spec reports.\n\nYou may want to try rerunning your test suite with {{light-gray}}--output-interceptor-mode=none{{/}} to see additional output here and debug your suite.\n"))
|
||||
fmt.Fprintln(formatter.ColorableStdErr, " ")
|
||||
for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ {
|
||||
fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{bold}}Output from proc %d:{{/}}\n", proc))
|
||||
fmt.Fprintln(os.Stderr, formatter.Fi(1, "%s", procOutput[proc-1].String()))
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "** End **")
|
||||
}
|
||||
|
||||
for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ {
|
||||
output := procOutput[proc-1].String()
|
||||
if proc == 1 && checkForNoTestsWarning(procOutput[0]) && cliConfig.RequireSuite {
|
||||
suite.State = TestSuiteStateFailed
|
||||
}
|
||||
if strings.Contains(output, "deprecated Ginkgo functionality") {
|
||||
fmt.Fprintln(os.Stderr, output)
|
||||
}
|
||||
}
|
||||
|
||||
if len(coverProfiles) > 0 {
|
||||
if suite.HasProgrammaticFocus {
|
||||
fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused")
|
||||
} else {
|
||||
coverProfile := AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0)
|
||||
err := MergeAndCleanupCoverProfiles(coverProfiles, coverProfile)
|
||||
command.AbortIfError("Failed to combine cover profiles", err)
|
||||
|
||||
coverage, err := GetCoverageFromCoverProfile(coverProfile)
|
||||
command.AbortIfError("Failed to compute coverage", err)
|
||||
if coverage == 0 {
|
||||
fmt.Fprintln(os.Stdout, "coverage: [no statements]")
|
||||
} else {
|
||||
fmt.Fprintf(os.Stdout, "coverage: %.1f%% of statements\n", coverage)
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(blockProfiles) > 0 {
|
||||
if suite.HasProgrammaticFocus {
|
||||
fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused")
|
||||
} else {
|
||||
blockProfile := AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0)
|
||||
err := MergeProfiles(blockProfiles, blockProfile)
|
||||
command.AbortIfError("Failed to combine blockprofiles", err)
|
||||
}
|
||||
}
|
||||
if len(cpuProfiles) > 0 {
|
||||
if suite.HasProgrammaticFocus {
|
||||
fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused")
|
||||
} else {
|
||||
cpuProfile := AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0)
|
||||
err := MergeProfiles(cpuProfiles, cpuProfile)
|
||||
command.AbortIfError("Failed to combine cpuprofiles", err)
|
||||
}
|
||||
}
|
||||
if len(memProfiles) > 0 {
|
||||
if suite.HasProgrammaticFocus {
|
||||
fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused")
|
||||
} else {
|
||||
memProfile := AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0)
|
||||
err := MergeProfiles(memProfiles, memProfile)
|
||||
command.AbortIfError("Failed to combine memprofiles", err)
|
||||
}
|
||||
}
|
||||
if len(mutexProfiles) > 0 {
|
||||
if suite.HasProgrammaticFocus {
|
||||
fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused")
|
||||
} else {
|
||||
mutexProfile := AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0)
|
||||
err := MergeProfiles(mutexProfiles, mutexProfile)
|
||||
command.AbortIfError("Failed to combine mutexprofiles", err)
|
||||
}
|
||||
}
|
||||
|
||||
return suite
|
||||
}
|
||||
|
||||
func runAfterRunHook(command string, noColor bool, suite TestSuite) {
|
||||
if command == "" {
|
||||
return
|
||||
}
|
||||
f := formatter.NewWithNoColorBool(noColor)
|
||||
|
||||
// Allow for string replacement to pass input to the command
|
||||
passed := "[FAIL]"
|
||||
if suite.State.Is(TestSuiteStatePassed) {
|
||||
passed = "[PASS]"
|
||||
}
|
||||
command = strings.ReplaceAll(command, "(ginkgo-suite-passed)", passed)
|
||||
command = strings.ReplaceAll(command, "(ginkgo-suite-name)", suite.PackageName)
|
||||
|
||||
// Must break command into parts
|
||||
splitArgs := regexp.MustCompile(`'.+'|".+"|\S+`)
|
||||
parts := splitArgs.FindAllString(command, -1)
|
||||
|
||||
output, err := exec.Command(parts[0], parts[1:]...).CombinedOutput()
|
||||
if err != nil {
|
||||
fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{red}}{{bold}}After-run-hook failed:{{/}}"))
|
||||
fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{red}}%s{{/}}", output))
|
||||
} else {
|
||||
fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{green}}{{bold}}After-run-hook succeeded:{{/}}"))
|
||||
fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{green}}%s{{/}}", output))
|
||||
}
|
||||
}
|
283
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go
generated
vendored
Normal file
@@ -0,0 +1,283 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
)
|
||||
|
||||
const TIMEOUT_ELAPSED_FAILURE_REASON = "Suite did not run because the timeout elapsed"
|
||||
const PRIOR_FAILURES_FAILURE_REASON = "Suite did not run because prior suites failed and --keep-going is not set"
|
||||
const EMPTY_SKIP_FAILURE_REASON = "Suite did not run go test reported that no test files were found"
|
||||
|
||||
type TestSuiteState uint
|
||||
|
||||
const (
|
||||
TestSuiteStateInvalid TestSuiteState = iota
|
||||
|
||||
TestSuiteStateUncompiled
|
||||
TestSuiteStateCompiled
|
||||
|
||||
TestSuiteStatePassed
|
||||
|
||||
TestSuiteStateSkippedDueToEmptyCompilation
|
||||
TestSuiteStateSkippedByFilter
|
||||
TestSuiteStateSkippedDueToPriorFailures
|
||||
|
||||
TestSuiteStateFailed
|
||||
TestSuiteStateFailedDueToTimeout
|
||||
TestSuiteStateFailedToCompile
|
||||
)
|
||||
|
||||
var TestSuiteStateFailureStates = []TestSuiteState{TestSuiteStateFailed, TestSuiteStateFailedDueToTimeout, TestSuiteStateFailedToCompile}
|
||||
|
||||
func (state TestSuiteState) Is(states ...TestSuiteState) bool {
|
||||
for _, suiteState := range states {
|
||||
if suiteState == state {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
type TestSuite struct {
|
||||
Path string
|
||||
PackageName string
|
||||
IsGinkgo bool
|
||||
|
||||
Precompiled bool
|
||||
PathToCompiledTest string
|
||||
CompilationError error
|
||||
|
||||
HasProgrammaticFocus bool
|
||||
State TestSuiteState
|
||||
}
|
||||
|
||||
func (ts TestSuite) AbsPath() string {
|
||||
path, _ := filepath.Abs(ts.Path)
|
||||
return path
|
||||
}
|
||||
|
||||
func (ts TestSuite) NamespacedName() string {
|
||||
name := relPath(ts.Path)
|
||||
name = strings.TrimLeft(name, "."+string(filepath.Separator))
|
||||
name = strings.ReplaceAll(name, string(filepath.Separator), "_")
|
||||
name = strings.ReplaceAll(name, " ", "_")
|
||||
if name == "" {
|
||||
return ts.PackageName
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
type TestSuites []TestSuite
|
||||
|
||||
func (ts TestSuites) AnyHaveProgrammaticFocus() bool {
|
||||
for _, suite := range ts {
|
||||
if suite.HasProgrammaticFocus {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (ts TestSuites) ThatAreGinkgoSuites() TestSuites {
|
||||
out := TestSuites{}
|
||||
for _, suite := range ts {
|
||||
if suite.IsGinkgo {
|
||||
out = append(out, suite)
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func (ts TestSuites) CountWithState(states ...TestSuiteState) int {
|
||||
n := 0
|
||||
for _, suite := range ts {
|
||||
if suite.State.Is(states...) {
|
||||
n += 1
|
||||
}
|
||||
}
|
||||
|
||||
return n
|
||||
}
|
||||
|
||||
func (ts TestSuites) WithState(states ...TestSuiteState) TestSuites {
|
||||
out := TestSuites{}
|
||||
for _, suite := range ts {
|
||||
if suite.State.Is(states...) {
|
||||
out = append(out, suite)
|
||||
}
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
func (ts TestSuites) WithoutState(states ...TestSuiteState) TestSuites {
|
||||
out := TestSuites{}
|
||||
for _, suite := range ts {
|
||||
if !suite.State.Is(states...) {
|
||||
out = append(out, suite)
|
||||
}
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
func (ts TestSuites) ShuffledCopy(seed int64) TestSuites {
|
||||
out := make(TestSuites, len(ts))
|
||||
permutation := rand.New(rand.NewSource(seed)).Perm(len(ts))
|
||||
for i, j := range permutation {
|
||||
out[i] = ts[j]
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func FindSuites(args []string, cliConfig types.CLIConfig, allowPrecompiled bool) TestSuites {
|
||||
suites := TestSuites{}
|
||||
|
||||
if len(args) > 0 {
|
||||
for _, arg := range args {
|
||||
if allowPrecompiled {
|
||||
suite, err := precompiledTestSuite(arg)
|
||||
if err == nil {
|
||||
suites = append(suites, suite)
|
||||
continue
|
||||
}
|
||||
}
|
||||
recurseForSuite := cliConfig.Recurse
|
||||
if strings.HasSuffix(arg, "/...") && arg != "/..." {
|
||||
arg = arg[:len(arg)-4]
|
||||
recurseForSuite = true
|
||||
}
|
||||
suites = append(suites, suitesInDir(arg, recurseForSuite)...)
|
||||
}
|
||||
} else {
|
||||
suites = suitesInDir(".", cliConfig.Recurse)
|
||||
}
|
||||
|
||||
if cliConfig.SkipPackage != "" {
|
||||
skipFilters := strings.Split(cliConfig.SkipPackage, ",")
|
||||
for idx := range suites {
|
||||
for _, skipFilter := range skipFilters {
|
||||
if strings.Contains(suites[idx].Path, skipFilter) {
|
||||
suites[idx].State = TestSuiteStateSkippedByFilter
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return suites
|
||||
}
|
||||
|
||||
func precompiledTestSuite(path string) (TestSuite, error) {
|
||||
info, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return TestSuite{}, err
|
||||
}
|
||||
|
||||
if info.IsDir() {
|
||||
return TestSuite{}, errors.New("this is a directory, not a file")
|
||||
}
|
||||
|
||||
if filepath.Ext(path) != ".test" && filepath.Ext(path) != ".exe" {
|
||||
return TestSuite{}, errors.New("this is not a .test binary")
|
||||
}
|
||||
|
||||
if filepath.Ext(path) == ".test" && info.Mode()&0111 == 0 {
|
||||
return TestSuite{}, errors.New("this is not executable")
|
||||
}
|
||||
|
||||
dir := relPath(filepath.Dir(path))
|
||||
packageName := strings.TrimSuffix(filepath.Base(path), ".exe")
|
||||
packageName = strings.TrimSuffix(packageName, ".test")
|
||||
|
||||
path, err = filepath.Abs(path)
|
||||
if err != nil {
|
||||
return TestSuite{}, err
|
||||
}
|
||||
|
||||
return TestSuite{
|
||||
Path: dir,
|
||||
PackageName: packageName,
|
||||
IsGinkgo: true,
|
||||
Precompiled: true,
|
||||
PathToCompiledTest: path,
|
||||
State: TestSuiteStateCompiled,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func suitesInDir(dir string, recurse bool) TestSuites {
|
||||
suites := TestSuites{}
|
||||
|
||||
if path.Base(dir) == "vendor" {
|
||||
return suites
|
||||
}
|
||||
|
||||
files, _ := os.ReadDir(dir)
|
||||
re := regexp.MustCompile(`^[^._].*_test\.go$`)
|
||||
for _, file := range files {
|
||||
if !file.IsDir() && re.Match([]byte(file.Name())) {
|
||||
suite := TestSuite{
|
||||
Path: relPath(dir),
|
||||
PackageName: packageNameForSuite(dir),
|
||||
IsGinkgo: filesHaveGinkgoSuite(dir, files),
|
||||
State: TestSuiteStateUncompiled,
|
||||
}
|
||||
suites = append(suites, suite)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if recurse {
|
||||
re = regexp.MustCompile(`^[._]`)
|
||||
for _, file := range files {
|
||||
if file.IsDir() && !re.Match([]byte(file.Name())) {
|
||||
suites = append(suites, suitesInDir(dir+"/"+file.Name(), recurse)...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return suites
|
||||
}
|
||||
|
||||
func relPath(dir string) string {
|
||||
dir, _ = filepath.Abs(dir)
|
||||
cwd, _ := os.Getwd()
|
||||
dir, _ = filepath.Rel(cwd, filepath.Clean(dir))
|
||||
|
||||
if string(dir[0]) != "." {
|
||||
dir = "." + string(filepath.Separator) + dir
|
||||
}
|
||||
|
||||
return dir
|
||||
}
|
||||
|
||||
func packageNameForSuite(dir string) string {
|
||||
path, _ := filepath.Abs(dir)
|
||||
return filepath.Base(path)
|
||||
}
|
||||
|
||||
func filesHaveGinkgoSuite(dir string, files []os.DirEntry) bool {
|
||||
reTestFile := regexp.MustCompile(`_test\.go$`)
|
||||
reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"|\/ginkgo\/v2"|\/ginkgo\/v2/dsl/`)
|
||||
|
||||
for _, file := range files {
|
||||
if !file.IsDir() && reTestFile.Match([]byte(file.Name())) {
|
||||
contents, _ := os.ReadFile(dir + "/" + file.Name())
|
||||
if reGinkgo.Match(contents) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
86
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go
generated
vendored
Normal file
@@ -0,0 +1,86 @@
package internal

import (
	"fmt"
	"io"
	"os"
	"os/exec"

	"github.com/onsi/ginkgo/v2/formatter"
	"github.com/onsi/ginkgo/v2/ginkgo/command"
)

func FileExists(path string) bool {
	_, err := os.Stat(path)
	return err == nil
}

func CopyFile(src string, dest string) error {
	srcFile, err := os.Open(src)
	if err != nil {
		return err
	}

	srcStat, err := srcFile.Stat()
	if err != nil {
		return err
	}

	if _, err := os.Stat(dest); err == nil {
		os.Remove(dest)
	}

	destFile, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE, srcStat.Mode())
	if err != nil {
		return err
	}

	_, err = io.Copy(destFile, srcFile)
	if err != nil {
		return err
	}

	if err := srcFile.Close(); err != nil {
		return err
	}
	return destFile.Close()
}

func GoFmt(path string) {
	out, err := exec.Command("go", "fmt", path).CombinedOutput()
	if err != nil {
		command.AbortIfError(fmt.Sprintf("Could not fmt:\n%s\n", string(out)), err)
	}
}

func PluralizedWord(singular, plural string, count int) string {
	if count == 1 {
		return singular
	}
	return plural
}

func FailedSuitesReport(suites TestSuites, f formatter.Formatter) string {
	out := ""
	out += "There were failures detected in the following suites:\n"

	maxPackageNameLength := 0
	for _, suite := range suites.WithState(TestSuiteStateFailureStates...) {
		if len(suite.PackageName) > maxPackageNameLength {
			maxPackageNameLength = len(suite.PackageName)
		}
	}

	packageNameFormatter := fmt.Sprintf("%%%ds", maxPackageNameLength)
	for _, suite := range suites {
		switch suite.State {
		case TestSuiteStateFailed:
			out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s{{/}}\n", suite.PackageName, suite.Path)
		case TestSuiteStateFailedToCompile:
			out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{magenta}}[Compilation failure]{{/}}\n", suite.PackageName, suite.Path)
		case TestSuiteStateFailedDueToTimeout:
			out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{orange}}[%s]{{/}}\n", suite.PackageName, suite.Path, TIMEOUT_ELAPSED_FAILURE_REASON)
		}
	}
	return out
}
54
vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go
generated
vendored
Normal file
@@ -0,0 +1,54 @@
package internal

import (
	"fmt"
	"os/exec"
	"regexp"
	"strings"

	"github.com/onsi/ginkgo/v2/formatter"
	"github.com/onsi/ginkgo/v2/types"
)

var versiorRe = regexp.MustCompile(`v(\d+\.\d+\.\d+)`)

func VerifyCLIAndFrameworkVersion(suites TestSuites) {
	cliVersion := types.VERSION
	mismatches := map[string][]string{}

	for _, suite := range suites {
		cmd := exec.Command("go", "list", "-m", "github.com/onsi/ginkgo/v2")
		cmd.Dir = suite.Path
		output, err := cmd.CombinedOutput()
		if err != nil {
			continue
		}
		components := strings.Split(string(output), " ")
		if len(components) != 2 {
			continue
		}
		matches := versiorRe.FindStringSubmatch(components[1])
		if matches == nil || len(matches) != 2 {
			continue
		}
		libraryVersion := matches[1]
		if cliVersion != libraryVersion {
			mismatches[libraryVersion] = append(mismatches[libraryVersion], suite.PackageName)
		}
	}

	if len(mismatches) == 0 {
		return
	}

	fmt.Println(formatter.F("{{red}}{{bold}}Ginkgo detected a version mismatch between the Ginkgo CLI and the version of Ginkgo imported by your packages:{{/}}"))

	fmt.Println(formatter.Fi(1, "Ginkgo CLI Version:"))
	fmt.Println(formatter.Fi(2, "{{bold}}%s{{/}}", cliVersion))
	fmt.Println(formatter.Fi(1, "Mismatched package versions found:"))
	for version, packages := range mismatches {
		fmt.Println(formatter.Fi(2, "{{bold}}%s{{/}} used by %s", version, strings.Join(packages, ", ")))
	}
	fmt.Println("")
	fmt.Println(formatter.Fiw(1, formatter.COLS, "{{gray}}Ginkgo will continue to attempt to run but you may see errors (including flag parsing errors) and should either update your go.mod or your version of the Ginkgo CLI to match.\n\nTo install the matching version of the CLI run\n {{bold}}go install github.com/onsi/ginkgo/v2/ginkgo{{/}}{{gray}}\nfrom a path that contains a go.mod file. Alternatively you can use\n {{bold}}go run github.com/onsi/ginkgo/v2/ginkgo{{/}}{{gray}}\nfrom a path that contains a go.mod file to invoke the matching version of the Ginkgo CLI.\n\nIf you are attempting to test multiple packages that each have a different version of the Ginkgo library with a single Ginkgo CLI that is currently unsupported.\n{{/}}"))
}
123
vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go
generated
vendored
Normal file
@@ -0,0 +1,123 @@
package labels

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"sort"
	"strconv"
	"strings"

	"github.com/onsi/ginkgo/v2/ginkgo/command"
	"github.com/onsi/ginkgo/v2/ginkgo/internal"
	"github.com/onsi/ginkgo/v2/types"
	"golang.org/x/tools/go/ast/inspector"
)

func BuildLabelsCommand() command.Command {
	var cliConfig = types.NewDefaultCLIConfig()

	flags, err := types.BuildLabelsCommandFlagSet(&cliConfig)
	if err != nil {
		panic(err)
	}

	return command.Command{
		Name: "labels",
		Usage: "ginkgo labels <FLAGS> <PACKAGES>",
		Flags: flags,
		ShortDoc: "List labels detected in the passed-in packages (or the package in the current directory if left blank).",
		DocLink: "spec-labels",
		Command: func(args []string, _ []string) {
			ListLabels(args, cliConfig)
		},
	}
}

func ListLabels(args []string, cliConfig types.CLIConfig) {
	suites := internal.FindSuites(args, cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter)
	if len(suites) == 0 {
		command.AbortWith("Found no test suites")
	}
	for _, suite := range suites {
		labels := fetchLabelsFromPackage(suite.Path)
		if len(labels) == 0 {
			fmt.Printf("%s: No labels found\n", suite.PackageName)
		} else {
			fmt.Printf("%s: [%s]\n", suite.PackageName, strings.Join(labels, ", "))
		}
	}
}

func fetchLabelsFromPackage(packagePath string) []string {
	fset := token.NewFileSet()
	parsedPackages, err := parser.ParseDir(fset, packagePath, nil, 0)
	command.AbortIfError("Failed to parse package source:", err)

	files := []*ast.File{}
	hasTestPackage := false
	for key, pkg := range parsedPackages {
		if strings.HasSuffix(key, "_test") {
			hasTestPackage = true
			for _, file := range pkg.Files {
				files = append(files, file)
			}
		}
	}
	if !hasTestPackage {
		for _, pkg := range parsedPackages {
			for _, file := range pkg.Files {
				files = append(files, file)
			}
		}
	}

	seen := map[string]bool{}
	labels := []string{}
	ispr := inspector.New(files)
	ispr.Preorder([]ast.Node{&ast.CallExpr{}}, func(n ast.Node) {
		potentialLabels := fetchLabels(n.(*ast.CallExpr))
		for _, label := range potentialLabels {
			if !seen[label] {
				seen[label] = true
				labels = append(labels, strconv.Quote(label))
			}
		}
	})

	sort.Strings(labels)
	return labels
}

func fetchLabels(callExpr *ast.CallExpr) []string {
	out := []string{}
	switch expr := callExpr.Fun.(type) {
	case *ast.Ident:
		if expr.Name != "Label" {
			return out
		}
	case *ast.SelectorExpr:
		if expr.Sel.Name != "Label" {
			return out
		}
	default:
		return out
	}
	for _, arg := range callExpr.Args {
		switch expr := arg.(type) {
		case *ast.BasicLit:
			if expr.Kind == token.STRING {
				unquoted, err := strconv.Unquote(expr.Value)
				if err != nil {
					unquoted = expr.Value
				}
				validated, err := types.ValidateAndCleanupLabel(unquoted, types.CodeLocation{})
				if err == nil {
					out = append(out, validated)
				}
			}
		}
	}
	return out
}
58
vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go
generated
vendored
Normal file
@@ -0,0 +1,58 @@
package main

import (
	"fmt"
	"os"

	"github.com/onsi/ginkgo/v2/ginkgo/build"
	"github.com/onsi/ginkgo/v2/ginkgo/command"
	"github.com/onsi/ginkgo/v2/ginkgo/generators"
	"github.com/onsi/ginkgo/v2/ginkgo/labels"
	"github.com/onsi/ginkgo/v2/ginkgo/outline"
	"github.com/onsi/ginkgo/v2/ginkgo/run"
	"github.com/onsi/ginkgo/v2/ginkgo/unfocus"
	"github.com/onsi/ginkgo/v2/ginkgo/watch"
	"github.com/onsi/ginkgo/v2/types"
)

var program command.Program

func GenerateCommands() []command.Command {
	return []command.Command{
		watch.BuildWatchCommand(),
		build.BuildBuildCommand(),
		generators.BuildBootstrapCommand(),
		generators.BuildGenerateCommand(),
		labels.BuildLabelsCommand(),
		outline.BuildOutlineCommand(),
		unfocus.BuildUnfocusCommand(),
		BuildVersionCommand(),
	}
}

func main() {
	program = command.Program{
		Name: "ginkgo",
		Heading: fmt.Sprintf("Ginkgo Version %s", types.VERSION),
		Commands: GenerateCommands(),
		DefaultCommand: run.BuildRunCommand(),
		DeprecatedCommands: []command.DeprecatedCommand{
			{Name: "convert", Deprecation: types.Deprecations.Convert()},
			{Name: "blur", Deprecation: types.Deprecations.Blur()},
			{Name: "nodot", Deprecation: types.Deprecations.Nodot()},
		},
	}

	program.RunAndExit(os.Args)
}

func BuildVersionCommand() command.Command {
	return command.Command{
		Name: "version",
		Usage: "ginkgo version",
		ShortDoc: "Print Ginkgo's version",
		Command: func(_ []string, _ []string) {
			fmt.Printf("Ginkgo Version %s\n", types.VERSION)
		},
	}
}
218
vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go
generated
vendored
Normal file
@@ -0,0 +1,218 @@
|
||||
package outline
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
const (
|
||||
// undefinedTextAlt is used if the spec/container text cannot be derived
|
||||
undefinedTextAlt = "undefined"
|
||||
)
|
||||
|
||||
// ginkgoMetadata holds useful bits of information for every entry in the outline
|
||||
type ginkgoMetadata struct {
|
||||
// Name is the spec or container function name, e.g. `Describe` or `It`
|
||||
Name string `json:"name"`
|
||||
|
||||
// Text is the `text` argument passed to specs, and some containers
|
||||
Text string `json:"text"`
|
||||
|
||||
// Start is the position of first character of the spec or container block
|
||||
Start int `json:"start"`
|
||||
|
||||
// End is the position of first character immediately after the spec or container block
|
||||
End int `json:"end"`
|
||||
|
||||
Spec bool `json:"spec"`
|
||||
Focused bool `json:"focused"`
|
||||
Pending bool `json:"pending"`
|
||||
}
|
||||
|
||||
// ginkgoNode is used to construct the outline as a tree
|
||||
type ginkgoNode struct {
|
||||
ginkgoMetadata
|
||||
Nodes []*ginkgoNode `json:"nodes"`
|
||||
}
|
||||
|
||||
type walkFunc func(n *ginkgoNode)
|
||||
|
||||
func (n *ginkgoNode) PreOrder(f walkFunc) {
|
||||
f(n)
|
||||
for _, m := range n.Nodes {
|
||||
m.PreOrder(f)
|
||||
}
|
||||
}
|
||||
|
||||
func (n *ginkgoNode) PostOrder(f walkFunc) {
|
||||
for _, m := range n.Nodes {
|
||||
m.PostOrder(f)
|
||||
}
|
||||
f(n)
|
||||
}
|
||||
|
||||
func (n *ginkgoNode) Walk(pre, post walkFunc) {
|
||||
pre(n)
|
||||
for _, m := range n.Nodes {
|
||||
m.Walk(pre, post)
|
||||
}
|
||||
post(n)
|
||||
}
|
||||
|
||||
// PropagateInheritedProperties propagates the Pending and Focused properties
|
||||
// through the subtree rooted at n.
|
||||
func (n *ginkgoNode) PropagateInheritedProperties() {
|
||||
n.PreOrder(func(thisNode *ginkgoNode) {
|
||||
for _, descendantNode := range thisNode.Nodes {
|
||||
if thisNode.Pending {
|
||||
descendantNode.Pending = true
|
||||
descendantNode.Focused = false
|
||||
}
|
||||
if thisNode.Focused && !descendantNode.Pending {
|
||||
descendantNode.Focused = true
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// BackpropagateUnfocus propagates the Focused property through the subtree
|
||||
// rooted at n. It applies the rule described in the Ginkgo docs:
|
||||
// > Nested programmatically focused specs follow a simple rule: if a
|
||||
// > leaf-node is marked focused, any of its ancestor nodes that are marked
|
||||
// > focus will be unfocused.
|
||||
func (n *ginkgoNode) BackpropagateUnfocus() {
|
||||
focusedSpecInSubtreeStack := []bool{}
|
||||
n.PostOrder(func(thisNode *ginkgoNode) {
|
||||
if thisNode.Spec {
|
||||
focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, thisNode.Focused)
|
||||
return
|
||||
}
|
||||
focusedSpecInSubtree := false
|
||||
for range thisNode.Nodes {
|
||||
focusedSpecInSubtree = focusedSpecInSubtree || focusedSpecInSubtreeStack[len(focusedSpecInSubtreeStack)-1]
|
||||
focusedSpecInSubtreeStack = focusedSpecInSubtreeStack[0 : len(focusedSpecInSubtreeStack)-1]
|
||||
}
|
||||
focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, focusedSpecInSubtree)
|
||||
if focusedSpecInSubtree {
|
||||
thisNode.Focused = false
|
||||
}
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func packageAndIdentNamesFromCallExpr(ce *ast.CallExpr) (string, string, bool) {
|
||||
switch ex := ce.Fun.(type) {
|
||||
case *ast.Ident:
|
||||
return "", ex.Name, true
|
||||
case *ast.SelectorExpr:
|
||||
pkgID, ok := ex.X.(*ast.Ident)
|
||||
if !ok {
|
||||
return "", "", false
|
||||
}
|
||||
// A package identifier is top-level, so Obj must be nil
|
||||
if pkgID.Obj != nil {
|
||||
return "", "", false
|
||||
}
|
||||
if ex.Sel == nil {
|
||||
return "", "", false
|
||||
}
|
||||
return pkgID.Name, ex.Sel.Name, true
|
||||
default:
|
||||
return "", "", false
|
||||
}
|
||||
}
|
||||
|
||||
// absoluteOffsetsForNode derives the absolute character offsets of the node start and
|
||||
// end positions.
|
||||
func absoluteOffsetsForNode(fset *token.FileSet, n ast.Node) (start, end int) {
|
||||
return fset.PositionFor(n.Pos(), false).Offset, fset.PositionFor(n.End(), false).Offset
|
||||
}
|
||||
|
||||
// ginkgoNodeFromCallExpr derives an outline entry from a go AST subtree
|
||||
// corresponding to a Ginkgo container or spec.
|
||||
func ginkgoNodeFromCallExpr(fset *token.FileSet, ce *ast.CallExpr, ginkgoPackageName *string) (*ginkgoNode, bool) {
|
||||
packageName, identName, ok := packageAndIdentNamesFromCallExpr(ce)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
n := ginkgoNode{}
|
||||
n.Name = identName
|
||||
n.Start, n.End = absoluteOffsetsForNode(fset, ce)
|
||||
n.Nodes = make([]*ginkgoNode, 0)
|
||||
switch identName {
|
||||
case "It", "Specify", "Entry":
|
||||
n.Spec = true
|
||||
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
|
||||
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||
case "FIt", "FSpecify", "FEntry":
|
||||
n.Spec = true
|
||||
n.Focused = true
|
||||
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
|
||||
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||
case "PIt", "PSpecify", "XIt", "XSpecify", "PEntry", "XEntry":
|
||||
n.Spec = true
|
||||
n.Pending = true
|
||||
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
|
||||
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||
case "Context", "Describe", "When", "DescribeTable":
|
||||
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
|
||||
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||
case "FContext", "FDescribe", "FWhen", "FDescribeTable":
|
||||
n.Focused = true
|
||||
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
|
||||
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||
case "PContext", "PDescribe", "PWhen", "XContext", "XDescribe", "XWhen", "PDescribeTable", "XDescribeTable":
|
||||
n.Pending = true
|
||||
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
|
||||
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||
case "By":
|
||||
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
|
||||
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||
case "AfterEach", "BeforeEach":
|
||||
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||
case "JustAfterEach", "JustBeforeEach":
|
||||
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||
case "AfterSuite", "BeforeSuite":
|
||||
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||
case "SynchronizedAfterSuite", "SynchronizedBeforeSuite":
|
||||
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||
default:
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
|
||||
// textOrAltFromCallExpr tries to derive the "text" of a Ginkgo spec or
|
||||
// container. If it cannot derive it, it returns the alt text.
|
||||
func textOrAltFromCallExpr(ce *ast.CallExpr, alt string) string {
|
||||
text, defined := textFromCallExpr(ce)
|
||||
if !defined {
|
||||
return alt
|
||||
}
|
||||
return text
|
||||
}
|
||||
|
||||
// textFromCallExpr tries to derive the "text" of a Ginkgo spec or container. If
|
||||
// it cannot derive it, it returns false.
|
||||
func textFromCallExpr(ce *ast.CallExpr) (string, bool) {
|
||||
if len(ce.Args) < 1 {
|
||||
return "", false
|
||||
}
|
||||
text, ok := ce.Args[0].(*ast.BasicLit)
|
||||
if !ok {
|
||||
return "", false
|
||||
}
|
||||
switch text.Kind {
|
||||
case token.CHAR, token.STRING:
|
||||
// For token.CHAR and token.STRING, Value is quoted
|
||||
unquoted, err := strconv.Unquote(text.Value)
|
||||
if err != nil {
|
||||
// If unquoting fails, just use the raw Value
|
||||
return text.Value, true
|
||||
}
|
||||
return unquoted, true
|
||||
default:
|
||||
return text.Value, true
|
||||
}
|
||||
}
|
65
vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go
generated
vendored
Normal file
@@ -0,0 +1,65 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Most of the required functions were available in the
|
||||
// "golang.org/x/tools/go/ast/astutil" package, but not exported.
|
||||
// They were copied from https://github.com/golang/tools/blob/2b0845dc783e36ae26d683f4915a5840ef01ab0f/go/ast/astutil/imports.go
|
||||
|
||||
package outline
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// packageNameForImport returns the package name for the package. If the package
|
||||
// is not imported, it returns nil. "Package name" refers to `pkgname` in the
|
||||
// call expression `pkgname.ExportedIdentifier`. Examples:
|
||||
// (import path not found) -> nil
|
||||
// "import example.com/pkg/foo" -> "foo"
|
||||
// "import fooalias example.com/pkg/foo" -> "fooalias"
|
||||
// "import . example.com/pkg/foo" -> ""
|
||||
func packageNameForImport(f *ast.File, path string) *string {
|
||||
spec := importSpec(f, path)
|
||||
if spec == nil {
|
||||
return nil
|
||||
}
|
||||
name := spec.Name.String()
|
||||
if name == "<nil>" {
|
||||
// If the package name is not explicitly specified,
|
||||
// make an educated guess. This is not guaranteed to be correct.
|
||||
lastSlash := strings.LastIndex(path, "/")
|
||||
if lastSlash == -1 {
|
||||
name = path
|
||||
} else {
|
||||
name = path[lastSlash+1:]
|
||||
}
|
||||
}
|
||||
if name == "." {
|
||||
name = ""
|
||||
}
|
||||
return &name
|
||||
}
|
||||
|
||||
// importSpec returns the import spec if f imports path,
|
||||
// or nil otherwise.
|
||||
func importSpec(f *ast.File, path string) *ast.ImportSpec {
|
||||
for _, s := range f.Imports {
|
||||
if strings.HasPrefix(importPath(s), path) {
|
||||
return s
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// importPath returns the unquoted import path of s,
|
||||
// or "" if the path is not properly quoted.
|
||||
func importPath(s *ast.ImportSpec) string {
|
||||
t, err := strconv.Unquote(s.Path.Value)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return t
|
||||
}
|
103
vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go
generated
vendored
Normal file
@@ -0,0 +1,103 @@
|
||||
package outline
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/go/ast/inspector"
|
||||
)
|
||||
|
||||
const (
|
||||
// ginkgoImportPath is the well-known ginkgo import path
|
||||
ginkgoImportPath = "github.com/onsi/ginkgo/v2"
|
||||
)
|
||||
|
||||
// FromASTFile returns an outline for a Ginkgo test source file
|
||||
func FromASTFile(fset *token.FileSet, src *ast.File) (*outline, error) {
|
||||
ginkgoPackageName := packageNameForImport(src, ginkgoImportPath)
|
||||
if ginkgoPackageName == nil {
|
||||
return nil, fmt.Errorf("file does not import %q", ginkgoImportPath)
|
||||
}
|
||||
|
||||
root := ginkgoNode{}
|
||||
stack := []*ginkgoNode{&root}
|
||||
ispr := inspector.New([]*ast.File{src})
|
||||
ispr.Nodes([]ast.Node{(*ast.CallExpr)(nil)}, func(node ast.Node, push bool) bool {
|
||||
if push {
|
||||
// Pre-order traversal
|
||||
ce, ok := node.(*ast.CallExpr)
|
||||
if !ok {
|
||||
// Because `Nodes` calls this function only when the node is an
|
||||
// ast.CallExpr, this should never happen
|
||||
panic(fmt.Errorf("node starting at %d, ending at %d is not an *ast.CallExpr", node.Pos(), node.End()))
|
||||
}
|
||||
gn, ok := ginkgoNodeFromCallExpr(fset, ce, ginkgoPackageName)
|
||||
if !ok {
|
||||
// Node is not a Ginkgo spec or container, continue
|
||||
return true
|
||||
}
|
||||
parent := stack[len(stack)-1]
|
||||
parent.Nodes = append(parent.Nodes, gn)
|
||||
stack = append(stack, gn)
|
||||
return true
|
||||
}
|
||||
// Post-order traversal
|
||||
start, end := absoluteOffsetsForNode(fset, node)
|
||||
lastVisitedGinkgoNode := stack[len(stack)-1]
|
||||
if start != lastVisitedGinkgoNode.Start || end != lastVisitedGinkgoNode.End {
|
||||
// Node is not a Ginkgo spec or container, so it was not pushed onto the stack, continue
|
||||
return true
|
||||
}
|
||||
stack = stack[0 : len(stack)-1]
|
||||
return true
|
||||
})
|
||||
if len(root.Nodes) == 0 {
|
||||
return &outline{[]*ginkgoNode{}}, nil
|
||||
}
|
||||
|
||||
// Derive the final focused property for all nodes. This must be done
|
||||
// _before_ propagating the inherited focused property.
|
||||
root.BackpropagateUnfocus()
|
||||
// Now, propagate inherited properties, including focused and pending.
|
||||
root.PropagateInheritedProperties()
|
||||
|
||||
return &outline{root.Nodes}, nil
|
||||
}
|
||||
|
||||
type outline struct {
|
||||
Nodes []*ginkgoNode `json:"nodes"`
|
||||
}
|
||||
|
||||
func (o *outline) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(o.Nodes)
|
||||
}
|
||||
|
||||
// String returns a CSV-formatted outline. Spec or container are output in
|
||||
// depth-first order.
|
||||
func (o *outline) String() string {
|
||||
return o.StringIndent(0)
|
||||
}
|
||||
|
||||
// StringIndent returns a CSV-formated outline, but every line is indented by
|
||||
// one 'width' of spaces for every level of nesting.
|
||||
func (o *outline) StringIndent(width int) string {
|
||||
var b strings.Builder
|
||||
b.WriteString("Name,Text,Start,End,Spec,Focused,Pending\n")
|
||||
|
||||
currentIndent := 0
|
||||
pre := func(n *ginkgoNode) {
|
||||
b.WriteString(fmt.Sprintf("%*s", currentIndent, ""))
|
||||
b.WriteString(fmt.Sprintf("%s,%s,%d,%d,%t,%t,%t\n", n.Name, n.Text, n.Start, n.End, n.Spec, n.Focused, n.Pending))
|
||||
currentIndent += width
|
||||
}
|
||||
post := func(n *ginkgoNode) {
|
||||
currentIndent -= width
|
||||
}
|
||||
for _, n := range o.Nodes {
|
||||
n.Walk(pre, post)
|
||||
}
|
||||
return b.String()
|
||||
}
|
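Editor's note: FromASTFile is exported, so the outline can also be produced programmatically rather than through the CLI. A minimal sketch follows (not part of the vendored file; it assumes github.com/onsi/ginkgo/v2 is required in go.mod, and suite_test.go is a placeholder path):

package main

import (
	"fmt"
	"go/parser"
	"go/token"

	"github.com/onsi/ginkgo/v2/ginkgo/outline"
)

func main() {
	fset := token.NewFileSet()
	// Any Ginkgo test file that imports github.com/onsi/ginkgo/v2 will do.
	parsed, err := parser.ParseFile(fset, "suite_test.go", nil, 0)
	if err != nil {
		panic(err)
	}
	o, err := outline.FromASTFile(fset, parsed)
	if err != nil {
		panic(err) // e.g. the file does not import ginkgo
	}
	fmt.Print(o)                 // CSV rows: Name,Text,Start,End,Spec,Focused,Pending
	fmt.Print(o.StringIndent(4)) // same rows, indented by nesting depth
}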
98
vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go
generated
vendored
Normal file
@@ -0,0 +1,98 @@
|
||||
package outline
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"os"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/ginkgo/command"
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
)
|
||||
|
||||
const (
|
||||
// indentWidth is the width used by the 'indent' output
|
||||
indentWidth = 4
|
||||
// stdinAlias is a portable alias for stdin. This convention is used in
|
||||
// other CLIs, e.g., kubectl.
|
||||
stdinAlias = "-"
|
||||
usageCommand = "ginkgo outline <filename>"
|
||||
)
|
||||
|
||||
type outlineConfig struct {
|
||||
Format string
|
||||
}
|
||||
|
||||
func BuildOutlineCommand() command.Command {
|
||||
conf := outlineConfig{
|
||||
Format: "csv",
|
||||
}
|
||||
flags, err := types.NewGinkgoFlagSet(
|
||||
types.GinkgoFlags{
|
||||
{Name: "format", KeyPath: "Format",
|
||||
Usage: "Format of outline",
|
||||
UsageArgument: "one of 'csv', 'indent', or 'json'",
|
||||
UsageDefaultValue: conf.Format,
|
||||
},
|
||||
},
|
||||
&conf,
|
||||
types.GinkgoFlagSections{},
|
||||
)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return command.Command{
|
||||
Name: "outline",
|
||||
Usage: "ginkgo outline <filename>",
|
||||
ShortDoc: "Create an outline of Ginkgo symbols for a file",
|
||||
Documentation: "To read from stdin, use: `ginkgo outline -`",
|
||||
DocLink: "creating-an-outline-of-specs",
|
||||
Flags: flags,
|
||||
Command: func(args []string, _ []string) {
|
||||
outlineFile(args, conf.Format)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func outlineFile(args []string, format string) {
|
||||
if len(args) != 1 {
|
||||
command.AbortWithUsage("outline expects exactly one argument")
|
||||
}
|
||||
|
||||
filename := args[0]
|
||||
var src *os.File
|
||||
if filename == stdinAlias {
|
||||
src = os.Stdin
|
||||
} else {
|
||||
var err error
|
||||
src, err = os.Open(filename)
|
||||
command.AbortIfError("Failed to open file:", err)
|
||||
}
|
||||
|
||||
fset := token.NewFileSet()
|
||||
|
||||
parsedSrc, err := parser.ParseFile(fset, filename, src, 0)
|
||||
command.AbortIfError("Failed to parse source:", err)
|
||||
|
||||
o, err := FromASTFile(fset, parsedSrc)
|
||||
command.AbortIfError("Failed to create outline:", err)
|
||||
|
||||
var oerr error
|
||||
switch format {
|
||||
case "csv":
|
||||
_, oerr = fmt.Print(o)
|
||||
case "indent":
|
||||
_, oerr = fmt.Print(o.StringIndent(indentWidth))
|
||||
case "json":
|
||||
b, err := json.Marshal(o)
|
||||
if err != nil {
|
||||
println(fmt.Sprintf("error marshalling to json: %s", err))
|
||||
}
|
||||
_, oerr = fmt.Println(string(b))
|
||||
default:
|
||||
command.AbortWith("Format %s not accepted", format)
|
||||
}
|
||||
command.AbortIfError("Failed to write outline:", oerr)
|
||||
}
|
232
vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go
generated
vendored
Normal file
@@ -0,0 +1,232 @@
|
||||
package run
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/formatter"
|
||||
"github.com/onsi/ginkgo/v2/ginkgo/command"
|
||||
"github.com/onsi/ginkgo/v2/ginkgo/internal"
|
||||
"github.com/onsi/ginkgo/v2/internal/interrupt_handler"
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
)
|
||||
|
||||
func BuildRunCommand() command.Command {
|
||||
var suiteConfig = types.NewDefaultSuiteConfig()
|
||||
var reporterConfig = types.NewDefaultReporterConfig()
|
||||
var cliConfig = types.NewDefaultCLIConfig()
|
||||
var goFlagsConfig = types.NewDefaultGoFlagsConfig()
|
||||
|
||||
flags, err := types.BuildRunCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
interruptHandler := interrupt_handler.NewInterruptHandler(nil)
|
||||
interrupt_handler.SwallowSigQuit()
|
||||
|
||||
return command.Command{
|
||||
Name: "run",
|
||||
Flags: flags,
|
||||
Usage: "ginkgo run <FLAGS> <PACKAGES> -- <PASS-THROUGHS>",
|
||||
ShortDoc: "Run the tests in the passed in <PACKAGES> (or the package in the current directory if left blank)",
|
||||
Documentation: "Any arguments after -- will be passed to the test.",
|
||||
DocLink: "running-tests",
|
||||
Command: func(args []string, additionalArgs []string) {
|
||||
var errors []error
|
||||
cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig)
|
||||
command.AbortIfErrors("Ginkgo detected configuration issues:", errors)
|
||||
|
||||
runner := &SpecRunner{
|
||||
cliConfig: cliConfig,
|
||||
goFlagsConfig: goFlagsConfig,
|
||||
suiteConfig: suiteConfig,
|
||||
reporterConfig: reporterConfig,
|
||||
flags: flags,
|
||||
|
||||
interruptHandler: interruptHandler,
|
||||
}
|
||||
|
||||
runner.RunSpecs(args, additionalArgs)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type SpecRunner struct {
|
||||
suiteConfig types.SuiteConfig
|
||||
reporterConfig types.ReporterConfig
|
||||
cliConfig types.CLIConfig
|
||||
goFlagsConfig types.GoFlagsConfig
|
||||
flags types.GinkgoFlagSet
|
||||
|
||||
interruptHandler *interrupt_handler.InterruptHandler
|
||||
}
|
||||
|
||||
func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) {
|
||||
suites := internal.FindSuites(args, r.cliConfig, true)
|
||||
skippedSuites := suites.WithState(internal.TestSuiteStateSkippedByFilter)
|
||||
suites = suites.WithoutState(internal.TestSuiteStateSkippedByFilter)
|
||||
|
||||
internal.VerifyCLIAndFrameworkVersion(suites)
|
||||
|
||||
if len(skippedSuites) > 0 {
|
||||
fmt.Println("Will skip:")
|
||||
for _, skippedSuite := range skippedSuites {
|
||||
fmt.Println(" " + skippedSuite.Path)
|
||||
}
|
||||
}
|
||||
|
||||
if len(skippedSuites) > 0 && len(suites) == 0 {
|
||||
command.AbortGracefullyWith("All tests skipped! Exiting...")
|
||||
}
|
||||
|
||||
if len(suites) == 0 {
|
||||
command.AbortWith("Found no test suites")
|
||||
}
|
||||
|
||||
if len(suites) > 1 && !r.flags.WasSet("succinct") && r.reporterConfig.Verbosity().LT(types.VerbosityLevelVerbose) {
|
||||
r.reporterConfig.Succinct = true
|
||||
}
|
||||
|
||||
t := time.Now()
|
||||
var endTime time.Time
|
||||
if r.suiteConfig.Timeout > 0 {
|
||||
endTime = t.Add(r.suiteConfig.Timeout)
|
||||
}
|
||||
|
||||
iteration := 0
|
||||
OUTER_LOOP:
|
||||
for {
|
||||
if !r.flags.WasSet("seed") {
|
||||
r.suiteConfig.RandomSeed = time.Now().Unix()
|
||||
}
|
||||
if r.cliConfig.RandomizeSuites && len(suites) > 1 {
|
||||
suites = suites.ShuffledCopy(r.suiteConfig.RandomSeed)
|
||||
}
|
||||
|
||||
opc := internal.NewOrderedParallelCompiler(r.cliConfig.ComputedNumCompilers())
|
||||
opc.StartCompiling(suites, r.goFlagsConfig)
|
||||
|
||||
SUITE_LOOP:
|
||||
for {
|
||||
suiteIdx, suite := opc.Next()
|
||||
if suiteIdx >= len(suites) {
|
||||
break SUITE_LOOP
|
||||
}
|
||||
suites[suiteIdx] = suite
|
||||
|
||||
if r.interruptHandler.Status().Interrupted() {
|
||||
opc.StopAndDrain()
|
||||
break OUTER_LOOP
|
||||
}
|
||||
|
||||
if suites[suiteIdx].State.Is(internal.TestSuiteStateSkippedDueToEmptyCompilation) {
|
||||
fmt.Printf("Skipping %s (no test files)\n", suite.Path)
|
||||
continue SUITE_LOOP
|
||||
}
|
||||
|
||||
if suites[suiteIdx].State.Is(internal.TestSuiteStateFailedToCompile) {
|
||||
fmt.Println(suites[suiteIdx].CompilationError.Error())
|
||||
if !r.cliConfig.KeepGoing {
|
||||
opc.StopAndDrain()
|
||||
}
|
||||
continue SUITE_LOOP
|
||||
}
|
||||
|
||||
if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 && !r.cliConfig.KeepGoing {
|
||||
suites[suiteIdx].State = internal.TestSuiteStateSkippedDueToPriorFailures
|
||||
opc.StopAndDrain()
|
||||
continue SUITE_LOOP
|
||||
}
|
||||
|
||||
if !endTime.IsZero() {
|
||||
r.suiteConfig.Timeout = endTime.Sub(time.Now())
|
||||
if r.suiteConfig.Timeout <= 0 {
|
||||
suites[suiteIdx].State = internal.TestSuiteStateFailedDueToTimeout
|
||||
opc.StopAndDrain()
|
||||
continue SUITE_LOOP
|
||||
}
|
||||
}
|
||||
|
||||
suites[suiteIdx] = internal.RunCompiledSuite(suites[suiteIdx], r.suiteConfig, r.reporterConfig, r.cliConfig, r.goFlagsConfig, additionalArgs)
|
||||
}
|
||||
|
||||
if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 {
|
||||
if iteration > 0 {
|
||||
fmt.Printf("\nTests failed on attempt #%d\n\n", iteration+1)
|
||||
}
|
||||
break OUTER_LOOP
|
||||
}
|
||||
|
||||
if r.cliConfig.UntilItFails {
|
||||
fmt.Printf("\nAll tests passed...\nWill keep running them until they fail.\nThis was attempt #%d\n%s\n", iteration+1, orcMessage(iteration+1))
|
||||
} else if r.cliConfig.Repeat > 0 && iteration < r.cliConfig.Repeat {
|
||||
fmt.Printf("\nAll tests passed...\nThis was attempt %d of %d.\n", iteration+1, r.cliConfig.Repeat+1)
|
||||
} else {
|
||||
break OUTER_LOOP
|
||||
}
|
||||
iteration += 1
|
||||
}
|
||||
|
||||
internal.Cleanup(r.goFlagsConfig, suites...)
|
||||
|
||||
messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, r.cliConfig, r.suiteConfig, r.reporterConfig, r.goFlagsConfig)
|
||||
command.AbortIfError("could not finalize profiles:", err)
|
||||
for _, message := range messages {
|
||||
fmt.Println(message)
|
||||
}
|
||||
|
||||
fmt.Printf("\nGinkgo ran %d %s in %s\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), time.Since(t))
|
||||
|
||||
if suites.CountWithState(internal.TestSuiteStateFailureStates...) == 0 {
|
||||
if suites.AnyHaveProgrammaticFocus() && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" {
|
||||
fmt.Printf("Test Suite Passed\n")
|
||||
fmt.Printf("Detected Programmatic Focus - setting exit status to %d\n", types.GINKGO_FOCUS_EXIT_CODE)
|
||||
command.Abort(command.AbortDetails{ExitCode: types.GINKGO_FOCUS_EXIT_CODE})
|
||||
} else {
|
||||
fmt.Printf("Test Suite Passed\n")
|
||||
command.Abort(command.AbortDetails{})
|
||||
}
|
||||
} else {
|
||||
fmt.Fprintln(formatter.ColorableStdOut, "")
|
||||
if len(suites) > 1 && suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 {
|
||||
fmt.Fprintln(formatter.ColorableStdOut,
|
||||
internal.FailedSuitesReport(suites, formatter.NewWithNoColorBool(r.reporterConfig.NoColor)))
|
||||
}
|
||||
fmt.Printf("Test Suite Failed\n")
|
||||
command.Abort(command.AbortDetails{ExitCode: 1})
|
||||
}
|
||||
}
|
||||
|
||||
func orcMessage(iteration int) string {
|
||||
if iteration < 10 {
|
||||
return ""
|
||||
} else if iteration < 30 {
|
||||
return []string{
|
||||
"If at first you succeed...",
|
||||
"...try, try again.",
|
||||
"Looking good!",
|
||||
"Still good...",
|
||||
"I think your tests are fine....",
|
||||
"Yep, still passing",
|
||||
"Oh boy, here I go testin' again!",
|
||||
"Even the gophers are getting bored",
|
||||
"Did you try -race?",
|
||||
"Maybe you should stop now?",
|
||||
"I'm getting tired...",
|
||||
"What if I just made you a sandwich?",
|
||||
"Hit ^C, hit ^C, please hit ^C",
|
||||
"Make it stop. Please!",
|
||||
"Come on! Enough is enough!",
|
||||
"Dave, this conversation can serve no purpose anymore. Goodbye.",
|
||||
"Just what do you think you're doing, Dave? ",
|
||||
"I, Sisyphus",
|
||||
"Insanity: doing the same thing over and over again and expecting different results. -Einstein",
|
||||
"I guess Einstein never tried to churn butter",
|
||||
}[iteration-10] + "\n"
|
||||
} else {
|
||||
return "No, seriously... you can probably stop now.\n"
|
||||
}
|
||||
}
|
186
vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go
generated
vendored
Normal file
@@ -0,0 +1,186 @@
|
||||
package unfocus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/ginkgo/command"
|
||||
)
|
||||
|
||||
func BuildUnfocusCommand() command.Command {
|
||||
return command.Command{
|
||||
Name: "unfocus",
|
||||
Usage: "ginkgo unfocus",
|
||||
ShortDoc: "Recursively unfocus any focused tests under the current directory",
|
||||
DocLink: "filtering-specs",
|
||||
Command: func(_ []string, _ []string) {
|
||||
unfocusSpecs()
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func unfocusSpecs() {
|
||||
fmt.Println("Scanning for focus...")
|
||||
|
||||
goFiles := make(chan string)
|
||||
go func() {
|
||||
unfocusDir(goFiles, ".")
|
||||
close(goFiles)
|
||||
}()
|
||||
|
||||
const workers = 10
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(workers)
|
||||
|
||||
for i := 0; i < workers; i++ {
|
||||
go func() {
|
||||
for path := range goFiles {
|
||||
unfocusFile(path)
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func unfocusDir(goFiles chan string, path string) {
|
||||
files, err := os.ReadDir(path)
|
||||
if err != nil {
|
||||
fmt.Println(err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
for _, f := range files {
|
||||
switch {
|
||||
case f.IsDir() && shouldProcessDir(f.Name()):
|
||||
unfocusDir(goFiles, filepath.Join(path, f.Name()))
|
||||
case !f.IsDir() && shouldProcessFile(f.Name()):
|
||||
goFiles <- filepath.Join(path, f.Name())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func shouldProcessDir(basename string) bool {
|
||||
return basename != "vendor" && !strings.HasPrefix(basename, ".")
|
||||
}
|
||||
|
||||
func shouldProcessFile(basename string) bool {
|
||||
return strings.HasSuffix(basename, ".go")
|
||||
}
|
||||
|
||||
func unfocusFile(path string) {
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
fmt.Printf("error reading file '%s': %s\n", path, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
ast, err := parser.ParseFile(token.NewFileSet(), path, bytes.NewReader(data), parser.ParseComments)
|
||||
if err != nil {
|
||||
fmt.Printf("error parsing file '%s': %s\n", path, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
eliminations := scanForFocus(ast)
|
||||
if len(eliminations) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Printf("...updating %s\n", path)
|
||||
backup, err := writeBackup(path, data)
|
||||
if err != nil {
|
||||
fmt.Printf("error creating backup file: %s\n", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if err := updateFile(path, data, eliminations); err != nil {
|
||||
fmt.Printf("error writing file '%s': %s\n", path, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
os.Remove(backup)
|
||||
}
|
||||
|
||||
func writeBackup(path string, data []byte) (string, error) {
|
||||
t, err := os.CreateTemp(filepath.Dir(path), filepath.Base(path))
|
||||
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error creating temporary file: %w", err)
|
||||
}
|
||||
defer t.Close()
|
||||
|
||||
if _, err := io.Copy(t, bytes.NewReader(data)); err != nil {
|
||||
return "", fmt.Errorf("error writing to temporary file: %w", err)
|
||||
}
|
||||
|
||||
return t.Name(), nil
|
||||
}
|
||||
|
||||
func updateFile(path string, data []byte, eliminations [][]int64) error {
|
||||
to, err := os.Create(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error opening file for writing '%s': %w\n", path, err)
|
||||
}
|
||||
defer to.Close()
|
||||
|
||||
from := bytes.NewReader(data)
|
||||
var cursor int64
|
||||
for _, eliminationRange := range eliminations {
|
||||
positionToEliminate, lengthToEliminate := eliminationRange[0]-1, eliminationRange[1]
|
||||
if _, err := io.CopyN(to, from, positionToEliminate-cursor); err != nil {
|
||||
return fmt.Errorf("error copying data: %w", err)
|
||||
}
|
||||
|
||||
cursor = positionToEliminate + lengthToEliminate
|
||||
|
||||
if _, err := from.Seek(lengthToEliminate, io.SeekCurrent); err != nil {
|
||||
return fmt.Errorf("error seeking to position in buffer: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := io.Copy(to, from); err != nil {
|
||||
return fmt.Errorf("error copying end data: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func scanForFocus(file *ast.File) (eliminations [][]int64) {
|
||||
ast.Inspect(file, func(n ast.Node) bool {
|
||||
if c, ok := n.(*ast.CallExpr); ok {
|
||||
if i, ok := c.Fun.(*ast.Ident); ok {
|
||||
if isFocus(i.Name) {
|
||||
eliminations = append(eliminations, []int64{int64(i.Pos()), 1})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if i, ok := n.(*ast.Ident); ok {
|
||||
if i.Name == "Focus" {
|
||||
eliminations = append(eliminations, []int64{int64(i.Pos()), 6})
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
})
|
||||
|
||||
return eliminations
|
||||
}
|
||||
|
||||
func isFocus(name string) bool {
|
||||
switch name {
|
||||
case "FDescribe", "FContext", "FIt", "FDescribeTable", "FEntry", "FSpecify", "FWhen":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
22
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go
generated
vendored
Normal file
@@ -0,0 +1,22 @@
package watch

import "sort"

type Delta struct {
	ModifiedPackages []string

	NewSuites      []*Suite
	RemovedSuites  []*Suite
	modifiedSuites []*Suite
}

type DescendingByDelta []*Suite

func (a DescendingByDelta) Len() int           { return len(a) }
func (a DescendingByDelta) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a DescendingByDelta) Less(i, j int) bool { return a[i].Delta() > a[j].Delta() }

func (d Delta) ModifiedSuites() []*Suite {
	sort.Sort(DescendingByDelta(d.modifiedSuites))
	return d.modifiedSuites
}
75
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go
generated
vendored
Normal file
@@ -0,0 +1,75 @@
|
||||
package watch
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"regexp"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/ginkgo/internal"
|
||||
)
|
||||
|
||||
type SuiteErrors map[internal.TestSuite]error
|
||||
|
||||
type DeltaTracker struct {
|
||||
maxDepth int
|
||||
watchRegExp *regexp.Regexp
|
||||
suites map[string]*Suite
|
||||
packageHashes *PackageHashes
|
||||
}
|
||||
|
||||
func NewDeltaTracker(maxDepth int, watchRegExp *regexp.Regexp) *DeltaTracker {
|
||||
return &DeltaTracker{
|
||||
maxDepth: maxDepth,
|
||||
watchRegExp: watchRegExp,
|
||||
packageHashes: NewPackageHashes(watchRegExp),
|
||||
suites: map[string]*Suite{},
|
||||
}
|
||||
}
|
||||
|
||||
func (d *DeltaTracker) Delta(suites internal.TestSuites) (delta Delta, errors SuiteErrors) {
|
||||
errors = SuiteErrors{}
|
||||
delta.ModifiedPackages = d.packageHashes.CheckForChanges()
|
||||
|
||||
providedSuitePaths := map[string]bool{}
|
||||
for _, suite := range suites {
|
||||
providedSuitePaths[suite.Path] = true
|
||||
}
|
||||
|
||||
d.packageHashes.StartTrackingUsage()
|
||||
|
||||
for _, suite := range d.suites {
|
||||
if providedSuitePaths[suite.Suite.Path] {
|
||||
if suite.Delta() > 0 {
|
||||
delta.modifiedSuites = append(delta.modifiedSuites, suite)
|
||||
}
|
||||
} else {
|
||||
delta.RemovedSuites = append(delta.RemovedSuites, suite)
|
||||
}
|
||||
}
|
||||
|
||||
d.packageHashes.StopTrackingUsageAndPrune()
|
||||
|
||||
for _, suite := range suites {
|
||||
_, ok := d.suites[suite.Path]
|
||||
if !ok {
|
||||
s, err := NewSuite(suite, d.maxDepth, d.packageHashes)
|
||||
if err != nil {
|
||||
errors[suite] = err
|
||||
continue
|
||||
}
|
||||
d.suites[suite.Path] = s
|
||||
delta.NewSuites = append(delta.NewSuites, s)
|
||||
}
|
||||
}
|
||||
|
||||
return delta, errors
|
||||
}
|
||||
|
||||
func (d *DeltaTracker) WillRun(suite internal.TestSuite) error {
|
||||
s, ok := d.suites[suite.Path]
|
||||
if !ok {
|
||||
return fmt.Errorf("unknown suite %s", suite.Path)
|
||||
}
|
||||
|
||||
return s.MarkAsRunAndRecomputedDependencies(d.maxDepth)
|
||||
}
|
92
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go
generated
vendored
Normal file
@@ -0,0 +1,92 @@
|
||||
package watch
|
||||
|
||||
import (
|
||||
"go/build"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`)
|
||||
var ginkgoIntegrationTestFilter = regexp.MustCompile(`github\.com/onsi/ginkgo/integration`) //allow us to integration test this thing
|
||||
|
||||
type Dependencies struct {
|
||||
deps map[string]int
|
||||
}
|
||||
|
||||
func NewDependencies(path string, maxDepth int) (Dependencies, error) {
|
||||
d := Dependencies{
|
||||
deps: map[string]int{},
|
||||
}
|
||||
|
||||
if maxDepth == 0 {
|
||||
return d, nil
|
||||
}
|
||||
|
||||
err := d.seedWithDepsForPackageAtPath(path)
|
||||
if err != nil {
|
||||
return d, err
|
||||
}
|
||||
|
||||
for depth := 1; depth < maxDepth; depth++ {
|
||||
n := len(d.deps)
|
||||
d.addDepsForDepth(depth)
|
||||
if n == len(d.deps) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return d, nil
|
||||
}
|
||||
|
||||
func (d Dependencies) Dependencies() map[string]int {
|
||||
return d.deps
|
||||
}
|
||||
|
||||
func (d Dependencies) seedWithDepsForPackageAtPath(path string) error {
|
||||
pkg, err := build.ImportDir(path, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.resolveAndAdd(pkg.Imports, 1)
|
||||
d.resolveAndAdd(pkg.TestImports, 1)
|
||||
d.resolveAndAdd(pkg.XTestImports, 1)
|
||||
|
||||
delete(d.deps, pkg.Dir)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d Dependencies) addDepsForDepth(depth int) {
|
||||
for dep, depDepth := range d.deps {
|
||||
if depDepth == depth {
|
||||
d.addDepsForDep(dep, depth+1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d Dependencies) addDepsForDep(dep string, depth int) {
|
||||
pkg, err := build.ImportDir(dep, 0)
|
||||
if err != nil {
|
||||
println(err.Error())
|
||||
return
|
||||
}
|
||||
d.resolveAndAdd(pkg.Imports, depth)
|
||||
}
|
||||
|
||||
func (d Dependencies) resolveAndAdd(deps []string, depth int) {
|
||||
for _, dep := range deps {
|
||||
pkg, err := build.Import(dep, ".", 0)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if !pkg.Goroot && (!ginkgoAndGomegaFilter.Match([]byte(pkg.Dir)) || ginkgoIntegrationTestFilter.Match([]byte(pkg.Dir))) {
|
||||
d.addDepIfNotPresent(pkg.Dir, depth)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d Dependencies) addDepIfNotPresent(dep string, depth int) {
|
||||
_, ok := d.deps[dep]
|
||||
if !ok {
|
||||
d.deps[dep] = depth
|
||||
}
|
||||
}
|
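Editor's note: NewDependencies and Dependencies() are exported, so the same bounded import-graph walk can be driven directly. A minimal sketch follows (not part of the vendored file; the "." path and depth of 2 are illustrative, and github.com/onsi/ginkgo/v2 is assumed to be in go.mod):

package main

import (
	"fmt"

	"github.com/onsi/ginkgo/v2/ginkgo/watch"
)

func main() {
	// Seed from the package in the current directory and follow imports two levels deep.
	deps, err := watch.NewDependencies(".", 2)
	if err != nil {
		panic(err)
	}
	for dir, depth := range deps.Dependencies() {
		fmt.Printf("depth %d: %s\n", depth, dir)
	}
}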
108
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go
generated
vendored
Normal file
@@ -0,0 +1,108 @@
|
||||
package watch
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"time"
|
||||
)
|
||||
|
||||
var goTestRegExp = regexp.MustCompile(`_test\.go$`)
|
||||
|
||||
type PackageHash struct {
|
||||
CodeModifiedTime time.Time
|
||||
TestModifiedTime time.Time
|
||||
Deleted bool
|
||||
|
||||
path string
|
||||
codeHash string
|
||||
testHash string
|
||||
watchRegExp *regexp.Regexp
|
||||
}
|
||||
|
||||
func NewPackageHash(path string, watchRegExp *regexp.Regexp) *PackageHash {
|
||||
p := &PackageHash{
|
||||
path: path,
|
||||
watchRegExp: watchRegExp,
|
||||
}
|
||||
|
||||
p.codeHash, _, p.testHash, _, p.Deleted = p.computeHashes()
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *PackageHash) CheckForChanges() bool {
|
||||
codeHash, codeModifiedTime, testHash, testModifiedTime, deleted := p.computeHashes()
|
||||
|
||||
if deleted {
|
||||
if !p.Deleted {
|
||||
t := time.Now()
|
||||
p.CodeModifiedTime = t
|
||||
p.TestModifiedTime = t
|
||||
}
|
||||
p.Deleted = true
|
||||
return true
|
||||
}
|
||||
|
||||
modified := false
|
||||
p.Deleted = false
|
||||
|
||||
if p.codeHash != codeHash {
|
||||
p.CodeModifiedTime = codeModifiedTime
|
||||
modified = true
|
||||
}
|
||||
if p.testHash != testHash {
|
||||
p.TestModifiedTime = testModifiedTime
|
||||
modified = true
|
||||
}
|
||||
|
||||
p.codeHash = codeHash
|
||||
p.testHash = testHash
|
||||
return modified
|
||||
}
|
||||
|
||||
func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Time, testHash string, testModifiedTime time.Time, deleted bool) {
|
||||
entries, err := os.ReadDir(p.path)
|
||||
|
||||
if err != nil {
|
||||
deleted = true
|
||||
return
|
||||
}
|
||||
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
info, err := entry.Info()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if goTestRegExp.Match([]byte(info.Name())) {
|
||||
testHash += p.hashForFileInfo(info)
|
||||
if info.ModTime().After(testModifiedTime) {
|
||||
testModifiedTime = info.ModTime()
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if p.watchRegExp.Match([]byte(info.Name())) {
|
||||
codeHash += p.hashForFileInfo(info)
|
||||
if info.ModTime().After(codeModifiedTime) {
|
||||
codeModifiedTime = info.ModTime()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
testHash += codeHash
|
||||
if codeModifiedTime.After(testModifiedTime) {
|
||||
testModifiedTime = codeModifiedTime
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (p *PackageHash) hashForFileInfo(info os.FileInfo) string {
|
||||
return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano())
|
||||
}
|
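Editor's note: PackageHash detects changes purely from file names, sizes and modification times, without hashing file contents. A minimal polling sketch follows (not part of the vendored file; the path, regular expression and interval are illustrative, and github.com/onsi/ginkgo/v2 is assumed to be in go.mod):

package main

import (
	"fmt"
	"regexp"
	"time"

	"github.com/onsi/ginkgo/v2/ginkgo/watch"
)

func main() {
	// watchRegExp selects which non-test files count as code; _test.go files are hashed separately regardless.
	hash := watch.NewPackageHash(".", regexp.MustCompile(`\.go$`))
	for range time.Tick(time.Second) {
		if hash.CheckForChanges() {
			fmt.Println("package changed; code mtime:", hash.CodeModifiedTime, "test mtime:", hash.TestModifiedTime)
		}
	}
}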
85
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go
generated
vendored
Normal file
@@ -0,0 +1,85 @@
|
||||
package watch
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type PackageHashes struct {
|
||||
PackageHashes map[string]*PackageHash
|
||||
usedPaths map[string]bool
|
||||
watchRegExp *regexp.Regexp
|
||||
lock *sync.Mutex
|
||||
}
|
||||
|
||||
func NewPackageHashes(watchRegExp *regexp.Regexp) *PackageHashes {
|
||||
return &PackageHashes{
|
||||
PackageHashes: map[string]*PackageHash{},
|
||||
usedPaths: nil,
|
||||
watchRegExp: watchRegExp,
|
||||
lock: &sync.Mutex{},
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PackageHashes) CheckForChanges() []string {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
modified := []string{}
|
||||
|
||||
for _, packageHash := range p.PackageHashes {
|
||||
if packageHash.CheckForChanges() {
|
||||
modified = append(modified, packageHash.path)
|
||||
}
|
||||
}
|
||||
|
||||
return modified
|
||||
}
|
||||
|
||||
func (p *PackageHashes) Add(path string) *PackageHash {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
path, _ = filepath.Abs(path)
|
||||
_, ok := p.PackageHashes[path]
|
||||
if !ok {
|
||||
p.PackageHashes[path] = NewPackageHash(path, p.watchRegExp)
|
||||
}
|
||||
|
||||
if p.usedPaths != nil {
|
||||
p.usedPaths[path] = true
|
||||
}
|
||||
return p.PackageHashes[path]
|
||||
}
|
||||
|
||||
func (p *PackageHashes) Get(path string) *PackageHash {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
path, _ = filepath.Abs(path)
|
||||
if p.usedPaths != nil {
|
||||
p.usedPaths[path] = true
|
||||
}
|
||||
return p.PackageHashes[path]
|
||||
}
|
||||
|
||||
func (p *PackageHashes) StartTrackingUsage() {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
p.usedPaths = map[string]bool{}
|
||||
}
|
||||
|
||||
func (p *PackageHashes) StopTrackingUsageAndPrune() {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
for path := range p.PackageHashes {
|
||||
if !p.usedPaths[path] {
|
||||
delete(p.PackageHashes, path)
|
||||
}
|
||||
}
|
||||
|
||||
p.usedPaths = nil
|
||||
}
|
87
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go
generated
vendored
Normal file
@@ -0,0 +1,87 @@
|
||||
package watch
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/ginkgo/internal"
|
||||
)
|
||||
|
||||
type Suite struct {
|
||||
Suite internal.TestSuite
|
||||
RunTime time.Time
|
||||
Dependencies Dependencies
|
||||
|
||||
sharedPackageHashes *PackageHashes
|
||||
}
|
||||
|
||||
func NewSuite(suite internal.TestSuite, maxDepth int, sharedPackageHashes *PackageHashes) (*Suite, error) {
|
||||
deps, err := NewDependencies(suite.Path, maxDepth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sharedPackageHashes.Add(suite.Path)
|
||||
for dep := range deps.Dependencies() {
|
||||
sharedPackageHashes.Add(dep)
|
||||
}
|
||||
|
||||
return &Suite{
|
||||
Suite: suite,
|
||||
Dependencies: deps,
|
||||
|
||||
sharedPackageHashes: sharedPackageHashes,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Suite) Delta() float64 {
|
||||
delta := s.delta(s.Suite.Path, true, 0) * 1000
|
||||
for dep, depth := range s.Dependencies.Dependencies() {
|
||||
delta += s.delta(dep, false, depth)
|
||||
}
|
||||
return delta
|
||||
}
|
||||
|
||||
func (s *Suite) MarkAsRunAndRecomputedDependencies(maxDepth int) error {
|
||||
s.RunTime = time.Now()
|
||||
|
||||
deps, err := NewDependencies(s.Suite.Path, maxDepth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.sharedPackageHashes.Add(s.Suite.Path)
|
||||
for dep := range deps.Dependencies() {
|
||||
s.sharedPackageHashes.Add(dep)
|
||||
}
|
||||
|
||||
s.Dependencies = deps
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Suite) Description() string {
|
||||
numDeps := len(s.Dependencies.Dependencies())
|
||||
pluralizer := "ies"
|
||||
if numDeps == 1 {
|
||||
pluralizer = "y"
|
||||
}
|
||||
return fmt.Sprintf("%s [%d dependenc%s]", s.Suite.Path, numDeps, pluralizer)
|
||||
}
|
||||
|
||||
func (s *Suite) delta(packagePath string, includeTests bool, depth int) float64 {
|
||||
return math.Max(float64(s.dt(packagePath, includeTests)), 0) / float64(depth+1)
|
||||
}
|
||||
|
||||
func (s *Suite) dt(packagePath string, includeTests bool) time.Duration {
|
||||
packageHash := s.sharedPackageHashes.Get(packagePath)
|
||||
var modifiedTime time.Time
|
||||
if includeTests {
|
||||
modifiedTime = packageHash.TestModifiedTime
|
||||
} else {
|
||||
modifiedTime = packageHash.CodeModifiedTime
|
||||
}
|
||||
|
||||
return modifiedTime.Sub(s.RunTime)
|
||||
}
|
192
vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go
generated
vendored
Normal file
@@ -0,0 +1,192 @@
|
||||
package watch
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/formatter"
|
||||
"github.com/onsi/ginkgo/v2/ginkgo/command"
|
||||
"github.com/onsi/ginkgo/v2/ginkgo/internal"
|
||||
"github.com/onsi/ginkgo/v2/internal/interrupt_handler"
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
)
|
||||
|
||||
func BuildWatchCommand() command.Command {
|
||||
var suiteConfig = types.NewDefaultSuiteConfig()
|
||||
var reporterConfig = types.NewDefaultReporterConfig()
|
||||
var cliConfig = types.NewDefaultCLIConfig()
|
||||
var goFlagsConfig = types.NewDefaultGoFlagsConfig()
|
||||
|
||||
flags, err := types.BuildWatchCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
interruptHandler := interrupt_handler.NewInterruptHandler(nil)
|
||||
interrupt_handler.SwallowSigQuit()
|
||||
|
||||
return command.Command{
|
||||
Name: "watch",
|
||||
Flags: flags,
|
||||
Usage: "ginkgo watch <FLAGS> <PACKAGES> -- <PASS-THROUGHS>",
|
||||
ShortDoc: "Watch the passed in <PACKAGES> and runs their tests whenever changes occur.",
|
||||
Documentation: "Any arguments after -- will be passed to the test.",
|
||||
DocLink: "watching-for-changes",
|
||||
Command: func(args []string, additionalArgs []string) {
|
||||
var errors []error
|
||||
cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig)
|
||||
command.AbortIfErrors("Ginkgo detected configuration issues:", errors)
|
||||
|
||||
watcher := &SpecWatcher{
|
||||
cliConfig: cliConfig,
|
||||
goFlagsConfig: goFlagsConfig,
|
||||
suiteConfig: suiteConfig,
|
||||
reporterConfig: reporterConfig,
|
||||
flags: flags,
|
||||
|
||||
interruptHandler: interruptHandler,
|
||||
}
|
||||
|
||||
watcher.WatchSpecs(args, additionalArgs)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type SpecWatcher struct {
|
||||
suiteConfig types.SuiteConfig
|
||||
reporterConfig types.ReporterConfig
|
||||
cliConfig types.CLIConfig
|
||||
goFlagsConfig types.GoFlagsConfig
|
||||
flags types.GinkgoFlagSet
|
||||
|
||||
interruptHandler *interrupt_handler.InterruptHandler
|
||||
}
|
||||
|
||||
func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) {
|
||||
suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter)
|
||||
|
||||
internal.VerifyCLIAndFrameworkVersion(suites)
|
||||
|
||||
if len(suites) == 0 {
|
||||
command.AbortWith("Found no test suites")
|
||||
}
|
||||
|
||||
fmt.Printf("Identified %d test %s. Locating dependencies to a depth of %d (this may take a while)...\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), w.cliConfig.Depth)
|
||||
deltaTracker := NewDeltaTracker(w.cliConfig.Depth, regexp.MustCompile(w.cliConfig.WatchRegExp))
|
||||
delta, errors := deltaTracker.Delta(suites)
|
||||
|
||||
fmt.Printf("Watching %d %s:\n", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites)))
|
||||
for _, suite := range delta.NewSuites {
|
||||
fmt.Println(" " + suite.Description())
|
||||
}
|
||||
|
||||
for suite, err := range errors {
|
||||
fmt.Printf("Failed to watch %s: %s\n", suite.PackageName, err)
|
||||
}
|
||||
|
||||
if len(suites) == 1 {
|
||||
w.updateSeed()
|
||||
w.compileAndRun(suites[0], additionalArgs)
|
||||
}
|
||||
|
||||
ticker := time.NewTicker(time.Second)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter)
|
||||
delta, _ := deltaTracker.Delta(suites)
|
||||
coloredStream := formatter.ColorableStdOut
|
||||
|
||||
suites = internal.TestSuites{}
|
||||
|
||||
if len(delta.NewSuites) > 0 {
|
||||
fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected %d new %s:{{/}}", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites))))
|
||||
for _, suite := range delta.NewSuites {
|
||||
suites = append(suites, suite.Suite)
|
||||
fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description()))
|
||||
}
|
||||
}
|
||||
|
||||
modifiedSuites := delta.ModifiedSuites()
|
||||
if len(modifiedSuites) > 0 {
|
||||
fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected changes in:{{/}}"))
|
||||
for _, pkg := range delta.ModifiedPackages {
|
||||
fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", pkg))
|
||||
}
|
||||
fmt.Fprintln(coloredStream, formatter.F("{{green}}Will run %d %s:{{/}}", len(modifiedSuites), internal.PluralizedWord("suite", "suites", len(modifiedSuites))))
|
||||
for _, suite := range modifiedSuites {
|
||||
suites = append(suites, suite.Suite)
|
||||
fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description()))
|
||||
}
|
||||
fmt.Fprintln(coloredStream, "")
|
||||
}
|
||||
|
||||
if len(suites) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
w.updateSeed()
|
||||
w.computeSuccinctMode(len(suites))
|
||||
for idx := range suites {
|
||||
if w.interruptHandler.Status().Interrupted() {
|
||||
return
|
||||
}
|
||||
deltaTracker.WillRun(suites[idx])
|
||||
suites[idx] = w.compileAndRun(suites[idx], additionalArgs)
|
||||
}
|
||||
color := "{{green}}"
|
||||
if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 {
|
||||
color = "{{red}}"
|
||||
}
|
||||
fmt.Fprintln(coloredStream, formatter.F(color+"\nDone. Resuming watch...{{/}}"))
|
||||
|
||||
messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, w.cliConfig, w.suiteConfig, w.reporterConfig, w.goFlagsConfig)
|
||||
command.AbortIfError("could not finalize profiles:", err)
|
||||
for _, message := range messages {
|
||||
fmt.Println(message)
|
||||
}
|
||||
case <-w.interruptHandler.Status().Channel:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *SpecWatcher) compileAndRun(suite internal.TestSuite, additionalArgs []string) internal.TestSuite {
|
||||
suite = internal.CompileSuite(suite, w.goFlagsConfig)
|
||||
if suite.State.Is(internal.TestSuiteStateFailedToCompile) {
|
||||
fmt.Println(suite.CompilationError.Error())
|
||||
return suite
|
||||
}
|
||||
if w.interruptHandler.Status().Interrupted() {
|
||||
return suite
|
||||
}
|
||||
suite = internal.RunCompiledSuite(suite, w.suiteConfig, w.reporterConfig, w.cliConfig, w.goFlagsConfig, additionalArgs)
|
||||
internal.Cleanup(w.goFlagsConfig, suite)
|
||||
return suite
|
||||
}
|
||||
|
||||
func (w *SpecWatcher) computeSuccinctMode(numSuites int) {
|
||||
if w.reporterConfig.Verbosity().GTE(types.VerbosityLevelVerbose) {
|
||||
w.reporterConfig.Succinct = false
|
||||
return
|
||||
}
|
||||
|
||||
if w.flags.WasSet("succinct") {
|
||||
return
|
||||
}
|
||||
|
||||
if numSuites == 1 {
|
||||
w.reporterConfig.Succinct = false
|
||||
}
|
||||
|
||||
if numSuites > 1 {
|
||||
w.reporterConfig.Succinct = true
|
||||
}
|
||||
}
|
||||
|
||||
func (w *SpecWatcher) updateSeed() {
|
||||
if !w.flags.WasSet("seed") {
|
||||
w.suiteConfig.RandomSeed = time.Now().Unix()
|
||||
}
|
||||
}
|
162
vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go
generated
vendored
Normal file
@@ -0,0 +1,162 @@
package interrupt_handler

import (
	"os"
	"os/signal"
	"sync"
	"syscall"
	"time"

	"github.com/onsi/ginkgo/v2/internal/parallel_support"
)

const ABORT_POLLING_INTERVAL = 500 * time.Millisecond

type InterruptCause uint

const (
	InterruptCauseInvalid InterruptCause = iota
	InterruptCauseSignal
	InterruptCauseAbortByOtherProcess
)

type InterruptLevel uint

const (
	InterruptLevelUninterrupted InterruptLevel = iota
	InterruptLevelCleanupAndReport
	InterruptLevelReportOnly
	InterruptLevelBailOut
)

func (ic InterruptCause) String() string {
	switch ic {
	case InterruptCauseSignal:
		return "Interrupted by User"
	case InterruptCauseAbortByOtherProcess:
		return "Interrupted by Other Ginkgo Process"
	}
	return "INVALID_INTERRUPT_CAUSE"
}

type InterruptStatus struct {
	Channel chan interface{}
	Level   InterruptLevel
	Cause   InterruptCause
}

func (s InterruptStatus) Interrupted() bool {
	return s.Level != InterruptLevelUninterrupted
}

func (s InterruptStatus) Message() string {
	return s.Cause.String()
}

func (s InterruptStatus) ShouldIncludeProgressReport() bool {
	return s.Cause != InterruptCauseAbortByOtherProcess
}

type InterruptHandlerInterface interface {
	Status() InterruptStatus
}

type InterruptHandler struct {
	c       chan interface{}
	lock    *sync.Mutex
	level   InterruptLevel
	cause   InterruptCause
	client  parallel_support.Client
	stop    chan interface{}
	signals []os.Signal
}

func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *InterruptHandler {
	if len(signals) == 0 {
		signals = []os.Signal{os.Interrupt, syscall.SIGTERM}
	}
	handler := &InterruptHandler{
		c:       make(chan interface{}),
		lock:    &sync.Mutex{},
		stop:    make(chan interface{}),
		client:  client,
		signals: signals,
	}
	handler.registerForInterrupts()
	return handler
}

func (handler *InterruptHandler) Stop() {
	close(handler.stop)
}

func (handler *InterruptHandler) registerForInterrupts() {
	// os signal handling
	signalChannel := make(chan os.Signal, 1)
	signal.Notify(signalChannel, handler.signals...)

	// cross-process abort handling
	var abortChannel chan interface{}
	if handler.client != nil {
		abortChannel = make(chan interface{})
		go func() {
			pollTicker := time.NewTicker(ABORT_POLLING_INTERVAL)
			for {
				select {
				case <-pollTicker.C:
					if handler.client.ShouldAbort() {
						close(abortChannel)
						pollTicker.Stop()
						return
					}
				case <-handler.stop:
					pollTicker.Stop()
					return
				}
			}
		}()
	}

	go func(abortChannel chan interface{}) {
		var interruptCause InterruptCause
		for {
			select {
			case <-signalChannel:
				interruptCause = InterruptCauseSignal
			case <-abortChannel:
				interruptCause = InterruptCauseAbortByOtherProcess
			case <-handler.stop:
				signal.Stop(signalChannel)
				return
			}
			abortChannel = nil

			handler.lock.Lock()
			oldLevel := handler.level
			handler.cause = interruptCause
			if handler.level == InterruptLevelUninterrupted {
				handler.level = InterruptLevelCleanupAndReport
			} else if handler.level == InterruptLevelCleanupAndReport {
				handler.level = InterruptLevelReportOnly
			} else if handler.level == InterruptLevelReportOnly {
				handler.level = InterruptLevelBailOut
			}
			if handler.level != oldLevel {
				close(handler.c)
				handler.c = make(chan interface{})
			}
			handler.lock.Unlock()
		}
	}(abortChannel)
}

func (handler *InterruptHandler) Status() InterruptStatus {
	handler.lock.Lock()
	defer handler.lock.Unlock()

	return InterruptStatus{
		Level:   handler.level,
		Channel: handler.c,
		Cause:   handler.cause,
	}
}
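The escalation logic above only closes and swaps the status channel when the level actually changes (Uninterrupted, then CleanupAndReport, ReportOnly, BailOut). A minimal sketch of the intended consumption pattern follows; the helper name is ours and it is not part of the vendored file.

package interrupt_handler

import "fmt"

// waitForFirstInterrupt is an illustrative sketch, not part of the vendored
// file. The status channel is closed and replaced on each escalation, so
// callers re-fetch Status() after every receive to pick up the new channel,
// level, and cause.
func waitForFirstInterrupt(handler *InterruptHandler) InterruptStatus {
	status := handler.Status()
	if status.Interrupted() {
		return status
	}
	<-status.Channel
	status = handler.Status()
	fmt.Printf("interrupt level %d: %s\n", status.Level, status.Message())
	return status
}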
15
vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
//go:build freebsd || openbsd || netbsd || dragonfly || darwin || linux || solaris
// +build freebsd openbsd netbsd dragonfly darwin linux solaris

package interrupt_handler

import (
	"os"
	"os/signal"
	"syscall"
)

func SwallowSigQuit() {
	c := make(chan os.Signal, 1024)
	signal.Notify(c, syscall.SIGQUIT)
}
8
vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go
generated
vendored
Normal file
@@ -0,0 +1,8 @@
//go:build windows
// +build windows

package interrupt_handler

func SwallowSigQuit() {
	//noop
}
70
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go
generated
vendored
Normal file
@@ -0,0 +1,70 @@
package parallel_support

import (
	"fmt"
	"io"
	"os"
	"time"

	"github.com/onsi/ginkgo/v2/reporters"
	"github.com/onsi/ginkgo/v2/types"
)

type BeforeSuiteState struct {
	Data  []byte
	State types.SpecState
}

type ParallelIndexCounter struct {
	Index int
}

var ErrorGone = fmt.Errorf("gone")
var ErrorFailed = fmt.Errorf("failed")
var ErrorEarly = fmt.Errorf("early")

var POLLING_INTERVAL = 50 * time.Millisecond

type Server interface {
	Start()
	Close()
	Address() string
	RegisterAlive(node int, alive func() bool)
	GetSuiteDone() chan interface{}
	GetOutputDestination() io.Writer
	SetOutputDestination(io.Writer)
}

type Client interface {
	Connect() bool
	Close() error

	PostSuiteWillBegin(report types.Report) error
	PostDidRun(report types.SpecReport) error
	PostSuiteDidEnd(report types.Report) error
	PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error
	BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error)
	BlockUntilNonprimaryProcsHaveFinished() error
	BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error)
	FetchNextCounter() (int, error)
	PostAbort() error
	ShouldAbort() bool
	PostEmitProgressReport(report types.ProgressReport) error
	Write(p []byte) (int, error)
}

func NewServer(parallelTotal int, reporter reporters.Reporter) (Server, error) {
	if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" {
		return newHttpServer(parallelTotal, reporter)
	} else {
		return newRPCServer(parallelTotal, reporter)
	}
}

func NewClient(serverHost string) Client {
	if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" {
		return newHttpClient(serverHost)
	} else {
		return newRPCClient(serverHost)
	}
}
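The two constructors above select the transport from the GINKGO_PARALLEL_PROTOCOL environment variable. A rough sketch of how a server on the primary process and a client on a worker pair up; the helper and its retry policy are illustrative only, not part of the vendored diff.

package parallel_support

import (
	"fmt"
	"time"

	"github.com/onsi/ginkgo/v2/reporters"
)

// startPair is an illustrative sketch, not part of the vendored file. The
// primary process constructs a Server, each worker dials it with a Client,
// and GINKGO_PARALLEL_PROTOCOL decides whether both sides speak HTTP or
// net/rpc; since both read the same variable they always agree.
func startPair(parallelTotal int, reporter reporters.Reporter) (Server, Client, error) {
	server, err := NewServer(parallelTotal, reporter)
	if err != nil {
		return nil, nil, err
	}
	server.Start()

	client := NewClient(server.Address())
	for tries := 0; !client.Connect(); tries++ {
		if tries > 100 { // give up after ~5s; this retry policy is ours, not Ginkgo's
			server.Close()
			return nil, nil, fmt.Errorf("could not connect to %s", server.Address())
		}
		time.Sleep(50 * time.Millisecond)
	}
	return server, client, nil
}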
156
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go
generated
vendored
Normal file
@@ -0,0 +1,156 @@
|
||||
package parallel_support
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
)
|
||||
|
||||
type httpClient struct {
|
||||
serverHost string
|
||||
}
|
||||
|
||||
func newHttpClient(serverHost string) *httpClient {
|
||||
return &httpClient{
|
||||
serverHost: serverHost,
|
||||
}
|
||||
}
|
||||
|
||||
func (client *httpClient) Connect() bool {
|
||||
resp, err := http.Get(client.serverHost + "/up")
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
resp.Body.Close()
|
||||
return resp.StatusCode == http.StatusOK
|
||||
}
|
||||
|
||||
func (client *httpClient) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (client *httpClient) post(path string, data interface{}) error {
|
||||
var body io.Reader
|
||||
if data != nil {
|
||||
encoded, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
body = bytes.NewBuffer(encoded)
|
||||
}
|
||||
resp, err := http.Post(client.serverHost+path, "application/json", body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("received unexpected status code %d", resp.StatusCode)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (client *httpClient) poll(path string, data interface{}) error {
|
||||
for {
|
||||
resp, err := http.Get(client.serverHost + path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp.StatusCode == http.StatusTooEarly {
|
||||
resp.Body.Close()
|
||||
time.Sleep(POLLING_INTERVAL)
|
||||
continue
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode == http.StatusGone {
|
||||
return ErrorGone
|
||||
}
|
||||
if resp.StatusCode == http.StatusFailedDependency {
|
||||
return ErrorFailed
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("received unexpected status code %d", resp.StatusCode)
|
||||
}
|
||||
if data != nil {
|
||||
return json.NewDecoder(resp.Body).Decode(data)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (client *httpClient) PostSuiteWillBegin(report types.Report) error {
|
||||
return client.post("/suite-will-begin", report)
|
||||
}
|
||||
|
||||
func (client *httpClient) PostDidRun(report types.SpecReport) error {
|
||||
return client.post("/did-run", report)
|
||||
}
|
||||
|
||||
func (client *httpClient) PostSuiteDidEnd(report types.Report) error {
|
||||
return client.post("/suite-did-end", report)
|
||||
}
|
||||
|
||||
func (client *httpClient) PostEmitProgressReport(report types.ProgressReport) error {
|
||||
return client.post("/progress-report", report)
|
||||
}
|
||||
|
||||
func (client *httpClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error {
|
||||
beforeSuiteState := BeforeSuiteState{
|
||||
State: state,
|
||||
Data: data,
|
||||
}
|
||||
return client.post("/before-suite-completed", beforeSuiteState)
|
||||
}
|
||||
|
||||
func (client *httpClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) {
|
||||
var beforeSuiteState BeforeSuiteState
|
||||
err := client.poll("/before-suite-state", &beforeSuiteState)
|
||||
if err == ErrorGone {
|
||||
return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1()
|
||||
}
|
||||
return beforeSuiteState.State, beforeSuiteState.Data, err
|
||||
}
|
||||
|
||||
func (client *httpClient) BlockUntilNonprimaryProcsHaveFinished() error {
|
||||
return client.poll("/have-nonprimary-procs-finished", nil)
|
||||
}
|
||||
|
||||
func (client *httpClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) {
|
||||
var report types.Report
|
||||
err := client.poll("/aggregated-nonprimary-procs-report", &report)
|
||||
if err == ErrorGone {
|
||||
return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing()
|
||||
}
|
||||
return report, err
|
||||
}
|
||||
|
||||
func (client *httpClient) FetchNextCounter() (int, error) {
|
||||
var counter ParallelIndexCounter
|
||||
err := client.poll("/counter", &counter)
|
||||
return counter.Index, err
|
||||
}
|
||||
|
||||
func (client *httpClient) PostAbort() error {
|
||||
return client.post("/abort", nil)
|
||||
}
|
||||
|
||||
func (client *httpClient) ShouldAbort() bool {
|
||||
err := client.poll("/abort", nil)
|
||||
if err == ErrorGone {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (client *httpClient) Write(p []byte) (int, error) {
|
||||
resp, err := http.Post(client.serverHost+"/emit-output", "text/plain;charset=UTF-8 ", bytes.NewReader(p))
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return 0, fmt.Errorf("failed to emit output")
|
||||
}
|
||||
return len(p), err
|
||||
}
|
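The poll helper above turns the server's long-poll endpoints into blocking calls. The following sketch, written against the generic Client interface and not part of the vendored file, shows roughly how a non-primary process waits for proc 1's SynchronizedBeforeSuite data.

package parallel_support

import (
	"fmt"

	"github.com/onsi/ginkgo/v2/types"
)

// waitForProc1Setup is an illustrative sketch, not part of the vendored file.
// A non-primary process connects and then blocks (via the long-polling
// endpoints above) until proc 1 has finished SynchronizedBeforeSuite and
// published its state and data.
func waitForProc1Setup(client Client) ([]byte, error) {
	if !client.Connect() {
		return nil, fmt.Errorf("could not reach the parallel-support server")
	}
	state, data, err := client.BlockUntilSynchronizedBeforeSuiteData()
	if err != nil {
		return nil, err
	}
	if state != types.SpecStatePassed {
		return nil, fmt.Errorf("proc 1 setup finished in state %s", state)
	}
	return data, nil
}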
223
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go
generated
vendored
Normal file
@@ -0,0 +1,223 @@
|
||||
/*
|
||||
|
||||
The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
|
||||
This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser).
|
||||
|
||||
*/
|
||||
|
||||
package parallel_support
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/reporters"
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
)
|
||||
|
||||
/*
|
||||
httpServer spins up on an automatically selected port and listens for communication from the forwarding reporter.
|
||||
It then forwards that communication to attached reporters.
|
||||
*/
|
||||
type httpServer struct {
|
||||
listener net.Listener
|
||||
handler *ServerHandler
|
||||
}
|
||||
|
||||
//Create a new server, automatically selecting a port
|
||||
func newHttpServer(parallelTotal int, reporter reporters.Reporter) (*httpServer, error) {
|
||||
listener, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &httpServer{
|
||||
listener: listener,
|
||||
handler: newServerHandler(parallelTotal, reporter),
|
||||
}, nil
|
||||
}
|
||||
|
||||
//Start the server. You don't need to `go s.Start()`, just `s.Start()`
|
||||
func (server *httpServer) Start() {
|
||||
httpServer := &http.Server{}
|
||||
mux := http.NewServeMux()
|
||||
httpServer.Handler = mux
|
||||
|
||||
//streaming endpoints
|
||||
mux.HandleFunc("/suite-will-begin", server.specSuiteWillBegin)
|
||||
mux.HandleFunc("/did-run", server.didRun)
|
||||
mux.HandleFunc("/suite-did-end", server.specSuiteDidEnd)
|
||||
mux.HandleFunc("/emit-output", server.emitOutput)
|
||||
mux.HandleFunc("/progress-report", server.emitProgressReport)
|
||||
|
||||
//synchronization endpoints
|
||||
mux.HandleFunc("/before-suite-completed", server.handleBeforeSuiteCompleted)
|
||||
mux.HandleFunc("/before-suite-state", server.handleBeforeSuiteState)
|
||||
mux.HandleFunc("/have-nonprimary-procs-finished", server.handleHaveNonprimaryProcsFinished)
|
||||
mux.HandleFunc("/aggregated-nonprimary-procs-report", server.handleAggregatedNonprimaryProcsReport)
|
||||
mux.HandleFunc("/counter", server.handleCounter)
|
||||
mux.HandleFunc("/up", server.handleUp)
|
||||
mux.HandleFunc("/abort", server.handleAbort)
|
||||
|
||||
go httpServer.Serve(server.listener)
|
||||
}
|
||||
|
||||
//Stop the server
|
||||
func (server *httpServer) Close() {
|
||||
server.listener.Close()
|
||||
}
|
||||
|
||||
//The address the server can be reached at. Pass this into the `ForwardingReporter`.
|
||||
func (server *httpServer) Address() string {
|
||||
return "http://" + server.listener.Addr().String()
|
||||
}
|
||||
|
||||
func (server *httpServer) GetSuiteDone() chan interface{} {
|
||||
return server.handler.done
|
||||
}
|
||||
|
||||
func (server *httpServer) GetOutputDestination() io.Writer {
|
||||
return server.handler.outputDestination
|
||||
}
|
||||
|
||||
func (server *httpServer) SetOutputDestination(w io.Writer) {
|
||||
server.handler.outputDestination = w
|
||||
}
|
||||
|
||||
func (server *httpServer) RegisterAlive(node int, alive func() bool) {
|
||||
server.handler.registerAlive(node, alive)
|
||||
}
|
||||
|
||||
//
|
||||
// Streaming Endpoints
|
||||
//
|
||||
|
||||
//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
|
||||
func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object interface{}) bool {
|
||||
defer request.Body.Close()
|
||||
if json.NewDecoder(request.Body).Decode(object) != nil {
|
||||
writer.WriteHeader(http.StatusBadRequest)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (server *httpServer) handleError(err error, writer http.ResponseWriter) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
switch err {
|
||||
case ErrorEarly:
|
||||
writer.WriteHeader(http.StatusTooEarly)
|
||||
case ErrorGone:
|
||||
writer.WriteHeader(http.StatusGone)
|
||||
case ErrorFailed:
|
||||
writer.WriteHeader(http.StatusFailedDependency)
|
||||
default:
|
||||
writer.WriteHeader(http.StatusInternalServerError)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (server *httpServer) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) {
|
||||
var report types.Report
|
||||
if !server.decode(writer, request, &report) {
|
||||
return
|
||||
}
|
||||
|
||||
server.handleError(server.handler.SpecSuiteWillBegin(report, voidReceiver), writer)
|
||||
}
|
||||
|
||||
func (server *httpServer) didRun(writer http.ResponseWriter, request *http.Request) {
|
||||
var report types.SpecReport
|
||||
if !server.decode(writer, request, &report) {
|
||||
return
|
||||
}
|
||||
|
||||
server.handleError(server.handler.DidRun(report, voidReceiver), writer)
|
||||
}
|
||||
|
||||
func (server *httpServer) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) {
|
||||
var report types.Report
|
||||
if !server.decode(writer, request, &report) {
|
||||
return
|
||||
}
|
||||
server.handleError(server.handler.SpecSuiteDidEnd(report, voidReceiver), writer)
|
||||
}
|
||||
|
||||
func (server *httpServer) emitOutput(writer http.ResponseWriter, request *http.Request) {
|
||||
output, err := io.ReadAll(request.Body)
|
||||
if err != nil {
|
||||
writer.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
var n int
|
||||
server.handleError(server.handler.EmitOutput(output, &n), writer)
|
||||
}
|
||||
|
||||
func (server *httpServer) emitProgressReport(writer http.ResponseWriter, request *http.Request) {
|
||||
var report types.ProgressReport
|
||||
if !server.decode(writer, request, &report) {
|
||||
return
|
||||
}
|
||||
server.handleError(server.handler.EmitProgressReport(report, voidReceiver), writer)
|
||||
}
|
||||
|
||||
func (server *httpServer) handleBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) {
|
||||
var beforeSuiteState BeforeSuiteState
|
||||
if !server.decode(writer, request, &beforeSuiteState) {
|
||||
return
|
||||
}
|
||||
|
||||
server.handleError(server.handler.BeforeSuiteCompleted(beforeSuiteState, voidReceiver), writer)
|
||||
}
|
||||
|
||||
func (server *httpServer) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
|
||||
var beforeSuiteState BeforeSuiteState
|
||||
if server.handleError(server.handler.BeforeSuiteState(voidSender, &beforeSuiteState), writer) {
|
||||
return
|
||||
}
|
||||
json.NewEncoder(writer).Encode(beforeSuiteState)
|
||||
}
|
||||
|
||||
func (server *httpServer) handleHaveNonprimaryProcsFinished(writer http.ResponseWriter, request *http.Request) {
|
||||
if server.handleError(server.handler.HaveNonprimaryProcsFinished(voidSender, voidReceiver), writer) {
|
||||
return
|
||||
}
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func (server *httpServer) handleAggregatedNonprimaryProcsReport(writer http.ResponseWriter, request *http.Request) {
|
||||
var aggregatedReport types.Report
|
||||
if server.handleError(server.handler.AggregatedNonprimaryProcsReport(voidSender, &aggregatedReport), writer) {
|
||||
return
|
||||
}
|
||||
json.NewEncoder(writer).Encode(aggregatedReport)
|
||||
}
|
||||
|
||||
func (server *httpServer) handleCounter(writer http.ResponseWriter, request *http.Request) {
|
||||
var n int
|
||||
if server.handleError(server.handler.Counter(voidSender, &n), writer) {
|
||||
return
|
||||
}
|
||||
json.NewEncoder(writer).Encode(ParallelIndexCounter{Index: n})
|
||||
}
|
||||
|
||||
func (server *httpServer) handleUp(writer http.ResponseWriter, request *http.Request) {
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func (server *httpServer) handleAbort(writer http.ResponseWriter, request *http.Request) {
|
||||
if request.Method == "GET" {
|
||||
var shouldAbort bool
|
||||
server.handler.ShouldAbort(voidSender, &shouldAbort)
|
||||
if shouldAbort {
|
||||
writer.WriteHeader(http.StatusGone)
|
||||
} else {
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
}
|
||||
} else {
|
||||
server.handler.Abort(voidSender, voidReceiver)
|
||||
}
|
||||
}
|
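handleError above fixes the status-code side of the protocol, and the HTTP client's poll loop applies the inverse mapping. The sketch below, which is not part of the vendored file, spells out that mapping in one place.

package parallel_support

import (
	"fmt"
	"net/http"
)

// errorForStatus is an illustrative sketch, not part of the vendored file. It
// is the client-side inverse of handleError above: the HTTP client's poll
// loop treats 425 as "retry after POLLING_INTERVAL" and translates 410 and
// 424 back into the ErrorGone and ErrorFailed sentinels callers compare against.
func errorForStatus(code int) error {
	switch code {
	case http.StatusOK:
		return nil
	case http.StatusTooEarly: // 425: the data is not ready yet
		return ErrorEarly
	case http.StatusGone: // 410: the process being waited on disappeared
		return ErrorGone
	case http.StatusFailedDependency: // 424: a dependency failed
		return ErrorFailed
	default:
		return fmt.Errorf("received unexpected status code %d", code)
	}
}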
123
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go
generated
vendored
Normal file
@@ -0,0 +1,123 @@
|
||||
package parallel_support
|
||||
|
||||
import (
|
||||
"net/rpc"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
)
|
||||
|
||||
type rpcClient struct {
|
||||
serverHost string
|
||||
client *rpc.Client
|
||||
}
|
||||
|
||||
func newRPCClient(serverHost string) *rpcClient {
|
||||
return &rpcClient{
|
||||
serverHost: serverHost,
|
||||
}
|
||||
}
|
||||
|
||||
func (client *rpcClient) Connect() bool {
|
||||
var err error
|
||||
if client.client != nil {
|
||||
return true
|
||||
}
|
||||
client.client, err = rpc.DialHTTPPath("tcp", client.serverHost, "/")
|
||||
if err != nil {
|
||||
client.client = nil
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (client *rpcClient) Close() error {
|
||||
return client.client.Close()
|
||||
}
|
||||
|
||||
func (client *rpcClient) poll(method string, data interface{}) error {
|
||||
for {
|
||||
err := client.client.Call(method, voidSender, data)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
switch err.Error() {
|
||||
case ErrorEarly.Error():
|
||||
time.Sleep(POLLING_INTERVAL)
|
||||
case ErrorGone.Error():
|
||||
return ErrorGone
|
||||
case ErrorFailed.Error():
|
||||
return ErrorFailed
|
||||
default:
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (client *rpcClient) PostSuiteWillBegin(report types.Report) error {
|
||||
return client.client.Call("Server.SpecSuiteWillBegin", report, voidReceiver)
|
||||
}
|
||||
|
||||
func (client *rpcClient) PostDidRun(report types.SpecReport) error {
|
||||
return client.client.Call("Server.DidRun", report, voidReceiver)
|
||||
}
|
||||
|
||||
func (client *rpcClient) PostSuiteDidEnd(report types.Report) error {
|
||||
return client.client.Call("Server.SpecSuiteDidEnd", report, voidReceiver)
|
||||
}
|
||||
|
||||
func (client *rpcClient) Write(p []byte) (int, error) {
|
||||
var n int
|
||||
err := client.client.Call("Server.EmitOutput", p, &n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (client *rpcClient) PostEmitProgressReport(report types.ProgressReport) error {
|
||||
return client.client.Call("Server.EmitProgressReport", report, voidReceiver)
|
||||
}
|
||||
|
||||
func (client *rpcClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error {
|
||||
beforeSuiteState := BeforeSuiteState{
|
||||
State: state,
|
||||
Data: data,
|
||||
}
|
||||
return client.client.Call("Server.BeforeSuiteCompleted", beforeSuiteState, voidReceiver)
|
||||
}
|
||||
|
||||
func (client *rpcClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) {
|
||||
var beforeSuiteState BeforeSuiteState
|
||||
err := client.poll("Server.BeforeSuiteState", &beforeSuiteState)
|
||||
if err == ErrorGone {
|
||||
return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1()
|
||||
}
|
||||
return beforeSuiteState.State, beforeSuiteState.Data, err
|
||||
}
|
||||
|
||||
func (client *rpcClient) BlockUntilNonprimaryProcsHaveFinished() error {
|
||||
return client.poll("Server.HaveNonprimaryProcsFinished", voidReceiver)
|
||||
}
|
||||
|
||||
func (client *rpcClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) {
|
||||
var report types.Report
|
||||
err := client.poll("Server.AggregatedNonprimaryProcsReport", &report)
|
||||
if err == ErrorGone {
|
||||
return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing()
|
||||
}
|
||||
return report, err
|
||||
}
|
||||
|
||||
func (client *rpcClient) FetchNextCounter() (int, error) {
|
||||
var counter int
|
||||
err := client.client.Call("Server.Counter", voidSender, &counter)
|
||||
return counter, err
|
||||
}
|
||||
|
||||
func (client *rpcClient) PostAbort() error {
|
||||
return client.client.Call("Server.Abort", voidSender, voidReceiver)
|
||||
}
|
||||
|
||||
func (client *rpcClient) ShouldAbort() bool {
|
||||
var shouldAbort bool
|
||||
client.client.Call("Server.ShouldAbort", voidSender, &shouldAbort)
|
||||
return shouldAbort
|
||||
}
|
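Because net/rpc flattens errors into plain strings on the wire, the poll method above has to compare err.Error() against the sentinels rather than the errors themselves. A sketch of that translation, for illustration only and not part of the vendored file:

package parallel_support

import "errors"

// sentinelFromRPC is an illustrative sketch, not part of the vendored file.
// net/rpc transports errors as bare strings, so comparing the returned error
// against ErrorEarly/ErrorGone/ErrorFailed with == never matches; the caller
// has to match on err.Error() and translate back to the sentinels.
func sentinelFromRPC(err error) error {
	if err == nil {
		return nil
	}
	switch err.Error() {
	case ErrorEarly.Error():
		return ErrorEarly
	case ErrorGone.Error():
		return ErrorGone
	case ErrorFailed.Error():
		return ErrorFailed
	default:
		return errors.New(err.Error())
	}
}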
75
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go
generated
vendored
Normal file
@@ -0,0 +1,75 @@
/*

The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser).

*/

package parallel_support

import (
	"io"
	"net"
	"net/http"
	"net/rpc"

	"github.com/onsi/ginkgo/v2/reporters"
)

/*
RPCServer spins up on an automatically selected port and listens for communication from the forwarding reporter.
It then forwards that communication to attached reporters.
*/
type RPCServer struct {
	listener net.Listener
	handler  *ServerHandler
}

//Create a new server, automatically selecting a port
func newRPCServer(parallelTotal int, reporter reporters.Reporter) (*RPCServer, error) {
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return nil, err
	}
	return &RPCServer{
		listener: listener,
		handler:  newServerHandler(parallelTotal, reporter),
	}, nil
}

//Start the server. You don't need to `go s.Start()`, just `s.Start()`
func (server *RPCServer) Start() {
	rpcServer := rpc.NewServer()
	rpcServer.RegisterName("Server", server.handler) //register the handler's methods as the server

	httpServer := &http.Server{}
	httpServer.Handler = rpcServer

	go httpServer.Serve(server.listener)
}

//Stop the server
func (server *RPCServer) Close() {
	server.listener.Close()
}

//The address the server can be reached at. Pass this into the `ForwardingReporter`.
func (server *RPCServer) Address() string {
	return server.listener.Addr().String()
}

func (server *RPCServer) GetSuiteDone() chan interface{} {
	return server.handler.done
}

func (server *RPCServer) GetOutputDestination() io.Writer {
	return server.handler.outputDestination
}

func (server *RPCServer) SetOutputDestination(w io.Writer) {
	server.handler.outputDestination = w
}

func (server *RPCServer) RegisterAlive(node int, alive func() bool) {
	server.handler.registerAlive(node, alive)
}
209
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go
generated
vendored
Normal file
@@ -0,0 +1,209 @@
|
||||
package parallel_support
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/reporters"
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
)
|
||||
|
||||
type Void struct{}
|
||||
|
||||
var voidReceiver *Void = &Void{}
|
||||
var voidSender Void
|
||||
|
||||
// ServerHandler is an RPC-compatible handler that is shared between the http server and the rpc server.
|
||||
// It handles all the business logic to avoid duplication between the two servers
|
||||
|
||||
type ServerHandler struct {
|
||||
done chan interface{}
|
||||
outputDestination io.Writer
|
||||
reporter reporters.Reporter
|
||||
alives []func() bool
|
||||
lock *sync.Mutex
|
||||
beforeSuiteState BeforeSuiteState
|
||||
parallelTotal int
|
||||
counter int
|
||||
counterLock *sync.Mutex
|
||||
shouldAbort bool
|
||||
|
||||
numSuiteDidBegins int
|
||||
numSuiteDidEnds int
|
||||
aggregatedReport types.Report
|
||||
reportHoldingArea []types.SpecReport
|
||||
}
|
||||
|
||||
func newServerHandler(parallelTotal int, reporter reporters.Reporter) *ServerHandler {
|
||||
return &ServerHandler{
|
||||
reporter: reporter,
|
||||
lock: &sync.Mutex{},
|
||||
counterLock: &sync.Mutex{},
|
||||
alives: make([]func() bool, parallelTotal),
|
||||
beforeSuiteState: BeforeSuiteState{Data: nil, State: types.SpecStateInvalid},
|
||||
parallelTotal: parallelTotal,
|
||||
outputDestination: os.Stdout,
|
||||
done: make(chan interface{}),
|
||||
}
|
||||
}
|
||||
|
||||
func (handler *ServerHandler) SpecSuiteWillBegin(report types.Report, _ *Void) error {
|
||||
handler.lock.Lock()
|
||||
defer handler.lock.Unlock()
|
||||
|
||||
handler.numSuiteDidBegins += 1
|
||||
|
||||
// all summaries are identical, so it's fine to simply emit the last one of these
|
||||
if handler.numSuiteDidBegins == handler.parallelTotal {
|
||||
handler.reporter.SuiteWillBegin(report)
|
||||
|
||||
for _, summary := range handler.reportHoldingArea {
|
||||
handler.reporter.WillRun(summary)
|
||||
handler.reporter.DidRun(summary)
|
||||
}
|
||||
|
||||
handler.reportHoldingArea = nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (handler *ServerHandler) DidRun(report types.SpecReport, _ *Void) error {
|
||||
handler.lock.Lock()
|
||||
defer handler.lock.Unlock()
|
||||
|
||||
if handler.numSuiteDidBegins == handler.parallelTotal {
|
||||
handler.reporter.WillRun(report)
|
||||
handler.reporter.DidRun(report)
|
||||
} else {
|
||||
handler.reportHoldingArea = append(handler.reportHoldingArea, report)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (handler *ServerHandler) SpecSuiteDidEnd(report types.Report, _ *Void) error {
|
||||
handler.lock.Lock()
|
||||
defer handler.lock.Unlock()
|
||||
|
||||
handler.numSuiteDidEnds += 1
|
||||
if handler.numSuiteDidEnds == 1 {
|
||||
handler.aggregatedReport = report
|
||||
} else {
|
||||
handler.aggregatedReport = handler.aggregatedReport.Add(report)
|
||||
}
|
||||
|
||||
if handler.numSuiteDidEnds == handler.parallelTotal {
|
||||
handler.reporter.SuiteDidEnd(handler.aggregatedReport)
|
||||
close(handler.done)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (handler *ServerHandler) EmitOutput(output []byte, n *int) error {
|
||||
var err error
|
||||
*n, err = handler.outputDestination.Write(output)
|
||||
return err
|
||||
}
|
||||
|
||||
func (handler *ServerHandler) EmitProgressReport(report types.ProgressReport, _ *Void) error {
|
||||
handler.lock.Lock()
|
||||
defer handler.lock.Unlock()
|
||||
handler.reporter.EmitProgressReport(report)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (handler *ServerHandler) registerAlive(proc int, alive func() bool) {
|
||||
handler.lock.Lock()
|
||||
defer handler.lock.Unlock()
|
||||
handler.alives[proc-1] = alive
|
||||
}
|
||||
|
||||
func (handler *ServerHandler) procIsAlive(proc int) bool {
|
||||
handler.lock.Lock()
|
||||
defer handler.lock.Unlock()
|
||||
alive := handler.alives[proc-1]
|
||||
if alive == nil {
|
||||
return true
|
||||
}
|
||||
return alive()
|
||||
}
|
||||
|
||||
func (handler *ServerHandler) haveNonprimaryProcsFinished() bool {
|
||||
for i := 2; i <= handler.parallelTotal; i++ {
|
||||
if handler.procIsAlive(i) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (handler *ServerHandler) BeforeSuiteCompleted(beforeSuiteState BeforeSuiteState, _ *Void) error {
|
||||
handler.lock.Lock()
|
||||
defer handler.lock.Unlock()
|
||||
handler.beforeSuiteState = beforeSuiteState
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (handler *ServerHandler) BeforeSuiteState(_ Void, beforeSuiteState *BeforeSuiteState) error {
|
||||
proc1IsAlive := handler.procIsAlive(1)
|
||||
handler.lock.Lock()
|
||||
defer handler.lock.Unlock()
|
||||
if handler.beforeSuiteState.State == types.SpecStateInvalid {
|
||||
if proc1IsAlive {
|
||||
return ErrorEarly
|
||||
} else {
|
||||
return ErrorGone
|
||||
}
|
||||
}
|
||||
*beforeSuiteState = handler.beforeSuiteState
|
||||
return nil
|
||||
}
|
||||
|
||||
func (handler *ServerHandler) HaveNonprimaryProcsFinished(_ Void, _ *Void) error {
|
||||
if handler.haveNonprimaryProcsFinished() {
|
||||
return nil
|
||||
} else {
|
||||
return ErrorEarly
|
||||
}
|
||||
}
|
||||
|
||||
func (handler *ServerHandler) AggregatedNonprimaryProcsReport(_ Void, report *types.Report) error {
|
||||
if handler.haveNonprimaryProcsFinished() {
|
||||
handler.lock.Lock()
|
||||
defer handler.lock.Unlock()
|
||||
if handler.numSuiteDidEnds == handler.parallelTotal-1 {
|
||||
*report = handler.aggregatedReport
|
||||
return nil
|
||||
} else {
|
||||
return ErrorGone
|
||||
}
|
||||
} else {
|
||||
return ErrorEarly
|
||||
}
|
||||
}
|
||||
|
||||
func (handler *ServerHandler) Counter(_ Void, counter *int) error {
|
||||
handler.counterLock.Lock()
|
||||
defer handler.counterLock.Unlock()
|
||||
*counter = handler.counter
|
||||
handler.counter++
|
||||
return nil
|
||||
}
|
||||
|
||||
func (handler *ServerHandler) Abort(_ Void, _ *Void) error {
|
||||
handler.lock.Lock()
|
||||
defer handler.lock.Unlock()
|
||||
handler.shouldAbort = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (handler *ServerHandler) ShouldAbort(_ Void, shouldAbort *bool) error {
|
||||
handler.lock.Lock()
|
||||
defer handler.lock.Unlock()
|
||||
*shouldAbort = handler.shouldAbort
|
||||
return nil
|
||||
}
|
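The handler only closes its done channel once SpecSuiteDidEnd has arrived from every parallel process, at which point aggregatedReport holds the merged result. An illustrative sketch of waiting on that condition, not part of the vendored file:

package parallel_support

import "github.com/onsi/ginkgo/v2/types"

// waitForAggregatedReport is an illustrative sketch, not part of the vendored
// file. done is closed only after SpecSuiteDidEnd has been called by all
// parallelTotal processes, at which point aggregatedReport holds the merged
// report that was handed to the attached reporter.
func waitForAggregatedReport(handler *ServerHandler) types.Report {
	<-handler.done
	handler.lock.Lock()
	defer handler.lock.Unlock()
	return handler.aggregatedReport
}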
642
vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go
generated
vendored
Normal file
@@ -0,0 +1,642 @@
|
||||
/*
|
||||
Ginkgo's Default Reporter
|
||||
|
||||
A number of command line flags are available to tweak Ginkgo's default output.
|
||||
|
||||
These are documented [here](http://onsi.github.io/ginkgo/#running_tests)
|
||||
*/
|
||||
package reporters
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/formatter"
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
)
|
||||
|
||||
type DefaultReporter struct {
|
||||
conf types.ReporterConfig
|
||||
writer io.Writer
|
||||
|
||||
// managing the emission stream
|
||||
lastChar string
|
||||
lastEmissionWasDelimiter bool
|
||||
|
||||
// rendering
|
||||
specDenoter string
|
||||
retryDenoter string
|
||||
formatter formatter.Formatter
|
||||
}
|
||||
|
||||
func NewDefaultReporterUnderTest(conf types.ReporterConfig, writer io.Writer) *DefaultReporter {
|
||||
reporter := NewDefaultReporter(conf, writer)
|
||||
reporter.formatter = formatter.New(formatter.ColorModePassthrough)
|
||||
|
||||
return reporter
|
||||
}
|
||||
|
||||
func NewDefaultReporter(conf types.ReporterConfig, writer io.Writer) *DefaultReporter {
|
||||
reporter := &DefaultReporter{
|
||||
conf: conf,
|
||||
writer: writer,
|
||||
|
||||
lastChar: "\n",
|
||||
lastEmissionWasDelimiter: false,
|
||||
|
||||
specDenoter: "•",
|
||||
retryDenoter: "↺",
|
||||
formatter: formatter.NewWithNoColorBool(conf.NoColor),
|
||||
}
|
||||
if runtime.GOOS == "windows" {
|
||||
reporter.specDenoter = "+"
|
||||
reporter.retryDenoter = "R"
|
||||
}
|
||||
|
||||
return reporter
|
||||
}
|
||||
|
||||
/* The Reporter Interface */
|
||||
|
||||
func (r *DefaultReporter) SuiteWillBegin(report types.Report) {
|
||||
if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) {
|
||||
r.emit(r.f("[%d] {{bold}}%s{{/}} ", report.SuiteConfig.RandomSeed, report.SuiteDescription))
|
||||
if len(report.SuiteLabels) > 0 {
|
||||
r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteLabels, ", ")))
|
||||
}
|
||||
r.emit(r.f("- %d/%d specs ", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs))
|
||||
if report.SuiteConfig.ParallelTotal > 1 {
|
||||
r.emit(r.f("- %d procs ", report.SuiteConfig.ParallelTotal))
|
||||
}
|
||||
} else {
|
||||
banner := r.f("Running Suite: %s - %s", report.SuiteDescription, report.SuitePath)
|
||||
r.emitBlock(banner)
|
||||
bannerWidth := len(banner)
|
||||
if len(report.SuiteLabels) > 0 {
|
||||
labels := strings.Join(report.SuiteLabels, ", ")
|
||||
r.emitBlock(r.f("{{coral}}[%s]{{/}} ", labels))
|
||||
if len(labels)+2 > bannerWidth {
|
||||
bannerWidth = len(labels) + 2
|
||||
}
|
||||
}
|
||||
r.emitBlock(strings.Repeat("=", bannerWidth))
|
||||
|
||||
out := r.f("Random Seed: {{bold}}%d{{/}}", report.SuiteConfig.RandomSeed)
|
||||
if report.SuiteConfig.RandomizeAllSpecs {
|
||||
out += r.f(" - will randomize all specs")
|
||||
}
|
||||
r.emitBlock(out)
|
||||
r.emit("\n")
|
||||
r.emitBlock(r.f("Will run {{bold}}%d{{/}} of {{bold}}%d{{/}} specs", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs))
|
||||
if report.SuiteConfig.ParallelTotal > 1 {
|
||||
r.emitBlock(r.f("Running in parallel across {{bold}}%d{{/}} processes", report.SuiteConfig.ParallelTotal))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *DefaultReporter) WillRun(report types.SpecReport) {
|
||||
if r.conf.Verbosity().LT(types.VerbosityLevelVerbose) || report.State.Is(types.SpecStatePending|types.SpecStateSkipped) {
|
||||
return
|
||||
}
|
||||
|
||||
r.emitDelimiter()
|
||||
indentation := uint(0)
|
||||
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
|
||||
r.emitBlock(r.f("{{bold}}[%s] %s{{/}}", report.LeafNodeType.String(), report.LeafNodeText))
|
||||
} else {
|
||||
if len(report.ContainerHierarchyTexts) > 0 {
|
||||
r.emitBlock(r.cycleJoin(report.ContainerHierarchyTexts, " "))
|
||||
indentation = 1
|
||||
}
|
||||
line := r.fi(indentation, "{{bold}}%s{{/}}", report.LeafNodeText)
|
||||
labels := report.Labels()
|
||||
if len(labels) > 0 {
|
||||
line += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels, ", "))
|
||||
}
|
||||
r.emitBlock(line)
|
||||
}
|
||||
r.emitBlock(r.fi(indentation, "{{gray}}%s{{/}}", report.LeafNodeLocation))
|
||||
}
|
||||
|
||||
func (r *DefaultReporter) DidRun(report types.SpecReport) {
|
||||
v := r.conf.Verbosity()
|
||||
var header, highlightColor string
|
||||
includeRuntime, emitGinkgoWriterOutput, stream, denoter := true, true, false, r.specDenoter
|
||||
succinctLocationBlock := v.Is(types.VerbosityLevelSuccinct)
|
||||
|
||||
hasGW := report.CapturedGinkgoWriterOutput != ""
|
||||
hasStd := report.CapturedStdOutErr != ""
|
||||
hasEmittableReports := report.ReportEntries.HasVisibility(types.ReportEntryVisibilityAlways) || (report.ReportEntries.HasVisibility(types.ReportEntryVisibilityFailureOrVerbose) && (!report.Failure.IsZero() || v.GTE(types.VerbosityLevelVerbose)))
|
||||
|
||||
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
|
||||
denoter = fmt.Sprintf("[%s]", report.LeafNodeType)
|
||||
}
|
||||
|
||||
highlightColor = r.highlightColorForState(report.State)
|
||||
|
||||
switch report.State {
|
||||
case types.SpecStatePassed:
|
||||
succinctLocationBlock = v.LT(types.VerbosityLevelVerbose)
|
||||
emitGinkgoWriterOutput = (r.conf.AlwaysEmitGinkgoWriter || v.GTE(types.VerbosityLevelVerbose)) && hasGW
|
||||
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
|
||||
if v.GTE(types.VerbosityLevelVerbose) || hasStd || hasEmittableReports {
|
||||
header = fmt.Sprintf("%s PASSED", denoter)
|
||||
} else {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
header, stream = denoter, true
|
||||
if report.NumAttempts > 1 && report.MaxFlakeAttempts > 1 {
|
||||
header, stream = fmt.Sprintf("%s [FLAKEY TEST - TOOK %d ATTEMPTS TO PASS]", r.retryDenoter, report.NumAttempts), false
|
||||
}
|
||||
if report.RunTime > r.conf.SlowSpecThreshold {
|
||||
header, stream = fmt.Sprintf("%s [SLOW TEST]", header), false
|
||||
}
|
||||
}
|
||||
if hasStd || emitGinkgoWriterOutput || hasEmittableReports {
|
||||
stream = false
|
||||
}
|
||||
case types.SpecStatePending:
|
||||
includeRuntime, emitGinkgoWriterOutput = false, false
|
||||
if v.Is(types.VerbosityLevelSuccinct) {
|
||||
header, stream = "P", true
|
||||
} else {
|
||||
header, succinctLocationBlock = "P [PENDING]", v.LT(types.VerbosityLevelVeryVerbose)
|
||||
}
|
||||
case types.SpecStateSkipped:
|
||||
if report.Failure.Message != "" || v.Is(types.VerbosityLevelVeryVerbose) {
|
||||
header = "S [SKIPPED]"
|
||||
} else {
|
||||
header, stream = "S", true
|
||||
}
|
||||
case types.SpecStateFailed:
|
||||
header = fmt.Sprintf("%s [FAILED]", denoter)
|
||||
case types.SpecStateTimedout:
|
||||
header = fmt.Sprintf("%s [TIMEDOUT]", denoter)
|
||||
case types.SpecStatePanicked:
|
||||
header = fmt.Sprintf("%s! [PANICKED]", denoter)
|
||||
case types.SpecStateInterrupted:
|
||||
header = fmt.Sprintf("%s! [INTERRUPTED]", denoter)
|
||||
case types.SpecStateAborted:
|
||||
header = fmt.Sprintf("%s! [ABORTED]", denoter)
|
||||
}
|
||||
|
||||
if report.State.Is(types.SpecStateFailureStates) && report.MaxMustPassRepeatedly > 1 {
|
||||
header, stream = fmt.Sprintf("%s DURING REPETITION #%d", header, report.NumAttempts), false
|
||||
}
|
||||
// Emit stream and return
|
||||
if stream {
|
||||
r.emit(r.f(highlightColor + header + "{{/}}"))
|
||||
return
|
||||
}
|
||||
|
||||
// Emit header
|
||||
r.emitDelimiter()
|
||||
if includeRuntime {
|
||||
header = r.f("%s [%.3f seconds]", header, report.RunTime.Seconds())
|
||||
}
|
||||
r.emitBlock(r.f(highlightColor + header + "{{/}}"))
|
||||
|
||||
// Emit Code Location Block
|
||||
r.emitBlock(r.codeLocationBlock(report, highlightColor, succinctLocationBlock, false))
|
||||
|
||||
//Emit Stdout/Stderr Output
|
||||
if hasStd {
|
||||
r.emitBlock("\n")
|
||||
r.emitBlock(r.fi(1, "{{gray}}Begin Captured StdOut/StdErr Output >>{{/}}"))
|
||||
r.emitBlock(r.fi(2, "%s", report.CapturedStdOutErr))
|
||||
r.emitBlock(r.fi(1, "{{gray}}<< End Captured StdOut/StdErr Output{{/}}"))
|
||||
}
|
||||
|
||||
//Emit Captured GinkgoWriter Output
|
||||
if emitGinkgoWriterOutput && hasGW {
|
||||
r.emitBlock("\n")
|
||||
r.emitGinkgoWriterOutput(1, report.CapturedGinkgoWriterOutput, 0)
|
||||
}
|
||||
|
||||
if hasEmittableReports {
|
||||
r.emitBlock("\n")
|
||||
r.emitBlock(r.fi(1, "{{gray}}Begin Report Entries >>{{/}}"))
|
||||
reportEntries := report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways)
|
||||
if !report.Failure.IsZero() || v.GTE(types.VerbosityLevelVerbose) {
|
||||
reportEntries = report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways, types.ReportEntryVisibilityFailureOrVerbose)
|
||||
}
|
||||
for _, entry := range reportEntries {
|
||||
r.emitBlock(r.fi(2, "{{bold}}"+entry.Name+"{{gray}} - %s @ %s{{/}}", entry.Location, entry.Time.Format(types.GINKGO_TIME_FORMAT)))
|
||||
if representation := entry.StringRepresentation(); representation != "" {
|
||||
r.emitBlock(r.fi(3, representation))
|
||||
}
|
||||
}
|
||||
r.emitBlock(r.fi(1, "{{gray}}<< End Report Entries{{/}}"))
|
||||
}
|
||||
|
||||
// Emit Failure Message
|
||||
if !report.Failure.IsZero() {
|
||||
r.emitBlock("\n")
|
||||
r.EmitFailure(1, report.State, report.Failure, false)
|
||||
}
|
||||
|
||||
if len(report.AdditionalFailures) > 0 {
|
||||
if v.GTE(types.VerbosityLevelVerbose) {
|
||||
r.emitBlock("\n")
|
||||
r.emitBlock(r.fi(1, "{{bold}}There were additional failures detected after the initial failure:{{/}}"))
|
||||
for i, additionalFailure := range report.AdditionalFailures {
|
||||
r.EmitFailure(2, additionalFailure.State, additionalFailure.Failure, true)
|
||||
if i < len(report.AdditionalFailures)-1 {
|
||||
r.emitBlock(r.fi(2, "{{gray}}%s{{/}}", strings.Repeat("-", 10)))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
r.emitBlock("\n")
|
||||
r.emitBlock(r.fi(1, "{{bold}}There were additional failures detected after the initial failure. Here's a summary - for full details run Ginkgo in verbose mode:{{/}}"))
|
||||
for _, additionalFailure := range report.AdditionalFailures {
|
||||
r.emitBlock(r.fi(2, r.highlightColorForState(additionalFailure.State)+"[%s]{{/}} in [%s] at %s",
|
||||
r.humanReadableState(additionalFailure.State),
|
||||
additionalFailure.Failure.FailureNodeType,
|
||||
additionalFailure.Failure.Location,
|
||||
))
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
r.emitDelimiter()
|
||||
}
|
||||
|
||||
func (r *DefaultReporter) highlightColorForState(state types.SpecState) string {
|
||||
switch state {
|
||||
case types.SpecStatePassed:
|
||||
return "{{green}}"
|
||||
case types.SpecStatePending:
|
||||
return "{{yellow}}"
|
||||
case types.SpecStateSkipped:
|
||||
return "{{cyan}}"
|
||||
case types.SpecStateFailed:
|
||||
return "{{red}}"
|
||||
case types.SpecStateTimedout:
|
||||
return "{{orange}}"
|
||||
case types.SpecStatePanicked:
|
||||
return "{{magenta}}"
|
||||
case types.SpecStateInterrupted:
|
||||
return "{{orange}}"
|
||||
case types.SpecStateAborted:
|
||||
return "{{coral}}"
|
||||
default:
|
||||
return "{{gray}}"
|
||||
}
|
||||
}
|
||||
|
||||
func (r *DefaultReporter) humanReadableState(state types.SpecState) string {
|
||||
return strings.ToUpper(state.String())
|
||||
}
|
||||
|
||||
func (r *DefaultReporter) EmitFailure(indent uint, state types.SpecState, failure types.Failure, includeState bool) {
|
||||
highlightColor := r.highlightColorForState(state)
|
||||
if includeState {
|
||||
r.emitBlock(r.fi(indent, highlightColor+"[%s]{{/}}", r.humanReadableState(state)))
|
||||
}
|
||||
r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.Message))
|
||||
r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}}\n", failure.FailureNodeType, failure.Location))
|
||||
if failure.ForwardedPanic != "" {
|
||||
r.emitBlock("\n")
|
||||
r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic))
|
||||
}
|
||||
|
||||
if r.conf.FullTrace || failure.ForwardedPanic != "" {
|
||||
r.emitBlock("\n")
|
||||
r.emitBlock(r.fi(indent, highlightColor+"Full Stack Trace{{/}}"))
|
||||
r.emitBlock(r.fi(indent+1, "%s", failure.Location.FullStackTrace))
|
||||
}
|
||||
|
||||
if !failure.ProgressReport.IsZero() {
|
||||
r.emitBlock("\n")
|
||||
r.emitProgressReport(indent, false, failure.ProgressReport)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *DefaultReporter) SuiteDidEnd(report types.Report) {
|
||||
failures := report.SpecReports.WithState(types.SpecStateFailureStates)
|
||||
if len(failures) > 0 {
|
||||
r.emitBlock("\n\n")
|
||||
if len(failures) > 1 {
|
||||
r.emitBlock(r.f("{{red}}{{bold}}Summarizing %d Failures:{{/}}", len(failures)))
|
||||
} else {
|
||||
r.emitBlock(r.f("{{red}}{{bold}}Summarizing 1 Failure:{{/}}"))
|
||||
}
|
||||
for _, specReport := range failures {
|
||||
highlightColor, heading := "{{red}}", "[FAIL]"
|
||||
switch specReport.State {
|
||||
case types.SpecStatePanicked:
|
||||
highlightColor, heading = "{{magenta}}", "[PANICKED!]"
|
||||
case types.SpecStateAborted:
|
||||
highlightColor, heading = "{{coral}}", "[ABORTED]"
|
||||
case types.SpecStateTimedout:
|
||||
highlightColor, heading = "{{orange}}", "[TIMEDOUT]"
|
||||
case types.SpecStateInterrupted:
|
||||
highlightColor, heading = "{{orange}}", "[INTERRUPTED]"
|
||||
}
|
||||
locationBlock := r.codeLocationBlock(specReport, highlightColor, true, true)
|
||||
r.emitBlock(r.fi(1, highlightColor+"%s{{/}} %s", heading, locationBlock))
|
||||
}
|
||||
}
|
||||
|
||||
//summarize the suite
|
||||
if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) && report.SuiteSucceeded {
|
||||
r.emit(r.f(" {{green}}SUCCESS!{{/}} %s ", report.RunTime))
|
||||
return
|
||||
}
|
||||
|
||||
r.emitBlock("\n")
|
||||
color, status := "{{green}}{{bold}}", "SUCCESS!"
|
||||
if !report.SuiteSucceeded {
|
||||
color, status = "{{red}}{{bold}}", "FAIL!"
|
||||
}
|
||||
|
||||
specs := report.SpecReports.WithLeafNodeType(types.NodeTypeIt) //exclude any suite setup nodes
|
||||
r.emitBlock(r.f(color+"Ran %d of %d Specs in %.3f seconds{{/}}",
|
||||
specs.CountWithState(types.SpecStatePassed)+specs.CountWithState(types.SpecStateFailureStates),
|
||||
report.PreRunStats.TotalSpecs,
|
||||
report.RunTime.Seconds()),
|
||||
)
|
||||
|
||||
switch len(report.SpecialSuiteFailureReasons) {
|
||||
case 0:
|
||||
r.emit(r.f(color+"%s{{/}} -- ", status))
|
||||
case 1:
|
||||
r.emit(r.f(color+"%s - %s{{/}} -- ", status, report.SpecialSuiteFailureReasons[0]))
|
||||
default:
|
||||
r.emitBlock(r.f(color+"%s - %s{{/}}\n", status, strings.Join(report.SpecialSuiteFailureReasons, ", ")))
|
||||
}
|
||||
|
||||
if len(specs) == 0 && report.SpecReports.WithLeafNodeType(types.NodeTypeBeforeSuite|types.NodeTypeSynchronizedBeforeSuite).CountWithState(types.SpecStateFailureStates) > 0 {
|
||||
r.emit(r.f("{{cyan}}{{bold}}A BeforeSuite node failed so all tests were skipped.{{/}}\n"))
|
||||
} else {
|
||||
r.emit(r.f("{{green}}{{bold}}%d Passed{{/}} | ", specs.CountWithState(types.SpecStatePassed)))
|
||||
r.emit(r.f("{{red}}{{bold}}%d Failed{{/}} | ", specs.CountWithState(types.SpecStateFailureStates)))
|
||||
if specs.CountOfFlakedSpecs() > 0 {
|
||||
r.emit(r.f("{{light-yellow}}{{bold}}%d Flaked{{/}} | ", specs.CountOfFlakedSpecs()))
|
||||
}
|
||||
if specs.CountOfRepeatedSpecs() > 0 {
|
||||
r.emit(r.f("{{light-yellow}}{{bold}}%d Repeated{{/}} | ", specs.CountOfRepeatedSpecs()))
|
||||
}
|
||||
r.emit(r.f("{{yellow}}{{bold}}%d Pending{{/}} | ", specs.CountWithState(types.SpecStatePending)))
|
||||
r.emit(r.f("{{cyan}}{{bold}}%d Skipped{{/}}\n", specs.CountWithState(types.SpecStateSkipped)))
|
||||
}
|
||||
}
|
||||
|
||||
func (r *DefaultReporter) EmitProgressReport(report types.ProgressReport) {
|
||||
r.emitDelimiter()
|
||||
|
||||
if report.RunningInParallel {
|
||||
r.emit(r.f("{{coral}}Progress Report for Ginkgo Process #{{bold}}%d{{/}}\n", report.ParallelProcess))
|
||||
}
|
||||
r.emitProgressReport(0, true, report)
|
||||
r.emitDelimiter()
|
||||
}
|
||||
|
||||
func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput bool, report types.ProgressReport) {
|
||||
if report.Message != "" {
|
||||
r.emitBlock(r.fi(indent, report.Message+"\n"))
|
||||
indent += 1
|
||||
}
|
||||
if report.LeafNodeText != "" {
|
||||
subjectIndent := indent
|
||||
if len(report.ContainerHierarchyTexts) > 0 {
|
||||
r.emit(r.fi(indent, r.cycleJoin(report.ContainerHierarchyTexts, " ")))
|
||||
r.emit(" ")
|
||||
subjectIndent = 0
|
||||
}
|
||||
r.emit(r.fi(subjectIndent, "{{bold}}{{orange}}%s{{/}} (Spec Runtime: %s)\n", report.LeafNodeText, report.Time.Sub(report.SpecStartTime).Round(time.Millisecond)))
|
||||
r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.LeafNodeLocation))
|
||||
indent += 1
|
||||
}
|
||||
if report.CurrentNodeType != types.NodeTypeInvalid {
|
||||
r.emit(r.fi(indent, "In {{bold}}{{orange}}[%s]{{/}}", report.CurrentNodeType))
|
||||
if report.CurrentNodeText != "" && !report.CurrentNodeType.Is(types.NodeTypeIt) {
|
||||
r.emit(r.f(" {{bold}}{{orange}}%s{{/}}", report.CurrentNodeText))
|
||||
}
|
||||
|
||||
r.emit(r.f(" (Node Runtime: %s)\n", report.Time.Sub(report.CurrentNodeStartTime).Round(time.Millisecond)))
|
||||
r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentNodeLocation))
|
||||
indent += 1
|
||||
}
|
||||
if report.CurrentStepText != "" {
|
||||
r.emit(r.fi(indent, "At {{bold}}{{orange}}[By Step] %s{{/}} (Step Runtime: %s)\n", report.CurrentStepText, report.Time.Sub(report.CurrentStepStartTime).Round(time.Millisecond)))
|
||||
r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentStepLocation))
|
||||
indent += 1
|
||||
}
|
||||
|
||||
if indent > 0 {
|
||||
indent -= 1
|
||||
}
|
||||
|
||||
if emitGinkgoWriterOutput && report.CapturedGinkgoWriterOutput != "" && (report.RunningInParallel || r.conf.Verbosity().LT(types.VerbosityLevelVerbose)) {
|
||||
r.emit("\n")
|
||||
r.emitGinkgoWriterOutput(indent, report.CapturedGinkgoWriterOutput, 10)
|
||||
}
|
||||
|
||||
if !report.SpecGoroutine().IsZero() {
|
||||
r.emit("\n")
|
||||
r.emit(r.fi(indent, "{{bold}}{{underline}}Spec Goroutine{{/}}\n"))
|
||||
r.emitGoroutines(indent, report.SpecGoroutine())
|
||||
}
|
||||
|
||||
if len(report.AdditionalReports) > 0 {
|
||||
r.emit("\n")
|
||||
r.emitBlock(r.fi(indent, "{{gray}}Begin Additional Progress Reports >>{{/}}"))
|
||||
for i, additionalReport := range report.AdditionalReports {
|
||||
r.emit(r.fi(indent+1, additionalReport))
|
||||
if i < len(report.AdditionalReports)-1 {
|
||||
r.emitBlock(r.fi(indent+1, "{{gray}}%s{{/}}", strings.Repeat("-", 10)))
|
||||
}
|
||||
}
|
||||
r.emitBlock(r.fi(indent, "{{gray}}<< End Additional Progress Reports{{/}}"))
|
||||
}
|
||||
|
||||
highlightedGoroutines := report.HighlightedGoroutines()
|
||||
if len(highlightedGoroutines) > 0 {
|
||||
r.emit("\n")
|
||||
r.emit(r.fi(indent, "{{bold}}{{underline}}Goroutines of Interest{{/}}\n"))
|
||||
r.emitGoroutines(indent, highlightedGoroutines...)
|
||||
}
|
||||
|
||||
otherGoroutines := report.OtherGoroutines()
|
||||
if len(otherGoroutines) > 0 {
|
||||
r.emit("\n")
|
||||
r.emit(r.fi(indent, "{{gray}}{{bold}}{{underline}}Other Goroutines{{/}}\n"))
|
||||
r.emitGoroutines(indent, otherGoroutines...)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *DefaultReporter) emitGinkgoWriterOutput(indent uint, output string, limit int) {
|
||||
r.emitBlock(r.fi(indent, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}"))
|
||||
if limit == 0 {
|
||||
r.emitBlock(r.fi(indent+1, "%s", output))
|
||||
} else {
|
||||
lines := strings.Split(output, "\n")
|
||||
if len(lines) <= limit {
|
||||
r.emitBlock(r.fi(indent+1, "%s", output))
|
||||
} else {
|
||||
r.emitBlock(r.fi(indent+1, "{{gray}}...{{/}}"))
|
||||
for _, line := range lines[len(lines)-limit-1:] {
|
||||
r.emitBlock(r.fi(indent+1, "%s", line))
|
||||
}
|
||||
}
|
||||
}
|
||||
r.emitBlock(r.fi(indent, "{{gray}}<< End Captured GinkgoWriter Output{{/}}"))
|
||||
}
|
||||
|
||||
func (r *DefaultReporter) emitGoroutines(indent uint, goroutines ...types.Goroutine) {
|
||||
for idx, g := range goroutines {
|
||||
color := "{{gray}}"
|
||||
if g.HasHighlights() {
|
||||
color = "{{orange}}"
|
||||
}
|
||||
r.emit(r.fi(indent, color+"goroutine %d [%s]{{/}}\n", g.ID, g.State))
|
||||
for _, fc := range g.Stack {
|
||||
if fc.Highlight {
|
||||
r.emit(r.fi(indent, color+"{{bold}}> %s{{/}}\n", fc.Function))
|
||||
r.emit(r.fi(indent+2, color+"{{bold}}%s:%d{{/}}\n", fc.Filename, fc.Line))
|
||||
r.emitSource(indent+3, fc)
|
||||
} else {
|
||||
r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", fc.Function))
|
||||
r.emit(r.fi(indent+2, "{{gray}}%s:%d{{/}}\n", fc.Filename, fc.Line))
|
||||
}
|
||||
}
|
||||
|
||||
if idx+1 < len(goroutines) {
|
||||
r.emit("\n")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *DefaultReporter) emitSource(indent uint, fc types.FunctionCall) {
|
||||
lines := fc.Source
|
||||
if len(lines) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
lTrim := 100000
|
||||
for _, line := range lines {
|
||||
lTrimLine := len(line) - len(strings.TrimLeft(line, " \t"))
|
||||
if lTrimLine < lTrim && len(line) > 0 {
|
||||
lTrim = lTrimLine
|
||||
}
|
||||
}
|
||||
if lTrim == 100000 {
|
||||
lTrim = 0
|
||||
}
|
||||
|
||||
for idx, line := range lines {
|
||||
if len(line) > lTrim {
|
||||
line = line[lTrim:]
|
||||
}
|
||||
if idx == fc.SourceHighlight {
|
||||
r.emit(r.fi(indent, "{{bold}}{{orange}}> %s{{/}}\n", line))
|
||||
} else {
|
||||
r.emit(r.fi(indent, "| %s\n", line))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Emitting to the writer */
|
||||
func (r *DefaultReporter) emit(s string) {
|
||||
if len(s) > 0 {
|
||||
r.lastChar = s[len(s)-1:]
|
||||
r.lastEmissionWasDelimiter = false
|
||||
r.writer.Write([]byte(s))
|
||||
}
|
||||
}
|
||||
|
||||
func (r *DefaultReporter) emitBlock(s string) {
|
||||
if len(s) > 0 {
|
||||
if r.lastChar != "\n" {
|
||||
r.emit("\n")
|
||||
}
|
||||
r.emit(s)
|
||||
if r.lastChar != "\n" {
|
||||
r.emit("\n")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *DefaultReporter) emitDelimiter() {
|
||||
if r.lastEmissionWasDelimiter {
|
||||
return
|
||||
}
|
||||
r.emitBlock(r.f("{{gray}}%s{{/}}", strings.Repeat("-", 30)))
|
||||
r.lastEmissionWasDelimiter = true
|
||||
}
|
||||
|
||||
/* Rendering text */
|
||||
func (r *DefaultReporter) f(format string, args ...interface{}) string {
|
||||
return r.formatter.F(format, args...)
|
||||
}
|
||||
|
||||
func (r *DefaultReporter) fi(indentation uint, format string, args ...interface{}) string {
|
||||
return r.formatter.Fi(indentation, format, args...)
|
||||
}
|
||||
|
||||
func (r *DefaultReporter) cycleJoin(elements []string, joiner string) string {
|
||||
return r.formatter.CycleJoin(elements, joiner, []string{"{{/}}", "{{gray}}"})
|
||||
}
|
||||
|
||||
func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, succinct bool, usePreciseFailureLocation bool) string {
|
||||
texts, locations, labels := []string{}, []types.CodeLocation{}, [][]string{}
|
||||
texts, locations, labels = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...)
|
||||
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
|
||||
texts = append(texts, r.f("[%s] %s", report.LeafNodeType, report.LeafNodeText))
|
||||
} else {
|
||||
texts = append(texts, report.LeafNodeText)
|
||||
}
|
||||
labels = append(labels, report.LeafNodeLabels)
|
||||
locations = append(locations, report.LeafNodeLocation)
|
||||
|
||||
failureLocation := report.Failure.FailureNodeLocation
|
||||
if usePreciseFailureLocation {
|
||||
failureLocation = report.Failure.Location
|
||||
}
|
||||
|
||||
switch report.Failure.FailureNodeContext {
|
||||
case types.FailureNodeAtTopLevel:
|
||||
texts = append([]string{r.f(highlightColor+"{{bold}}TOP-LEVEL [%s]{{/}}", report.Failure.FailureNodeType)}, texts...)
|
||||
locations = append([]types.CodeLocation{failureLocation}, locations...)
|
||||
labels = append([][]string{{}}, labels...)
|
||||
case types.FailureNodeInContainer:
|
||||
i := report.Failure.FailureNodeContainerIndex
|
||||
texts[i] = r.f(highlightColor+"{{bold}}%s [%s]{{/}}", texts[i], report.Failure.FailureNodeType)
|
||||
locations[i] = failureLocation
|
||||
case types.FailureNodeIsLeafNode:
|
||||
i := len(texts) - 1
|
||||
texts[i] = r.f(highlightColor+"{{bold}}[%s] %s{{/}}", report.LeafNodeType, report.LeafNodeText)
|
||||
locations[i] = failureLocation
|
||||
}
|
||||
|
||||
out := ""
|
||||
if succinct {
|
||||
out += r.f("%s", r.cycleJoin(texts, " "))
|
||||
flattenedLabels := report.Labels()
|
||||
if len(flattenedLabels) > 0 {
|
||||
out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedLabels, ", "))
|
||||
}
|
||||
out += "\n"
|
||||
if usePreciseFailureLocation {
|
||||
out += r.f("{{gray}}%s{{/}}", failureLocation)
|
||||
} else {
|
||||
out += r.f("{{gray}}%s{{/}}", locations[len(locations)-1])
|
||||
}
|
||||
} else {
|
||||
for i := range texts {
|
||||
out += r.fi(uint(i), "%s", texts[i])
|
||||
if len(labels[i]) > 0 {
|
||||
out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", "))
|
||||
}
|
||||
out += "\n"
|
||||
out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i])
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
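As a hedged sketch (not part of this commit), the same NoColor DefaultReporter that the unstructured reporters below construct can be pointed at any io.Writer; pr stands in for a types.ProgressReport that Ginkgo would normally supply:

package main

import (
	"fmt"
	"strings"

	"github.com/onsi/ginkgo/v2/reporters"
	"github.com/onsi/ginkgo/v2/types"
)

func main() {
	var pr types.ProgressReport // assumed: populated by Ginkgo during a real run
	out := &strings.Builder{}
	// NoColor keeps the {{...}} style tags from turning into ANSI escape codes.
	reporters.NewDefaultReporter(types.ReporterConfig{NoColor: true}, out).EmitProgressReport(pr)
	fmt.Print(out.String())
}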
|
149
vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go
generated
vendored
Normal file
@@ -0,0 +1,149 @@
|
||||
package reporters
|
||||
|
||||
import (
|
||||
"github.com/onsi/ginkgo/v2/config"
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
)
|
||||
|
||||
// Deprecated: DeprecatedReporter was how Ginkgo V1 provided support for CustomReporters
|
||||
// this has been removed in V2.
|
||||
// Please read the documentation at:
|
||||
// https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters
|
||||
// for Ginkgo's new behavior and for a migration path.
|
||||
type DeprecatedReporter interface {
|
||||
SuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary)
|
||||
BeforeSuiteDidRun(setupSummary *types.SetupSummary)
|
||||
SpecWillRun(specSummary *types.SpecSummary)
|
||||
SpecDidComplete(specSummary *types.SpecSummary)
|
||||
AfterSuiteDidRun(setupSummary *types.SetupSummary)
|
||||
SuiteDidEnd(summary *types.SuiteSummary)
|
||||
}
|
||||
|
||||
// ReportViaDeprecatedReporter takes a V1 custom reporter and a V2 report and
|
||||
// calls the custom reporter's methods with appropriately transformed data from the V2 report.
|
||||
//
|
||||
// ReportViaDeprecatedReporter should be called in a `ReportAfterSuite()`
|
||||
//
|
||||
// Deprecated: ReportViaDeprecatedReporter exists to help developers bridge between deprecated V1 functionality and the new
|
||||
// reporting support in V2. It will be removed in a future minor version of Ginkgo.
|
||||
func ReportViaDeprecatedReporter(reporter DeprecatedReporter, report types.Report) {
|
||||
conf := config.DeprecatedGinkgoConfigType{
|
||||
RandomSeed: report.SuiteConfig.RandomSeed,
|
||||
RandomizeAllSpecs: report.SuiteConfig.RandomizeAllSpecs,
|
||||
FocusStrings: report.SuiteConfig.FocusStrings,
|
||||
SkipStrings: report.SuiteConfig.SkipStrings,
|
||||
FailOnPending: report.SuiteConfig.FailOnPending,
|
||||
FailFast: report.SuiteConfig.FailFast,
|
||||
FlakeAttempts: report.SuiteConfig.FlakeAttempts,
|
||||
EmitSpecProgress: report.SuiteConfig.EmitSpecProgress,
|
||||
DryRun: report.SuiteConfig.DryRun,
|
||||
ParallelNode: report.SuiteConfig.ParallelProcess,
|
||||
ParallelTotal: report.SuiteConfig.ParallelTotal,
|
||||
SyncHost: report.SuiteConfig.ParallelHost,
|
||||
StreamHost: report.SuiteConfig.ParallelHost,
|
||||
}
|
||||
|
||||
summary := &types.DeprecatedSuiteSummary{
|
||||
SuiteDescription: report.SuiteDescription,
|
||||
SuiteID: report.SuitePath,
|
||||
|
||||
NumberOfSpecsBeforeParallelization: report.PreRunStats.TotalSpecs,
|
||||
NumberOfTotalSpecs: report.PreRunStats.TotalSpecs,
|
||||
NumberOfSpecsThatWillBeRun: report.PreRunStats.SpecsThatWillRun,
|
||||
}
|
||||
|
||||
reporter.SuiteWillBegin(conf, summary)
|
||||
|
||||
for _, spec := range report.SpecReports {
|
||||
switch spec.LeafNodeType {
|
||||
case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite:
|
||||
setupSummary := &types.DeprecatedSetupSummary{
|
||||
ComponentType: spec.LeafNodeType,
|
||||
CodeLocation: spec.LeafNodeLocation,
|
||||
State: spec.State,
|
||||
RunTime: spec.RunTime,
|
||||
Failure: failureFor(spec),
|
||||
CapturedOutput: spec.CombinedOutput(),
|
||||
SuiteID: report.SuitePath,
|
||||
}
|
||||
reporter.BeforeSuiteDidRun(setupSummary)
|
||||
case types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
|
||||
setupSummary := &types.DeprecatedSetupSummary{
|
||||
ComponentType: spec.LeafNodeType,
|
||||
CodeLocation: spec.LeafNodeLocation,
|
||||
State: spec.State,
|
||||
RunTime: spec.RunTime,
|
||||
Failure: failureFor(spec),
|
||||
CapturedOutput: spec.CombinedOutput(),
|
||||
SuiteID: report.SuitePath,
|
||||
}
|
||||
reporter.AfterSuiteDidRun(setupSummary)
|
||||
case types.NodeTypeIt:
|
||||
componentTexts, componentCodeLocations := []string{}, []types.CodeLocation{}
|
||||
componentTexts = append(componentTexts, spec.ContainerHierarchyTexts...)
|
||||
componentCodeLocations = append(componentCodeLocations, spec.ContainerHierarchyLocations...)
|
||||
componentTexts = append(componentTexts, spec.LeafNodeText)
|
||||
componentCodeLocations = append(componentCodeLocations, spec.LeafNodeLocation)
|
||||
|
||||
specSummary := &types.DeprecatedSpecSummary{
|
||||
ComponentTexts: componentTexts,
|
||||
ComponentCodeLocations: componentCodeLocations,
|
||||
State: spec.State,
|
||||
RunTime: spec.RunTime,
|
||||
Failure: failureFor(spec),
|
||||
NumberOfSamples: spec.NumAttempts,
|
||||
CapturedOutput: spec.CombinedOutput(),
|
||||
SuiteID: report.SuitePath,
|
||||
}
|
||||
reporter.SpecWillRun(specSummary)
|
||||
reporter.SpecDidComplete(specSummary)
|
||||
|
||||
switch spec.State {
|
||||
case types.SpecStatePending:
|
||||
summary.NumberOfPendingSpecs += 1
|
||||
case types.SpecStateSkipped:
|
||||
summary.NumberOfSkippedSpecs += 1
|
||||
case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateInterrupted:
|
||||
summary.NumberOfFailedSpecs += 1
|
||||
case types.SpecStatePassed:
|
||||
summary.NumberOfPassedSpecs += 1
|
||||
if spec.NumAttempts > 1 {
|
||||
summary.NumberOfFlakedSpecs += 1
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
summary.SuiteSucceeded = report.SuiteSucceeded
|
||||
summary.RunTime = report.RunTime
|
||||
|
||||
reporter.SuiteDidEnd(summary)
|
||||
}
|
||||
|
||||
func failureFor(spec types.SpecReport) types.DeprecatedSpecFailure {
|
||||
if spec.Failure.IsZero() {
|
||||
return types.DeprecatedSpecFailure{}
|
||||
}
|
||||
|
||||
index := 0
|
||||
switch spec.Failure.FailureNodeContext {
|
||||
case types.FailureNodeInContainer:
|
||||
index = spec.Failure.FailureNodeContainerIndex
|
||||
case types.FailureNodeAtTopLevel:
|
||||
index = -1
|
||||
case types.FailureNodeIsLeafNode:
|
||||
index = len(spec.ContainerHierarchyTexts) - 1
|
||||
if spec.LeafNodeText != "" {
|
||||
index += 1
|
||||
}
|
||||
}
|
||||
|
||||
return types.DeprecatedSpecFailure{
|
||||
Message: spec.Failure.Message,
|
||||
Location: spec.Failure.Location,
|
||||
ForwardedPanic: spec.Failure.ForwardedPanic,
|
||||
ComponentIndex: index,
|
||||
ComponentType: spec.Failure.FailureNodeType,
|
||||
ComponentCodeLocation: spec.Failure.FailureNodeLocation,
|
||||
}
|
||||
}
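A hedged usage sketch, assuming the usual dot-import of ginkgo/v2 and a user-supplied myV1Reporter that implements the DeprecatedReporter interface above; as the doc comment says, the bridge belongs in a ReportAfterSuite node:

package mysuite_test

import (
	. "github.com/onsi/ginkgo/v2"
	"github.com/onsi/ginkgo/v2/reporters"
)

// myV1Reporter is assumed to be defined elsewhere and to satisfy reporters.DeprecatedReporter.
var myV1Reporter reporters.DeprecatedReporter

var _ = ReportAfterSuite("bridge V1 custom reporter", func(report Report) {
	reporters.ReportViaDeprecatedReporter(myV1Reporter, report)
})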
|
60
vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go
generated
vendored
Normal file
@@ -0,0 +1,60 @@
|
||||
package reporters
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
)
|
||||
|
||||
//GenerateJSONReport produces a JSON-formatted report at the passed-in destination
|
||||
func GenerateJSONReport(report types.Report, destination string) error {
|
||||
f, err := os.Create(destination)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
enc := json.NewEncoder(f)
|
||||
enc.SetIndent("", " ")
|
||||
err = enc.Encode([]types.Report{
|
||||
report,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return f.Close()
|
||||
}
|
||||
|
||||
//MergeAndCleanupJSONReports produces a single JSON-formatted report at the passed-in destination by merging the JSON-formatted reports provided in sources
|
||||
//It skips over reports that fail to decode but reports on them via the returned messages []string
|
||||
func MergeAndCleanupJSONReports(sources []string, destination string) ([]string, error) {
|
||||
messages := []string{}
|
||||
allReports := []types.Report{}
|
||||
for _, source := range sources {
|
||||
reports := []types.Report{}
|
||||
data, err := os.ReadFile(source)
|
||||
if err != nil {
|
||||
messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error()))
|
||||
continue
|
||||
}
|
||||
err = json.Unmarshal(data, &reports)
|
||||
if err != nil {
|
||||
messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error()))
|
||||
continue
|
||||
}
|
||||
os.Remove(source)
|
||||
allReports = append(allReports, reports...)
|
||||
}
|
||||
|
||||
f, err := os.Create(destination)
|
||||
if err != nil {
|
||||
return messages, err
|
||||
}
|
||||
enc := json.NewEncoder(f)
|
||||
enc.SetIndent("", " ")
|
||||
err = enc.Encode(allReports)
|
||||
if err != nil {
|
||||
return messages, err
|
||||
}
|
||||
return messages, f.Close()
|
||||
}
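Hedged sketch of driving GenerateJSONReport programmatically (the ginkgo CLI normally does this via --json-report); the file name is illustrative:

package mysuite_test

import (
	. "github.com/onsi/ginkgo/v2"
	"github.com/onsi/ginkgo/v2/reporters"
)

var _ = ReportAfterSuite("write JSON report", func(report Report) {
	if err := reporters.GenerateJSONReport(report, "ginkgo-report.json"); err != nil {
		// Surface the error through the suite instead of dropping it.
		Fail("failed to generate JSON report: " + err.Error())
	}
})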
|
358
vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
generated
vendored
Normal file
@@ -0,0 +1,358 @@
|
||||
/*
|
||||
|
||||
JUnit XML Reporter for Ginkgo
|
||||
|
||||
For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output
|
||||
|
||||
The schema used for the generated JUnit xml file was adapted from https://llg.cubic.org/docs/junit/
|
||||
|
||||
*/
|
||||
|
||||
package reporters
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/config"
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
)
|
||||
|
||||
type JUnitTestSuites struct {
|
||||
XMLName xml.Name `xml:"testsuites"`
|
||||
// Tests maps onto the total number of specs in all test suites (this includes any suite nodes such as BeforeSuite)
|
||||
Tests int `xml:"tests,attr"`
|
||||
// Disabled maps onto specs that are pending and/or skipped
|
||||
Disabled int `xml:"disabled,attr"`
|
||||
// Errors maps onto specs that panicked or were interrupted
|
||||
Errors int `xml:"errors,attr"`
|
||||
// Failures maps onto specs that failed
|
||||
Failures int `xml:"failures,attr"`
|
||||
// Time is the time in seconds to execute all test suites
|
||||
Time float64 `xml:"time,attr"`
|
||||
|
||||
//The set of all test suites
|
||||
TestSuites []JUnitTestSuite `xml:"testsuite"`
|
||||
}
|
||||
|
||||
type JUnitTestSuite struct {
|
||||
// Name maps onto the description of the test suite - maps onto Report.SuiteDescription
|
||||
Name string `xml:"name,attr"`
|
||||
// Package maps onto the absolute path to the test suite - maps onto Report.SuitePath
|
||||
Package string `xml:"package,attr"`
|
||||
// Tests maps onto the total number of specs in the test suite (this includes any suite nodes such as BeforeSuite)
|
||||
Tests int `xml:"tests,attr"`
|
||||
// Disabled maps onto specs that are pending
|
||||
Disabled int `xml:"disabled,attr"`
|
||||
// Skipped maps onto specs that are skipped
|
||||
Skipped int `xml:"skipped,attr"`
|
||||
// Errors maps onto specs that panicked or were interrupted
|
||||
Errors int `xml:"errors,attr"`
|
||||
// Failures maps onto specs that failed
|
||||
Failures int `xml:"failures,attr"`
|
||||
// Time is the time in seconds to execute the entire test suite - maps onto Report.RunTime
|
||||
Time float64 `xml:"time,attr"`
|
||||
// Timestamp is the ISO 8601 formatted start-time of the suite - maps onto Report.StartTime
|
||||
Timestamp string `xml:"timestamp,attr"`
|
||||
|
||||
//Properties captures the information stored in the rest of the Report type (including SuiteConfig) as key-value pairs
|
||||
Properties JUnitProperties `xml:"properties"`
|
||||
|
||||
//TestCases capture the individual specs
|
||||
TestCases []JUnitTestCase `xml:"testcase"`
|
||||
}
|
||||
|
||||
type JUnitProperties struct {
|
||||
Properties []JUnitProperty `xml:"property"`
|
||||
}
|
||||
|
||||
func (jup JUnitProperties) WithName(name string) string {
|
||||
for _, property := range jup.Properties {
|
||||
if property.Name == name {
|
||||
return property.Value
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type JUnitProperty struct {
|
||||
Name string `xml:"name,attr"`
|
||||
Value string `xml:"value,attr"`
|
||||
}
|
||||
|
||||
type JUnitTestCase struct {
|
||||
// Name maps onto the full text of the spec - equivalent to "[SpecReport.LeafNodeType] SpecReport.FullText()"
|
||||
Name string `xml:"name,attr"`
|
||||
// Classname maps onto the name of the test suite - equivalent to Report.SuiteDescription
|
||||
Classname string `xml:"classname,attr"`
|
||||
// Status maps onto the string representation of SpecReport.State
|
||||
Status string `xml:"status,attr"`
|
||||
// Time is the time in seconds to execute the spec - maps onto SpecReport.RunTime
|
||||
Time float64 `xml:"time,attr"`
|
||||
//Skipped is populated with a message if the test was skipped or pending
|
||||
Skipped *JUnitSkipped `xml:"skipped,omitempty"`
|
||||
//Error is populated if the test panicked or was interrupted
|
||||
Error *JUnitError `xml:"error,omitempty"`
|
||||
//Failure is populated if the test failed
|
||||
Failure *JUnitFailure `xml:"failure,omitempty"`
|
||||
//SystemOut maps onto any captured stdout/stderr output - maps onto SpecReport.CapturedStdOutErr
|
||||
SystemOut string `xml:"system-out,omitempty"`
|
||||
//SystemErr maps onto any captured GinkgoWriter output - maps onto SpecReport.CapturedGinkgoWriterOutput
|
||||
SystemErr string `xml:"system-err,omitempty"`
|
||||
}
|
||||
|
||||
type JUnitSkipped struct {
|
||||
// Message maps onto "pending" if the test was marked pending, "skipped" if the test was marked skipped, and "skipped - REASON" if the user called Skip(REASON)
|
||||
Message string `xml:"message,attr"`
|
||||
}
|
||||
|
||||
type JUnitError struct {
|
||||
//Message maps onto the panic/exception thrown - equivalent to SpecReport.Failure.ForwardedPanic - or to "interrupted"
|
||||
Message string `xml:"message,attr"`
|
||||
//Type is one of "panicked" or "interrupted"
|
||||
Type string `xml:"type,attr"`
|
||||
//Description maps onto the captured stack trace for a panic, or the failure message for an interrupt which will include the dump of running goroutines
|
||||
Description string `xml:",chardata"`
|
||||
}
|
||||
|
||||
type JUnitFailure struct {
|
||||
//Message maps onto the failure message - equivalent to SpecReport.Failure.Message
|
||||
Message string `xml:"message,attr"`
|
||||
//Type is "failed"
|
||||
Type string `xml:"type,attr"`
|
||||
//Description maps onto the location and stack trace of the failure
|
||||
Description string `xml:",chardata"`
|
||||
}
|
||||
|
||||
func GenerateJUnitReport(report types.Report, dst string) error {
|
||||
suite := JUnitTestSuite{
|
||||
Name: report.SuiteDescription,
|
||||
Package: report.SuitePath,
|
||||
Time: report.RunTime.Seconds(),
|
||||
Timestamp: report.StartTime.Format("2006-01-02T15:04:05"),
|
||||
Properties: JUnitProperties{
|
||||
Properties: []JUnitProperty{
|
||||
{"SuiteSucceeded", fmt.Sprintf("%t", report.SuiteSucceeded)},
|
||||
{"SuiteHasProgrammaticFocus", fmt.Sprintf("%t", report.SuiteHasProgrammaticFocus)},
|
||||
{"SpecialSuiteFailureReason", strings.Join(report.SpecialSuiteFailureReasons, ",")},
|
||||
{"SuiteLabels", fmt.Sprintf("[%s]", strings.Join(report.SuiteLabels, ","))},
|
||||
{"RandomSeed", fmt.Sprintf("%d", report.SuiteConfig.RandomSeed)},
|
||||
{"RandomizeAllSpecs", fmt.Sprintf("%t", report.SuiteConfig.RandomizeAllSpecs)},
|
||||
{"LabelFilter", report.SuiteConfig.LabelFilter},
|
||||
{"FocusStrings", strings.Join(report.SuiteConfig.FocusStrings, ",")},
|
||||
{"SkipStrings", strings.Join(report.SuiteConfig.SkipStrings, ",")},
|
||||
{"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")},
|
||||
{"SkipFiles", strings.Join(report.SuiteConfig.SkipFiles, ";")},
|
||||
{"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)},
|
||||
{"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)},
|
||||
{"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)},
|
||||
{"EmitSpecProgress", fmt.Sprintf("%t", report.SuiteConfig.EmitSpecProgress)},
|
||||
{"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)},
|
||||
{"ParallelTotal", fmt.Sprintf("%d", report.SuiteConfig.ParallelTotal)},
|
||||
{"OutputInterceptorMode", report.SuiteConfig.OutputInterceptorMode},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, spec := range report.SpecReports {
|
||||
name := fmt.Sprintf("[%s]", spec.LeafNodeType)
|
||||
if spec.FullText() != "" {
|
||||
name = name + " " + spec.FullText()
|
||||
}
|
||||
labels := spec.Labels()
|
||||
if len(labels) > 0 {
|
||||
name = name + " [" + strings.Join(labels, ", ") + "]"
|
||||
}
|
||||
|
||||
test := JUnitTestCase{
|
||||
Name: name,
|
||||
Classname: report.SuiteDescription,
|
||||
Status: spec.State.String(),
|
||||
Time: spec.RunTime.Seconds(),
|
||||
SystemOut: systemOutForUnstructuredReporters(spec),
|
||||
SystemErr: systemErrForUnstructuredReporters(spec),
|
||||
}
|
||||
suite.Tests += 1
|
||||
|
||||
switch spec.State {
|
||||
case types.SpecStateSkipped:
|
||||
message := "skipped"
|
||||
if spec.Failure.Message != "" {
|
||||
message += " - " + spec.Failure.Message
|
||||
}
|
||||
test.Skipped = &JUnitSkipped{Message: message}
|
||||
suite.Skipped += 1
|
||||
case types.SpecStatePending:
|
||||
test.Skipped = &JUnitSkipped{Message: "pending"}
|
||||
suite.Disabled += 1
|
||||
case types.SpecStateFailed:
|
||||
test.Failure = &JUnitFailure{
|
||||
Message: spec.Failure.Message,
|
||||
Type: "failed",
|
||||
Description: failureDescriptionForUnstructuredReporters(spec),
|
||||
}
|
||||
suite.Failures += 1
|
||||
case types.SpecStateTimedout:
|
||||
test.Failure = &JUnitFailure{
|
||||
Message: spec.Failure.Message,
|
||||
Type: "timedout",
|
||||
Description: failureDescriptionForUnstructuredReporters(spec),
|
||||
}
|
||||
suite.Failures += 1
|
||||
case types.SpecStateInterrupted:
|
||||
test.Error = &JUnitError{
|
||||
Message: spec.Failure.Message,
|
||||
Type: "interrupted",
|
||||
Description: failureDescriptionForUnstructuredReporters(spec),
|
||||
}
|
||||
suite.Errors += 1
|
||||
case types.SpecStateAborted:
|
||||
test.Failure = &JUnitFailure{
|
||||
Message: spec.Failure.Message,
|
||||
Type: "aborted",
|
||||
Description: failureDescriptionForUnstructuredReporters(spec),
|
||||
}
|
||||
suite.Errors += 1
|
||||
case types.SpecStatePanicked:
|
||||
test.Error = &JUnitError{
|
||||
Message: spec.Failure.ForwardedPanic,
|
||||
Type: "panicked",
|
||||
Description: failureDescriptionForUnstructuredReporters(spec),
|
||||
}
|
||||
suite.Errors += 1
|
||||
}
|
||||
|
||||
suite.TestCases = append(suite.TestCases, test)
|
||||
}
|
||||
|
||||
junitReport := JUnitTestSuites{
|
||||
Tests: suite.Tests,
|
||||
Disabled: suite.Disabled + suite.Skipped,
|
||||
Errors: suite.Errors,
|
||||
Failures: suite.Failures,
|
||||
Time: suite.Time,
|
||||
TestSuites: []JUnitTestSuite{suite},
|
||||
}
|
||||
|
||||
f, err := os.Create(dst)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.WriteString(xml.Header)
|
||||
encoder := xml.NewEncoder(f)
|
||||
encoder.Indent(" ", " ")
|
||||
encoder.Encode(junitReport)
|
||||
|
||||
return f.Close()
|
||||
}
|
||||
|
||||
func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) {
|
||||
messages := []string{}
|
||||
mergedReport := JUnitTestSuites{}
|
||||
for _, source := range sources {
|
||||
report := JUnitTestSuites{}
|
||||
f, err := os.Open(source)
|
||||
if err != nil {
|
||||
messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error()))
|
||||
continue
|
||||
}
|
||||
err = xml.NewDecoder(f).Decode(&report)
|
||||
if err != nil {
|
||||
messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error()))
|
||||
continue
|
||||
}
|
||||
os.Remove(source)
|
||||
|
||||
mergedReport.Tests += report.Tests
|
||||
mergedReport.Disabled += report.Disabled
|
||||
mergedReport.Errors += report.Errors
|
||||
mergedReport.Failures += report.Failures
|
||||
mergedReport.Time += report.Time
|
||||
mergedReport.TestSuites = append(mergedReport.TestSuites, report.TestSuites...)
|
||||
}
|
||||
|
||||
f, err := os.Create(dst)
|
||||
if err != nil {
|
||||
return messages, err
|
||||
}
|
||||
f.WriteString(xml.Header)
|
||||
encoder := xml.NewEncoder(f)
|
||||
encoder.Indent(" ", " ")
|
||||
encoder.Encode(mergedReport)
|
||||
|
||||
return messages, f.Close()
|
||||
}
|
||||
|
||||
func failureDescriptionForUnstructuredReporters(spec types.SpecReport) string {
|
||||
out := &strings.Builder{}
|
||||
out.WriteString(spec.Failure.Location.String() + "\n")
|
||||
out.WriteString(spec.Failure.Location.FullStackTrace)
|
||||
if !spec.Failure.ProgressReport.IsZero() {
|
||||
out.WriteString("\n")
|
||||
NewDefaultReporter(types.ReporterConfig{NoColor: true}, out).EmitProgressReport(spec.Failure.ProgressReport)
|
||||
}
|
||||
if len(spec.AdditionalFailures) > 0 {
|
||||
out.WriteString("\nThere were additional failures detected after the initial failure:\n")
|
||||
for i, additionalFailure := range spec.AdditionalFailures {
|
||||
NewDefaultReporter(types.ReporterConfig{NoColor: true}, out).EmitFailure(0, additionalFailure.State, additionalFailure.Failure, true)
|
||||
if i < len(spec.AdditionalFailures)-1 {
|
||||
out.WriteString("----------\n")
|
||||
}
|
||||
}
|
||||
}
|
||||
return out.String()
|
||||
}
|
||||
|
||||
func systemErrForUnstructuredReporters(spec types.SpecReport) string {
|
||||
out := &strings.Builder{}
|
||||
gw := spec.CapturedGinkgoWriterOutput
|
||||
cursor := 0
|
||||
for _, pr := range spec.ProgressReports {
|
||||
if cursor < pr.GinkgoWriterOffset {
|
||||
if pr.GinkgoWriterOffset < len(gw) {
|
||||
out.WriteString(gw[cursor:pr.GinkgoWriterOffset])
|
||||
cursor = pr.GinkgoWriterOffset
|
||||
} else if cursor < len(gw) {
|
||||
out.WriteString(gw[cursor:])
|
||||
cursor = len(gw)
|
||||
}
|
||||
}
|
||||
NewDefaultReporter(types.ReporterConfig{NoColor: true}, out).EmitProgressReport(pr)
|
||||
}
|
||||
|
||||
if cursor < len(gw) {
|
||||
out.WriteString(gw[cursor:])
|
||||
}
|
||||
|
||||
return out.String()
|
||||
}
|
||||
|
||||
func systemOutForUnstructuredReporters(spec types.SpecReport) string {
|
||||
systemOut := spec.CapturedStdOutErr
|
||||
if len(spec.ReportEntries) > 0 {
|
||||
systemOut += "\nReport Entries:\n"
|
||||
for i, entry := range spec.ReportEntries {
|
||||
systemOut += fmt.Sprintf("%s\n%s\n%s\n", entry.Name, entry.Location, entry.Time.Format(time.RFC3339Nano))
|
||||
if representation := entry.StringRepresentation(); representation != "" {
|
||||
systemOut += representation + "\n"
|
||||
}
|
||||
if i+1 < len(spec.ReportEntries) {
|
||||
systemOut += "--\n"
|
||||
}
|
||||
}
|
||||
}
|
||||
return systemOut
|
||||
}
|
||||
|
||||
// Deprecated JUnitReporter (so folks can still compile their suites)
|
||||
type JUnitReporter struct{}
|
||||
|
||||
func NewJUnitReporter(_ string) *JUnitReporter { return &JUnitReporter{} }
|
||||
func (reporter *JUnitReporter) SuiteWillBegin(_ config.GinkgoConfigType, _ *types.SuiteSummary) {}
|
||||
func (reporter *JUnitReporter) BeforeSuiteDidRun(_ *types.SetupSummary) {}
|
||||
func (reporter *JUnitReporter) SpecWillRun(_ *types.SpecSummary) {}
|
||||
func (reporter *JUnitReporter) SpecDidComplete(_ *types.SpecSummary) {}
|
||||
func (reporter *JUnitReporter) AfterSuiteDidRun(_ *types.SetupSummary) {}
|
||||
func (reporter *JUnitReporter) SuiteDidEnd(_ *types.SuiteSummary) {}
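The JUnit generator can be wired up the same way as the JSON sketch earlier; again a hedged example with an illustrative file name:

package mysuite_test

import (
	. "github.com/onsi/ginkgo/v2"
	"github.com/onsi/ginkgo/v2/reporters"
)

var _ = ReportAfterSuite("write JUnit report", func(report Report) {
	if err := reporters.GenerateJUnitReport(report, "junit.xml"); err != nil {
		Fail("failed to generate JUnit report: " + err.Error())
	}
})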
|
21
vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go
generated
vendored
Normal file
@@ -0,0 +1,21 @@
package reporters

import (
	"github.com/onsi/ginkgo/v2/types"
)

type Reporter interface {
	SuiteWillBegin(report types.Report)
	WillRun(report types.SpecReport)
	DidRun(report types.SpecReport)
	SuiteDidEnd(report types.Report)
	EmitProgressReport(progressReport types.ProgressReport)
}

type NoopReporter struct{}

func (n NoopReporter) SuiteWillBegin(report types.Report)                     {}
func (n NoopReporter) WillRun(report types.SpecReport)                        {}
func (n NoopReporter) DidRun(report types.SpecReport)                         {}
func (n NoopReporter) SuiteDidEnd(report types.Report)                        {}
func (n NoopReporter) EmitProgressReport(progressReport types.ProgressReport) {}
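A small hedged addition: a compile-time assertion like the one below is a cheap way to confirm that a type (here the vendored NoopReporter) satisfies the Reporter interface, and the same trick works for custom reporters:

package mysuite_test

import "github.com/onsi/ginkgo/v2/reporters"

// Compile-time check that NoopReporter implements every Reporter method.
var _ reporters.Reporter = reporters.NoopReporter{}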
|
101
vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go
generated
vendored
Normal file
@@ -0,0 +1,101 @@
|
||||
/*
|
||||
|
||||
TeamCity Reporter for Ginkgo
|
||||
|
||||
Makes use of TeamCity's support for Service Messages
|
||||
http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests
|
||||
*/
|
||||
|
||||
package reporters
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
)
|
||||
|
||||
func tcEscape(s string) string {
|
||||
s = strings.ReplaceAll(s, "|", "||")
|
||||
s = strings.ReplaceAll(s, "'", "|'")
|
||||
s = strings.ReplaceAll(s, "\n", "|n")
|
||||
s = strings.ReplaceAll(s, "\r", "|r")
|
||||
s = strings.ReplaceAll(s, "[", "|[")
|
||||
s = strings.ReplaceAll(s, "]", "|]")
|
||||
return s
|
||||
}
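Illustrative only (tcEscapeDemo is a hypothetical stand-in that restates the replacement rules above): escaping | first means the pipes introduced by later replacements stay literal, exactly as TeamCity service messages expect.

package main

import (
	"fmt"
	"strings"
)

// tcEscapeDemo mirrors tcEscape's replacement order for demonstration purposes.
func tcEscapeDemo(s string) string {
	for _, pair := range [][2]string{
		{"|", "||"}, {"'", "|'"}, {"\n", "|n"}, {"\r", "|r"}, {"[", "|["}, {"]", "|]"},
	} {
		s = strings.ReplaceAll(s, pair[0], pair[1])
	}
	return s
}

func main() {
	fmt.Println(tcEscapeDemo("spec [foo]\nfailed")) // prints: spec |[foo|]|nfailed
}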
|
||||
|
||||
func GenerateTeamcityReport(report types.Report, dst string) error {
|
||||
f, err := os.Create(dst)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
name := report.SuiteDescription
|
||||
labels := report.SuiteLabels
|
||||
if len(labels) > 0 {
|
||||
name = name + " [" + strings.Join(labels, ", ") + "]"
|
||||
}
|
||||
fmt.Fprintf(f, "##teamcity[testSuiteStarted name='%s']\n", tcEscape(name))
|
||||
for _, spec := range report.SpecReports {
|
||||
name := fmt.Sprintf("[%s]", spec.LeafNodeType)
|
||||
if spec.FullText() != "" {
|
||||
name = name + " " + spec.FullText()
|
||||
}
|
||||
labels := spec.Labels()
|
||||
if len(labels) > 0 {
|
||||
name = name + " [" + strings.Join(labels, ", ") + "]"
|
||||
}
|
||||
|
||||
name = tcEscape(name)
|
||||
fmt.Fprintf(f, "##teamcity[testStarted name='%s']\n", name)
|
||||
switch spec.State {
|
||||
case types.SpecStatePending:
|
||||
fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='pending']\n", name)
|
||||
case types.SpecStateSkipped:
|
||||
message := "skipped"
|
||||
if spec.Failure.Message != "" {
|
||||
message += " - " + spec.Failure.Message
|
||||
}
|
||||
fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='%s']\n", name, tcEscape(message))
|
||||
case types.SpecStateFailed:
|
||||
details := failureDescriptionForUnstructuredReporters(spec)
|
||||
fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='failed - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details))
|
||||
case types.SpecStatePanicked:
|
||||
details := failureDescriptionForUnstructuredReporters(spec)
|
||||
fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='panicked - %s' details='%s']\n", name, tcEscape(spec.Failure.ForwardedPanic), tcEscape(details))
|
||||
case types.SpecStateTimedout:
|
||||
details := failureDescriptionForUnstructuredReporters(spec)
|
||||
fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='timedout - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details))
|
||||
case types.SpecStateInterrupted:
|
||||
details := failureDescriptionForUnstructuredReporters(spec)
|
||||
fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='interrupted - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details))
|
||||
case types.SpecStateAborted:
|
||||
details := failureDescriptionForUnstructuredReporters(spec)
|
||||
fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='aborted - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details))
|
||||
}
|
||||
|
||||
fmt.Fprintf(f, "##teamcity[testStdOut name='%s' out='%s']\n", name, tcEscape(systemOutForUnstructuredReporters(spec)))
|
||||
fmt.Fprintf(f, "##teamcity[testStdErr name='%s' out='%s']\n", name, tcEscape(systemErrForUnstructuredReporters(spec)))
|
||||
fmt.Fprintf(f, "##teamcity[testFinished name='%s' duration='%d']\n", name, int(spec.RunTime.Seconds()*1000.0))
|
||||
}
|
||||
fmt.Fprintf(f, "##teamcity[testSuiteFinished name='%s']\n", tcEscape(report.SuiteDescription))
|
||||
|
||||
return f.Close()
|
||||
}
|
||||
|
||||
func MergeAndCleanupTeamcityReports(sources []string, dst string) ([]string, error) {
|
||||
messages := []string{}
|
||||
merged := []byte{}
|
||||
for _, source := range sources {
|
||||
data, err := os.ReadFile(source)
|
||||
if err != nil {
|
||||
messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error()))
|
||||
continue
|
||||
}
|
||||
os.Remove(source)
|
||||
merged = append(merged, data...)
|
||||
}
|
||||
return messages, os.WriteFile(dst, merged, 0666)
|
||||
}
|
91
vendor/github.com/onsi/ginkgo/v2/types/code_location.go
generated
vendored
Normal file
@@ -0,0 +1,91 @@
|
||||
package types
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type CodeLocation struct {
|
||||
FileName string `json:",omitempty"`
|
||||
LineNumber int `json:",omitempty"`
|
||||
FullStackTrace string `json:",omitempty"`
|
||||
CustomMessage string `json:",omitempty"`
|
||||
}
|
||||
|
||||
func (codeLocation CodeLocation) String() string {
|
||||
if codeLocation.CustomMessage != "" {
|
||||
return codeLocation.CustomMessage
|
||||
}
|
||||
return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber)
|
||||
}
|
||||
|
||||
func (codeLocation CodeLocation) ContentsOfLine() string {
|
||||
if codeLocation.CustomMessage != "" {
|
||||
return ""
|
||||
}
|
||||
contents, err := os.ReadFile(codeLocation.FileName)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
lines := strings.Split(string(contents), "\n")
|
||||
if len(lines) < codeLocation.LineNumber {
|
||||
return ""
|
||||
}
|
||||
return lines[codeLocation.LineNumber-1]
|
||||
}
|
||||
|
||||
func NewCustomCodeLocation(message string) CodeLocation {
|
||||
return CodeLocation{
|
||||
CustomMessage: message,
|
||||
}
|
||||
}
|
||||
|
||||
func NewCodeLocation(skip int) CodeLocation {
|
||||
_, file, line, _ := runtime.Caller(skip + 1)
|
||||
return CodeLocation{FileName: file, LineNumber: line}
|
||||
}
|
||||
|
||||
func NewCodeLocationWithStackTrace(skip int) CodeLocation {
|
||||
_, file, line, _ := runtime.Caller(skip + 1)
|
||||
stackTrace := PruneStack(string(debug.Stack()), skip+1)
|
||||
return CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace}
|
||||
}
|
||||
|
||||
// PruneStack removes references to functions that are internal to Ginkgo
|
||||
// and the Go runtime from a stack string and a certain number of stack entries
|
||||
// at the beginning of the stack. The stack string has the format
|
||||
// as returned by runtime/debug.Stack. The leading goroutine information is
|
||||
// optional and always removed if present. Beware that runtime/debug.Stack
|
||||
// adds itself as first entry, so typically skip must be >= 1 to remove that
|
||||
// entry.
|
||||
func PruneStack(fullStackTrace string, skip int) string {
|
||||
stack := strings.Split(fullStackTrace, "\n")
|
||||
// Ensure that the even entries are the method names and the
|
||||
// odd entries the source code information.
|
||||
if len(stack) > 0 && strings.HasPrefix(stack[0], "goroutine ") {
|
||||
// Ignore "goroutine 29 [running]:" line.
|
||||
stack = stack[1:]
|
||||
}
|
||||
// The "+1" is for skipping over the initial entry, which is
|
||||
// runtime/debug.Stack() itself.
|
||||
if len(stack) > 2*(skip+1) {
|
||||
stack = stack[2*(skip+1):]
|
||||
}
|
||||
prunedStack := []string{}
|
||||
if os.Getenv("GINKGO_PRUNE_STACK") == "FALSE" {
|
||||
prunedStack = stack
|
||||
} else {
|
||||
re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
|
||||
for i := 0; i < len(stack)/2; i++ {
|
||||
// We filter out based on the source code file name.
|
||||
if !re.Match([]byte(stack[i*2+1])) {
|
||||
prunedStack = append(prunedStack, stack[i*2])
|
||||
prunedStack = append(prunedStack, stack[i*2+1])
|
||||
}
|
||||
}
|
||||
}
|
||||
return strings.Join(prunedStack, "\n")
|
||||
}
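A hedged sketch of the CodeLocation helpers above; whereAmI is a hypothetical helper and the printed path depends on where the file lives:

package main

import (
	"fmt"

	"github.com/onsi/ginkgo/v2/types"
)

// whereAmI reports the caller's location: skip=1 walks one frame past whereAmI itself.
func whereAmI() types.CodeLocation {
	return types.NewCodeLocation(1)
}

func main() {
	cl := whereAmI()
	fmt.Println(cl.String()) // e.g. /path/to/main.go:17
}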
|
740
vendor/github.com/onsi/ginkgo/v2/types/config.go
generated
vendored
Normal file
@@ -0,0 +1,740 @@
|
||||
/*
|
||||
Ginkgo accepts a number of configuration options.
|
||||
These are documented [here](http://onsi.github.io/ginkgo/#the-ginkgo-cli)
|
||||
*/
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Configuration controlling how an individual test suite is run
|
||||
type SuiteConfig struct {
|
||||
RandomSeed int64
|
||||
RandomizeAllSpecs bool
|
||||
FocusStrings []string
|
||||
SkipStrings []string
|
||||
FocusFiles []string
|
||||
SkipFiles []string
|
||||
LabelFilter string
|
||||
FailOnPending bool
|
||||
FailFast bool
|
||||
FlakeAttempts int
|
||||
EmitSpecProgress bool
|
||||
DryRun bool
|
||||
PollProgressAfter time.Duration
|
||||
PollProgressInterval time.Duration
|
||||
Timeout time.Duration
|
||||
OutputInterceptorMode string
|
||||
SourceRoots []string
|
||||
GracePeriod time.Duration
|
||||
|
||||
ParallelProcess int
|
||||
ParallelTotal int
|
||||
ParallelHost string
|
||||
}
|
||||
|
||||
func NewDefaultSuiteConfig() SuiteConfig {
|
||||
return SuiteConfig{
|
||||
RandomSeed: time.Now().Unix(),
|
||||
Timeout: time.Hour,
|
||||
ParallelProcess: 1,
|
||||
ParallelTotal: 1,
|
||||
GracePeriod: 30 * time.Second,
|
||||
}
|
||||
}
|
||||
|
||||
type VerbosityLevel uint
|
||||
|
||||
const (
|
||||
VerbosityLevelSuccinct VerbosityLevel = iota
|
||||
VerbosityLevelNormal
|
||||
VerbosityLevelVerbose
|
||||
VerbosityLevelVeryVerbose
|
||||
)
|
||||
|
||||
func (vl VerbosityLevel) GT(comp VerbosityLevel) bool {
|
||||
return vl > comp
|
||||
}
|
||||
|
||||
func (vl VerbosityLevel) GTE(comp VerbosityLevel) bool {
|
||||
return vl >= comp
|
||||
}
|
||||
|
||||
func (vl VerbosityLevel) Is(comp VerbosityLevel) bool {
|
||||
return vl == comp
|
||||
}
|
||||
|
||||
func (vl VerbosityLevel) LTE(comp VerbosityLevel) bool {
|
||||
return vl <= comp
|
||||
}
|
||||
|
||||
func (vl VerbosityLevel) LT(comp VerbosityLevel) bool {
|
||||
return vl < comp
|
||||
}
|
||||
|
||||
// Configuration for Ginkgo's reporter
|
||||
type ReporterConfig struct {
|
||||
NoColor bool
|
||||
SlowSpecThreshold time.Duration
|
||||
Succinct bool
|
||||
Verbose bool
|
||||
VeryVerbose bool
|
||||
FullTrace bool
|
||||
AlwaysEmitGinkgoWriter bool
|
||||
|
||||
JSONReport string
|
||||
JUnitReport string
|
||||
TeamcityReport string
|
||||
}
|
||||
|
||||
func (rc ReporterConfig) Verbosity() VerbosityLevel {
|
||||
if rc.Succinct {
|
||||
return VerbosityLevelSuccinct
|
||||
} else if rc.Verbose {
|
||||
return VerbosityLevelVerbose
|
||||
} else if rc.VeryVerbose {
|
||||
return VerbosityLevelVeryVerbose
|
||||
}
|
||||
return VerbosityLevelNormal
|
||||
}
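A hedged illustration of the precedence encoded in the switch above: Succinct is checked first, so it wins if several verbosity flags are set (VetConfig, further down, reports such conflicts as errors):

package main

import (
	"fmt"

	"github.com/onsi/ginkgo/v2/types"
)

func main() {
	rc := types.ReporterConfig{Succinct: true, Verbose: true}
	// Succinct takes precedence over Verbose and VeryVerbose.
	fmt.Println(rc.Verbosity().Is(types.VerbosityLevelSuccinct)) // true
}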
|
||||
|
||||
func (rc ReporterConfig) WillGenerateReport() bool {
|
||||
return rc.JSONReport != "" || rc.JUnitReport != "" || rc.TeamcityReport != ""
|
||||
}
|
||||
|
||||
func NewDefaultReporterConfig() ReporterConfig {
|
||||
return ReporterConfig{
|
||||
SlowSpecThreshold: 5 * time.Second,
|
||||
}
|
||||
}
|
||||
|
||||
// Configuration for the Ginkgo CLI
|
||||
type CLIConfig struct {
|
||||
//for build, run, and watch
|
||||
Recurse bool
|
||||
SkipPackage string
|
||||
RequireSuite bool
|
||||
NumCompilers int
|
||||
|
||||
//for run and watch only
|
||||
Procs int
|
||||
Parallel bool
|
||||
AfterRunHook string
|
||||
OutputDir string
|
||||
KeepSeparateCoverprofiles bool
|
||||
KeepSeparateReports bool
|
||||
|
||||
//for run only
|
||||
KeepGoing bool
|
||||
UntilItFails bool
|
||||
Repeat int
|
||||
RandomizeSuites bool
|
||||
|
||||
//for watch only
|
||||
Depth int
|
||||
WatchRegExp string
|
||||
}
|
||||
|
||||
func NewDefaultCLIConfig() CLIConfig {
|
||||
return CLIConfig{
|
||||
Depth: 1,
|
||||
WatchRegExp: `\.go$`,
|
||||
}
|
||||
}
|
||||
|
||||
func (g CLIConfig) ComputedProcs() int {
|
||||
if g.Procs > 0 {
|
||||
return g.Procs
|
||||
}
|
||||
|
||||
n := 1
|
||||
if g.Parallel {
|
||||
n = runtime.NumCPU()
|
||||
if n > 4 {
|
||||
n = n - 1
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
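A hedged illustration of the auto-detection above; the printed values depend on the machine it runs on:

package main

import (
	"fmt"
	"runtime"

	"github.com/onsi/ginkgo/v2/types"
)

func main() {
	cli := types.NewDefaultCLIConfig()
	cli.Parallel = true // the programmatic equivalent of passing -p to the ginkgo CLI
	// With more than 4 CPUs one core is left free (NumCPU()-1); otherwise all CPUs are used.
	fmt.Println(runtime.NumCPU(), cli.ComputedProcs())
}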
|
||||
|
||||
func (g CLIConfig) ComputedNumCompilers() int {
|
||||
if g.NumCompilers > 0 {
|
||||
return g.NumCompilers
|
||||
}
|
||||
|
||||
return runtime.NumCPU()
|
||||
}
|
||||
|
||||
// Configuration for the Ginkgo CLI capturing available go flags
|
||||
// A subset of Go flags are exposed by Ginkgo. Some are available at compile time (e.g. ginkgo build) and others only at run time (e.g. ginkgo run - which has both build and run time flags).
|
||||
// More details can be found at:
|
||||
// https://docs.google.com/spreadsheets/d/1zkp-DS4hU4sAJl5eHh1UmgwxCPQhf3s5a8fbiOI8tJU/
|
||||
type GoFlagsConfig struct {
|
||||
//build-time flags for code-and-performance analysis
|
||||
Race bool
|
||||
Cover bool
|
||||
CoverMode string
|
||||
CoverPkg string
|
||||
Vet string
|
||||
|
||||
//run-time flags for code-and-performance analysis
|
||||
BlockProfile string
|
||||
BlockProfileRate int
|
||||
CoverProfile string
|
||||
CPUProfile string
|
||||
MemProfile string
|
||||
MemProfileRate int
|
||||
MutexProfile string
|
||||
MutexProfileFraction int
|
||||
Trace string
|
||||
|
||||
//build-time flags for building
|
||||
A bool
|
||||
ASMFlags string
|
||||
BuildMode string
|
||||
Compiler string
|
||||
GCCGoFlags string
|
||||
GCFlags string
|
||||
InstallSuffix string
|
||||
LDFlags string
|
||||
LinkShared bool
|
||||
Mod string
|
||||
N bool
|
||||
ModFile string
|
||||
ModCacheRW bool
|
||||
MSan bool
|
||||
PkgDir string
|
||||
Tags string
|
||||
TrimPath bool
|
||||
ToolExec string
|
||||
Work bool
|
||||
X bool
|
||||
}
|
||||
|
||||
func NewDefaultGoFlagsConfig() GoFlagsConfig {
|
||||
return GoFlagsConfig{}
|
||||
}
|
||||
|
||||
func (g GoFlagsConfig) BinaryMustBePreserved() bool {
|
||||
return g.BlockProfile != "" || g.CPUProfile != "" || g.MemProfile != "" || g.MutexProfile != ""
|
||||
}
|
||||
|
||||
// Configuration options that were deprecated in 2.0
|
||||
type deprecatedConfig struct {
|
||||
DebugParallel bool
|
||||
NoisySkippings bool
|
||||
NoisyPendings bool
|
||||
RegexScansFilePath bool
|
||||
SlowSpecThresholdWithFLoatUnits float64
|
||||
Stream bool
|
||||
Notify bool
|
||||
}
|
||||
|
||||
// Flags
|
||||
|
||||
// Flags sections used by both the CLI and the Ginkgo test process
|
||||
var FlagSections = GinkgoFlagSections{
|
||||
{Key: "multiple-suites", Style: "{{dark-green}}", Heading: "Running Multiple Test Suites"},
|
||||
{Key: "order", Style: "{{green}}", Heading: "Controlling Test Order"},
|
||||
{Key: "parallel", Style: "{{yellow}}", Heading: "Controlling Test Parallelism"},
|
||||
{Key: "low-level-parallel", Style: "{{yellow}}", Heading: "Controlling Test Parallelism",
|
||||
Description: "These are set by the Ginkgo CLI, {{red}}{{bold}}do not set them manually{{/}} via go test.\nUse ginkgo -p or ginkgo -procs=N instead."},
|
||||
{Key: "filter", Style: "{{cyan}}", Heading: "Filtering Tests"},
|
||||
{Key: "failure", Style: "{{red}}", Heading: "Failure Handling"},
|
||||
{Key: "output", Style: "{{magenta}}", Heading: "Controlling Output Formatting"},
|
||||
{Key: "code-and-coverage-analysis", Style: "{{orange}}", Heading: "Code and Coverage Analysis"},
|
||||
{Key: "performance-analysis", Style: "{{coral}}", Heading: "Performance Analysis"},
|
||||
{Key: "debug", Style: "{{blue}}", Heading: "Debugging Tests",
|
||||
Description: "In addition to these flags, Ginkgo supports a few debugging environment variables. To change the parallel server protocol set {{blue}}GINKGO_PARALLEL_PROTOCOL{{/}} to {{bold}}HTTP{{/}}. To avoid pruning callstacks set {{blue}}GINKGO_PRUNE_STACK{{/}} to {{bold}}FALSE{{/}}."},
|
||||
{Key: "watch", Style: "{{light-yellow}}", Heading: "Controlling Ginkgo Watch"},
|
||||
{Key: "misc", Style: "{{light-gray}}", Heading: "Miscellaneous"},
|
||||
{Key: "go-build", Style: "{{light-gray}}", Heading: "Go Build Flags", Succinct: true,
|
||||
Description: "These flags are inherited from go build. Run {{bold}}ginkgo help build{{/}} for more detailed flag documentation."},
|
||||
}
|
||||
|
||||
// SuiteConfigFlags provides flags for the Ginkgo test process, and CLI
|
||||
var SuiteConfigFlags = GinkgoFlags{
|
||||
{KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo",
|
||||
Usage: "The seed used to randomize the spec suite."},
|
||||
{KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags",
|
||||
Usage: "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When containers."},
|
||||
|
||||
{KeyPath: "S.FailOnPending", Name: "fail-on-pending", SectionKey: "failure", DeprecatedName: "failOnPending", DeprecatedDocLink: "changed-command-line-flags",
|
||||
Usage: "If set, ginkgo will mark the test suite as failed if any specs are pending."},
|
||||
{KeyPath: "S.FailFast", Name: "fail-fast", SectionKey: "failure", DeprecatedName: "failFast", DeprecatedDocLink: "changed-command-line-flags",
|
||||
Usage: "If set, ginkgo will stop running a test suite after a failure occurs."},
|
||||
{KeyPath: "S.FlakeAttempts", Name: "flake-attempts", SectionKey: "failure", UsageDefaultValue: "0 - failed tests are not retried", DeprecatedName: "flakeAttempts", DeprecatedDocLink: "changed-command-line-flags",
|
||||
Usage: "Make up to this many attempts to run each spec. If any of the attempts succeed, the suite will not be failed."},
|
||||
|
||||
{KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags",
|
||||
Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."},
|
||||
{KeyPath: "S.EmitSpecProgress", Name: "progress", SectionKey: "debug",
|
||||
Usage: "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter."},
|
||||
{KeyPath: "S.PollProgressAfter", Name: "poll-progress-after", SectionKey: "debug", UsageDefaultValue: "0",
|
||||
Usage: "Emit node progress reports periodically if node hasn't completed after this duration."},
|
||||
{KeyPath: "S.PollProgressInterval", Name: "poll-progress-interval", SectionKey: "debug", UsageDefaultValue: "10s",
|
||||
Usage: "The rate at which to emit node progress reports after poll-progress-after has elapsed."},
|
||||
{KeyPath: "S.SourceRoots", Name: "source-root", SectionKey: "debug",
|
||||
Usage: "The location to look for source code when generating progress reports. You can pass multiple --source-root flags."},
|
||||
{KeyPath: "S.Timeout", Name: "timeout", SectionKey: "debug", UsageDefaultValue: "1h",
|
||||
Usage: "Test suite fails if it does not complete within the specified timeout."},
|
||||
{KeyPath: "S.GracePeriod", Name: "grace-period", SectionKey: "debug", UsageDefaultValue: "30s",
|
||||
Usage: "When interrupted, Ginkgo will wait for GracePeriod for the current running node to exit before moving on to the next one."},
|
||||
{KeyPath: "S.OutputInterceptorMode", Name: "output-interceptor-mode", SectionKey: "debug", UsageArgument: "dup, swap, or none",
|
||||
Usage: "If set, ginkgo will use the specified output interception strategy when running in parallel. Defaults to dup on unix and swap on windows."},
|
||||
|
||||
{KeyPath: "S.LabelFilter", Name: "label-filter", SectionKey: "filter", UsageArgument: "expression",
|
||||
Usage: "If set, ginkgo will only run specs with labels that match the label-filter. The passed-in expression can include boolean operations (!, &&, ||, ','), groupings via '()', and regular expressions '/regexp/'. e.g. '(cat || dog) && !fruit'"},
|
||||
{KeyPath: "S.FocusStrings", Name: "focus", SectionKey: "filter",
|
||||
Usage: "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed."},
|
||||
{KeyPath: "S.SkipStrings", Name: "skip", SectionKey: "filter",
|
||||
Usage: "If set, ginkgo will only run specs that do not match this regular expression. Can be specified multiple times, values are ORed."},
|
||||
{KeyPath: "S.FocusFiles", Name: "focus-file", SectionKey: "filter", UsageArgument: "file (regexp) | file:line | file:lineA-lineB | file:line,line,line",
|
||||
Usage: "If set, ginkgo will only run specs in matching files. Can be specified multiple times, values are ORed."},
|
||||
{KeyPath: "S.SkipFiles", Name: "skip-file", SectionKey: "filter", UsageArgument: "file (regexp) | file:line | file:lineA-lineB | file:line,line,line",
|
||||
Usage: "If set, ginkgo will skip specs in matching files. Can be specified multiple times, values are ORed."},
|
||||
|
||||
{KeyPath: "D.RegexScansFilePath", DeprecatedName: "regexScansFilePath", DeprecatedDocLink: "removed--regexscansfilepath", DeprecatedVersion: "2.0.0"},
|
||||
{KeyPath: "D.DebugParallel", DeprecatedName: "debug", DeprecatedDocLink: "removed--debug", DeprecatedVersion: "2.0.0"},
|
||||
}
|
||||
|
||||
// ParallelConfigFlags provides flags for the Ginkgo test process (not the CLI)
|
||||
var ParallelConfigFlags = GinkgoFlags{
|
||||
{KeyPath: "S.ParallelProcess", Name: "parallel.process", SectionKey: "low-level-parallel", UsageDefaultValue: "1",
|
||||
Usage: "This worker process's (one-indexed) process number. For running specs in parallel."},
|
||||
{KeyPath: "S.ParallelTotal", Name: "parallel.total", SectionKey: "low-level-parallel", UsageDefaultValue: "1",
|
||||
Usage: "The total number of worker processes. For running specs in parallel."},
|
||||
{KeyPath: "S.ParallelHost", Name: "parallel.host", SectionKey: "low-level-parallel", UsageDefaultValue: "set by Ginkgo CLI",
|
||||
Usage: "The address for the server that will synchronize the processes."},
|
||||
}
|
||||
|
||||
// ReporterConfigFlags provides flags for the Ginkgo test process, and CLI
|
||||
var ReporterConfigFlags = GinkgoFlags{
|
||||
{KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags",
|
||||
Usage: "If set, suppress color output in default reporter."},
|
||||
{KeyPath: "R.SlowSpecThreshold", Name: "slow-spec-threshold", SectionKey: "output", UsageArgument: "duration", UsageDefaultValue: "5s",
|
||||
Usage: "Specs that take longer to run than this threshold are flagged as slow by the default reporter."},
|
||||
{KeyPath: "R.Verbose", Name: "v", SectionKey: "output",
|
||||
Usage: "If set, emits more output including GinkgoWriter contents."},
|
||||
{KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output",
|
||||
Usage: "If set, emits with maximal verbosity - includes skipped and pending tests."},
|
||||
{KeyPath: "R.Succinct", Name: "succinct", SectionKey: "output",
|
||||
Usage: "If set, default reporter prints out a very succinct report"},
|
||||
{KeyPath: "R.FullTrace", Name: "trace", SectionKey: "output",
|
||||
Usage: "If set, default reporter prints out the full stack trace when a failure occurs"},
|
||||
{KeyPath: "R.AlwaysEmitGinkgoWriter", Name: "always-emit-ginkgo-writer", SectionKey: "output", DeprecatedName: "reportPassed", DeprecatedDocLink: "renamed--reportpassed",
|
||||
Usage: "If set, default reporter prints out captured output of passed tests."},
|
||||
|
||||
{KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output",
|
||||
Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."},
|
||||
{KeyPath: "R.JUnitReport", Name: "junit-report", UsageArgument: "filename.xml", SectionKey: "output", DeprecatedName: "reportFile", DeprecatedDocLink: "improved-reporting-infrastructure",
|
||||
Usage: "If set, Ginkgo will generate a conformant junit test report in the specified file."},
|
||||
{KeyPath: "R.TeamcityReport", Name: "teamcity-report", UsageArgument: "filename", SectionKey: "output",
|
||||
Usage: "If set, Ginkgo will generate a Teamcity-formatted test report at the specified location."},
|
||||
|
||||
{KeyPath: "D.SlowSpecThresholdWithFLoatUnits", DeprecatedName: "slowSpecThreshold", DeprecatedDocLink: "changed--slowspecthreshold",
|
||||
Usage: "use --slow-spec-threshold instead and pass in a duration string (e.g. '5s', not '5.0')"},
|
||||
{KeyPath: "D.NoisyPendings", DeprecatedName: "noisyPendings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"},
|
||||
{KeyPath: "D.NoisySkippings", DeprecatedName: "noisySkippings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"},
|
||||
}
|
||||
|
||||
// BuildTestSuiteFlagSet attaches to the CommandLine flagset and provides flags for the Ginkgo test process
|
||||
func BuildTestSuiteFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig) (GinkgoFlagSet, error) {
|
||||
flags := SuiteConfigFlags.CopyAppend(ParallelConfigFlags...).CopyAppend(ReporterConfigFlags...)
|
||||
flags = flags.WithPrefix("ginkgo")
|
||||
bindings := map[string]interface{}{
|
||||
"S": suiteConfig,
|
||||
"R": reporterConfig,
|
||||
"D": &deprecatedConfig{},
|
||||
}
|
||||
extraGoFlagsSection := GinkgoFlagSection{Style: "{{gray}}", Heading: "Go test flags"}
|
||||
|
||||
return NewAttachedGinkgoFlagSet(flag.CommandLine, flags, bindings, FlagSections, extraGoFlagsSection)
|
||||
}
|
||||
|
||||
// VetConfig validates that the Ginkgo test process' configuration is sound
|
||||
func VetConfig(flagSet GinkgoFlagSet, suiteConfig SuiteConfig, reporterConfig ReporterConfig) []error {
|
||||
errors := []error{}
|
||||
|
||||
if flagSet.WasSet("count") || flagSet.WasSet("test.count") {
|
||||
flag := flagSet.Lookup("count")
|
||||
if flag == nil {
|
||||
flag = flagSet.Lookup("test.count")
|
||||
}
|
||||
count, err := strconv.Atoi(flag.Value.String())
|
||||
if err != nil || count != 1 {
|
||||
errors = append(errors, GinkgoErrors.InvalidGoFlagCount())
|
||||
}
|
||||
}
|
||||
|
||||
if flagSet.WasSet("parallel") || flagSet.WasSet("test.parallel") {
|
||||
errors = append(errors, GinkgoErrors.InvalidGoFlagParallel())
|
||||
}
|
||||
|
||||
if suiteConfig.ParallelTotal < 1 {
|
||||
errors = append(errors, GinkgoErrors.InvalidParallelTotalConfiguration())
|
||||
}
|
||||
|
||||
if suiteConfig.ParallelProcess > suiteConfig.ParallelTotal || suiteConfig.ParallelProcess < 1 {
|
||||
errors = append(errors, GinkgoErrors.InvalidParallelProcessConfiguration())
|
||||
}
|
||||
|
||||
if suiteConfig.ParallelTotal > 1 && suiteConfig.ParallelHost == "" {
|
||||
errors = append(errors, GinkgoErrors.MissingParallelHostConfiguration())
|
||||
}
|
||||
|
||||
if suiteConfig.DryRun && suiteConfig.ParallelTotal > 1 {
|
||||
errors = append(errors, GinkgoErrors.DryRunInParallelConfiguration())
|
||||
}
|
||||
|
||||
if suiteConfig.GracePeriod <= 0 {
|
||||
errors = append(errors, GinkgoErrors.GracePeriodCannotBeZero())
|
||||
}
|
||||
|
||||
if len(suiteConfig.FocusFiles) > 0 {
|
||||
_, err := ParseFileFilters(suiteConfig.FocusFiles)
|
||||
if err != nil {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(suiteConfig.SkipFiles) > 0 {
|
||||
_, err := ParseFileFilters(suiteConfig.SkipFiles)
|
||||
if err != nil {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
}
|
||||
|
||||
if suiteConfig.LabelFilter != "" {
|
||||
_, err := ParseLabelFilter(suiteConfig.LabelFilter)
|
||||
if err != nil {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
}
|
||||
|
||||
switch strings.ToLower(suiteConfig.OutputInterceptorMode) {
|
||||
case "", "dup", "swap", "none":
|
||||
default:
|
||||
errors = append(errors, GinkgoErrors.InvalidOutputInterceptorModeConfiguration(suiteConfig.OutputInterceptorMode))
|
||||
}
|
||||
|
||||
numVerbosity := 0
|
||||
for _, v := range []bool{reporterConfig.Succinct, reporterConfig.Verbose, reporterConfig.VeryVerbose} {
|
||||
if v {
|
||||
numVerbosity++
|
||||
}
|
||||
}
|
||||
if numVerbosity > 1 {
|
||||
errors = append(errors, GinkgoErrors.ConflictingVerbosityConfiguration())
|
||||
}
|
||||
|
||||
return errors
|
||||
}
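A hedged sketch of how a Ginkgo test binary might tie these pieces together (vetCurrentConfig is hypothetical; BuildTestSuiteFlagSet attaches to flag.CommandLine, so this belongs in a test process, not an arbitrary program):

package mysuite_test

import (
	"flag"
	"fmt"

	"github.com/onsi/ginkgo/v2/types"
)

func vetCurrentConfig() {
	suiteConfig := types.NewDefaultSuiteConfig()
	reporterConfig := types.NewDefaultReporterConfig()

	// Registers the ginkgo.* flags on flag.CommandLine and binds them to the two configs.
	flagSet, err := types.BuildTestSuiteFlagSet(&suiteConfig, &reporterConfig)
	if err != nil {
		panic(err)
	}
	flag.Parse()

	for _, vetErr := range types.VetConfig(flagSet, suiteConfig, reporterConfig) {
		fmt.Println("configuration problem:", vetErr)
	}
}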
|
||||
|
||||
// GinkgoCLISharedFlags provides flags shared by the Ginkgo CLI's build, watch, and run commands
|
||||
var GinkgoCLISharedFlags = GinkgoFlags{
|
||||
{KeyPath: "C.Recurse", Name: "r", SectionKey: "multiple-suites",
|
||||
Usage: "If set, ginkgo finds and runs test suites under the current directory recursively."},
|
||||
{KeyPath: "C.SkipPackage", Name: "skip-package", SectionKey: "multiple-suites", DeprecatedName: "skipPackage", DeprecatedDocLink: "changed-command-line-flags",
|
||||
UsageArgument: "comma-separated list of packages",
|
||||
Usage: "A comma-separated list of package names to be skipped. If any part of the package's path matches, that package is ignored."},
|
||||
{KeyPath: "C.RequireSuite", Name: "require-suite", SectionKey: "failure", DeprecatedName: "requireSuite", DeprecatedDocLink: "changed-command-line-flags",
|
||||
Usage: "If set, Ginkgo fails if there are ginkgo tests in a directory but no invocation of RunSpecs."},
|
||||
{KeyPath: "C.NumCompilers", Name: "compilers", SectionKey: "multiple-suites", UsageDefaultValue: "0 (will autodetect)",
|
||||
Usage: "When running multiple packages, the number of concurrent compilations to perform."},
|
||||
}
|
||||
|
||||
// GinkgoCLIRunAndWatchFlags provides flags shared by the Ginkgo CLI's run and watch commands (but not build)
|
||||
var GinkgoCLIRunAndWatchFlags = GinkgoFlags{
|
||||
{KeyPath: "C.Procs", Name: "procs", SectionKey: "parallel", UsageDefaultValue: "1 (run in series)",
|
||||
Usage: "The number of parallel test nodes to run."},
|
||||
{KeyPath: "C.Procs", Name: "nodes", SectionKey: "parallel", UsageDefaultValue: "1 (run in series)",
|
||||
Usage: "--nodes is an alias for --procs"},
|
||||
{KeyPath: "C.Parallel", Name: "p", SectionKey: "parallel",
|
||||
Usage: "If set, ginkgo will run in parallel with an auto-detected number of nodes."},
|
||||
{KeyPath: "C.AfterRunHook", Name: "after-run-hook", SectionKey: "misc", DeprecatedName: "afterSuiteHook", DeprecatedDocLink: "changed-command-line-flags",
|
||||
Usage: "Command to run when a test suite completes."},
|
||||
{KeyPath: "C.OutputDir", Name: "output-dir", SectionKey: "output", UsageArgument: "directory", DeprecatedName: "outputdir", DeprecatedDocLink: "improved-profiling-support",
|
||||
Usage: "A location to place all generated profiles and reports."},
|
||||
{KeyPath: "C.KeepSeparateCoverprofiles", Name: "keep-separate-coverprofiles", SectionKey: "code-and-coverage-analysis",
|
||||
Usage: "If set, Ginkgo does not merge coverprofiles into one monolithic coverprofile. The coverprofiles will remain in their respective package directories or in -output-dir if set."},
|
||||
{KeyPath: "C.KeepSeparateReports", Name: "keep-separate-reports", SectionKey: "output",
|
||||
Usage: "If set, Ginkgo does not merge per-suite reports (e.g. -json-report) into one monolithic report for the entire testrun. The reports will remain in their respective package directories or in -output-dir if set."},
|
||||
|
||||
{KeyPath: "D.Stream", DeprecatedName: "stream", DeprecatedDocLink: "removed--stream", DeprecatedVersion: "2.0.0"},
|
||||
{KeyPath: "D.Notify", DeprecatedName: "notify", DeprecatedDocLink: "removed--notify", DeprecatedVersion: "2.0.0"},
|
||||
}
|
||||
|
||||
// GinkgoCLIRunFlags provides flags for Ginkgo CLI's run command that aren't shared by any other commands
|
||||
var GinkgoCLIRunFlags = GinkgoFlags{
|
||||
{KeyPath: "C.KeepGoing", Name: "keep-going", SectionKey: "multiple-suites", DeprecatedName: "keepGoing", DeprecatedDocLink: "changed-command-line-flags",
|
||||
Usage: "If set, failures from earlier test suites do not prevent later test suites from running."},
|
||||
{KeyPath: "C.UntilItFails", Name: "until-it-fails", SectionKey: "debug", DeprecatedName: "untilItFails", DeprecatedDocLink: "changed-command-line-flags",
|
||||
Usage: "If set, ginkgo will keep rerunning test suites until a failure occurs."},
|
||||
{KeyPath: "C.Repeat", Name: "repeat", SectionKey: "debug", UsageArgument: "n", UsageDefaultValue: "0 - i.e. no repetition, run only once",
|
||||
Usage: "The number of times to re-run a test-suite. Useful for debugging flaky tests. If set to N the suite will be run N+1 times and will be required to pass each time."},
|
||||
{KeyPath: "C.RandomizeSuites", Name: "randomize-suites", SectionKey: "order", DeprecatedName: "randomizeSuites", DeprecatedDocLink: "changed-command-line-flags",
|
||||
Usage: "If set, ginkgo will randomize the order in which test suites run."},
|
||||
}
|
||||
|
||||
// GinkgoCLIWatchFlags provides flags for Ginkgo CLI's watch command that aren't shared by any other commands
|
||||
var GinkgoCLIWatchFlags = GinkgoFlags{
|
||||
{KeyPath: "C.Depth", Name: "depth", SectionKey: "watch",
|
||||
Usage: "Ginkgo will watch dependencies down to this depth in the dependency tree."},
|
||||
{KeyPath: "C.WatchRegExp", Name: "watch-regexp", SectionKey: "watch", DeprecatedName: "watchRegExp", DeprecatedDocLink: "changed-command-line-flags",
|
||||
UsageArgument: "Regular Expression",
|
||||
UsageDefaultValue: `\.go$`,
|
||||
Usage: "Only files matching this regular expression will be watched for changes."},
|
||||
}
|
||||
|
||||
// GoBuildFlags provides flags for the Ginkgo CLI build, run, and watch commands that capture go's build-time flags. These are passed to go test -c by the ginkgo CLI
|
||||
var GoBuildFlags = GinkgoFlags{
|
||||
{KeyPath: "Go.Race", Name: "race", SectionKey: "code-and-coverage-analysis",
|
||||
Usage: "enable data race detection. Supported only on linux/amd64, freebsd/amd64, darwin/amd64, windows/amd64, linux/ppc64le and linux/arm64 (only for 48-bit VMA)."},
|
||||
{KeyPath: "Go.Vet", Name: "vet", UsageArgument: "list", SectionKey: "code-and-coverage-analysis",
|
||||
Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty, "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`},
|
||||
{KeyPath: "Go.Cover", Name: "cover", SectionKey: "code-and-coverage-analysis",
|
||||
Usage: "Enable coverage analysis. Note that because coverage works by annotating the source code before compilation, compilation and test failures with coverage enabled may report line numbers that don't correspond to the original sources."},
|
||||
{KeyPath: "Go.CoverMode", Name: "covermode", UsageArgument: "set,count,atomic", SectionKey: "code-and-coverage-analysis",
|
||||
Usage: `Set the mode for coverage analysis for the package[s] being tested. 'set': does this statement run? 'count': how many times does this statement run? 'atomic': like count, but correct in multithreaded tests and more expensive (must use atomic with -race). Sets -cover`},
|
||||
{KeyPath: "Go.CoverPkg", Name: "coverpkg", UsageArgument: "pattern1,pattern2,pattern3", SectionKey: "code-and-coverage-analysis",
|
||||
Usage: "Apply coverage analysis in each test to packages matching the patterns. The default is for each test to analyze only the package being tested. See 'go help packages' for a description of package patterns. Sets -cover."},
|
||||
|
||||
{KeyPath: "Go.A", Name: "a", SectionKey: "go-build",
|
||||
Usage: "force rebuilding of packages that are already up-to-date."},
|
||||
{KeyPath: "Go.ASMFlags", Name: "asmflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build",
|
||||
Usage: "arguments to pass on each go tool asm invocation."},
|
||||
{KeyPath: "Go.BuildMode", Name: "buildmode", UsageArgument: "mode", SectionKey: "go-build",
|
||||
Usage: "build mode to use. See 'go help buildmode' for more."},
|
||||
{KeyPath: "Go.Compiler", Name: "compiler", UsageArgument: "name", SectionKey: "go-build",
|
||||
Usage: "name of compiler to use, as in runtime.Compiler (gccgo or gc)."},
|
||||
{KeyPath: "Go.GCCGoFlags", Name: "gccgoflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build",
|
||||
Usage: "arguments to pass on each gccgo compiler/linker invocation."},
|
||||
{KeyPath: "Go.GCFlags", Name: "gcflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build",
|
||||
Usage: "arguments to pass on each go tool compile invocation."},
|
||||
{KeyPath: "Go.InstallSuffix", Name: "installsuffix", SectionKey: "go-build",
|
||||
Usage: "a suffix to use in the name of the package installation directory, in order to keep output separate from default builds. If using the -race flag, the install suffix is automatically set to raceor, if set explicitly, has _race appended to it. Likewise for the -msan flag. Using a -buildmode option that requires non-default compile flags has a similar effect."},
|
||||
{KeyPath: "Go.LDFlags", Name: "ldflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build",
|
||||
Usage: "arguments to pass on each go tool link invocation."},
|
||||
{KeyPath: "Go.LinkShared", Name: "linkshared", SectionKey: "go-build",
|
||||
Usage: "build code that will be linked against shared libraries previously created with -buildmode=shared."},
|
||||
{KeyPath: "Go.Mod", Name: "mod", UsageArgument: "mode (readonly, vendor, or mod)", SectionKey: "go-build",
|
||||
Usage: "module download mode to use: readonly, vendor, or mod. See 'go help modules' for more."},
|
||||
{KeyPath: "Go.ModCacheRW", Name: "modcacherw", SectionKey: "go-build",
|
||||
Usage: "leave newly-created directories in the module cache read-write instead of making them read-only."},
|
||||
{KeyPath: "Go.ModFile", Name: "modfile", UsageArgument: "file", SectionKey: "go-build",
|
||||
Usage: `in module aware mode, read (and possibly write) an alternate go.mod file instead of the one in the module root directory. A file named go.mod must still be present in order to determine the module root directory, but it is not accessed. When -modfile is specified, an alternate go.sum file is also used: its path is derived from the -modfile flag by trimming the ".mod" extension and appending ".sum".`},
|
||||
{KeyPath: "Go.MSan", Name: "msan", SectionKey: "go-build",
|
||||
Usage: "enable interoperation with memory sanitizer. Supported only on linux/amd64, linux/arm64 and only with Clang/LLVM as the host C compiler. On linux/arm64, pie build mode will be used."},
|
||||
{KeyPath: "Go.N", Name: "n", SectionKey: "go-build",
|
||||
Usage: "print the commands but do not run them."},
|
||||
{KeyPath: "Go.PkgDir", Name: "pkgdir", UsageArgument: "dir", SectionKey: "go-build",
|
||||
Usage: "install and load all packages from dir instead of the usual locations. For example, when building with a non-standard configuration, use -pkgdir to keep generated packages in a separate location."},
|
||||
{KeyPath: "Go.Tags", Name: "tags", UsageArgument: "tag,list", SectionKey: "go-build",
|
||||
Usage: "a comma-separated list of build tags to consider satisfied during the build. For more information about build tags, see the description of build constraints in the documentation for the go/build package. (Earlier versions of Go used a space-separated list, and that form is deprecated but still recognized.)"},
|
||||
{KeyPath: "Go.TrimPath", Name: "trimpath", SectionKey: "go-build",
|
||||
Usage: `remove all file system paths from the resulting executable. Instead of absolute file system paths, the recorded file names will begin with either "go" (for the standard library), or a module path@version (when using modules), or a plain import path (when using GOPATH).`},
|
||||
{KeyPath: "Go.ToolExec", Name: "toolexec", UsageArgument: "'cmd args'", SectionKey: "go-build",
|
||||
Usage: "a program to use to invoke toolchain programs like vet and asm. For example, instead of running asm, the go command will run cmd args /path/to/asm <arguments for asm>'."},
|
||||
{KeyPath: "Go.Work", Name: "work", SectionKey: "go-build",
|
||||
Usage: "print the name of the temporary work directory and do not delete it when exiting."},
|
||||
{KeyPath: "Go.X", Name: "x", SectionKey: "go-build",
|
||||
Usage: "print the commands."},
|
||||
}
|
||||
|
||||
// GoRunFlags provides flags for the Ginkgo CLI run, and watch commands that capture go's run-time flags. These are passed to the compiled test binary by the ginkgo CLI
|
||||
var GoRunFlags = GinkgoFlags{
|
||||
{KeyPath: "Go.CoverProfile", Name: "coverprofile", UsageArgument: "file", SectionKey: "code-and-coverage-analysis",
|
||||
Usage: `Write a coverage profile to the file after all tests have passed. Sets -cover.`},
|
||||
{KeyPath: "Go.BlockProfile", Name: "blockprofile", UsageArgument: "file", SectionKey: "performance-analysis",
|
||||
Usage: `Write a goroutine blocking profile to the specified file when all tests are complete. Preserves test binary.`},
|
||||
{KeyPath: "Go.BlockProfileRate", Name: "blockprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis",
|
||||
Usage: `Control the detail provided in goroutine blocking profiles by calling runtime.SetBlockProfileRate with rate. See 'go doc runtime.SetBlockProfileRate'. The profiler aims to sample, on average, one blocking event every n nanoseconds the program spends blocked. By default, if -test.blockprofile is set without this flag, all blocking events are recorded, equivalent to -test.blockprofilerate=1.`},
|
||||
{KeyPath: "Go.CPUProfile", Name: "cpuprofile", UsageArgument: "file", SectionKey: "performance-analysis",
|
||||
Usage: `Write a CPU profile to the specified file before exiting. Preserves test binary.`},
|
||||
{KeyPath: "Go.MemProfile", Name: "memprofile", UsageArgument: "file", SectionKey: "performance-analysis",
|
||||
Usage: `Write an allocation profile to the file after all tests have passed. Preserves test binary.`},
|
||||
{KeyPath: "Go.MemProfileRate", Name: "memprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis",
|
||||
Usage: `Enable more precise (and expensive) memory allocation profiles by setting runtime.MemProfileRate. See 'go doc runtime.MemProfileRate'. To profile all memory allocations, use -test.memprofilerate=1.`},
|
||||
{KeyPath: "Go.MutexProfile", Name: "mutexprofile", UsageArgument: "file", SectionKey: "performance-analysis",
|
||||
Usage: `Write a mutex contention profile to the specified file when all tests are complete. Preserves test binary.`},
|
||||
{KeyPath: "Go.MutexProfileFraction", Name: "mutexprofilefraction", UsageArgument: "n", SectionKey: "performance-analysis",
|
||||
Usage: `if >= 0, calls runtime.SetMutexProfileFraction(). Sample 1 in n stack traces of goroutines holding a contended mutex.`},
|
||||
{KeyPath: "Go.Trace", Name: "execution-trace", UsageArgument: "file", ExportAs: "trace", SectionKey: "performance-analysis",
|
||||
Usage: `Write an execution trace to the specified file before exiting.`},
|
||||
}
|
||||
|
||||
// VetAndInitializeCLIAndGoConfig validates that the Ginkgo CLI's configuration is sound
|
||||
// It returns a potentially mutated copy of the config that rationalizes the configuration to ensure consistency for downstream consumers
|
||||
func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsConfig) (CLIConfig, GoFlagsConfig, []error) {
|
||||
errors := []error{}
|
||||
|
||||
if cliConfig.Repeat > 0 && cliConfig.UntilItFails {
|
||||
errors = append(errors, GinkgoErrors.BothRepeatAndUntilItFails())
|
||||
}
|
||||
|
||||
//initialize the output directory
|
||||
if cliConfig.OutputDir != "" {
|
||||
err := os.MkdirAll(cliConfig.OutputDir, 0777)
|
||||
if err != nil {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
}
|
||||
|
||||
//ensure cover mode is configured appropriately
|
||||
if goFlagsConfig.CoverMode != "" || goFlagsConfig.CoverPkg != "" || goFlagsConfig.CoverProfile != "" {
|
||||
goFlagsConfig.Cover = true
|
||||
}
|
||||
if goFlagsConfig.Cover && goFlagsConfig.CoverProfile == "" {
|
||||
goFlagsConfig.CoverProfile = "coverprofile.out"
|
||||
}
|
||||
|
||||
return cliConfig, goFlagsConfig, errors
|
||||
}
|
||||
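// Illustrative sketch (not part of the vendored file): the rationalization above
// means that setting any cover-related flag implies -cover, and a coverprofile name
// is defaulted when none is given. The field values are assumptions.
//
//	cli := CLIConfig{OutputDir: "reports"}
//	goFlags := GoFlagsConfig{CoverMode: "atomic"}
//	cli, goFlags, errs := VetAndInitializeCLIAndGoConfig(cli, goFlags)
//	// Assuming the "reports" directory can be created: len(errs) == 0,
//	// goFlags.Cover == true, and goFlags.CoverProfile == "coverprofile.out".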
|
||||
// GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test
|
||||
func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string) ([]string, error) {
|
||||
// if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure
|
||||
// the built test binary can generate a coverprofile
|
||||
if goFlagsConfig.CoverProfile != "" {
|
||||
goFlagsConfig.Cover = true
|
||||
}
|
||||
|
||||
args := []string{"test", "-c", "-o", destination, packageToBuild}
|
||||
goArgs, err := GenerateFlagArgs(
|
||||
GoBuildFlags,
|
||||
map[string]interface{}{
|
||||
"Go": &goFlagsConfig,
|
||||
},
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
args = append(args, goArgs...)
|
||||
return args, nil
|
||||
}
|
||||
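// Illustrative sketch (not part of the vendored file): how the CLI might compile a
// test binary for a package. The destination and package paths are assumptions.
//
//	goFlags := GoFlagsConfig{Race: true}
//	args, err := GenerateGoTestCompileArgs(goFlags, "tunnel.test", "./tunnel")
//	// err == nil; args begins with "test", "-c", "-o", "tunnel.test", "./tunnel",
//	// followed by whatever build flags are set (here, an argument enabling -race).
//	// The CLI could then invoke the go tool with these arguments.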
|
||||
// GenerateGinkgoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled Ginkgo test binary
|
||||
func GenerateGinkgoTestRunArgs(suiteConfig SuiteConfig, reporterConfig ReporterConfig, goFlagsConfig GoFlagsConfig) ([]string, error) {
|
||||
var flags GinkgoFlags
|
||||
flags = SuiteConfigFlags.WithPrefix("ginkgo")
|
||||
flags = flags.CopyAppend(ParallelConfigFlags.WithPrefix("ginkgo")...)
|
||||
flags = flags.CopyAppend(ReporterConfigFlags.WithPrefix("ginkgo")...)
|
||||
flags = flags.CopyAppend(GoRunFlags.WithPrefix("test")...)
|
||||
bindings := map[string]interface{}{
|
||||
"S": &suiteConfig,
|
||||
"R": &reporterConfig,
|
||||
"Go": &goFlagsConfig,
|
||||
}
|
||||
|
||||
return GenerateFlagArgs(flags, bindings)
|
||||
}
|
||||
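// Illustrative sketch (not part of the vendored file): the prefixes applied above
// mean the compiled test binary receives namespaced flags, in the spirit of the
// -ginkgo.parallel.* and -test.blockprofile names referenced elsewhere in this
// package. Field values are assumptions.
//
//	suite, reporter := SuiteConfig{}, ReporterConfig{}
//	goFlags := GoFlagsConfig{CoverProfile: "cover.out"}
//	args, _ := GenerateGinkgoTestRunArgs(suite, reporter, goFlags)
//	// args would include a "test."-prefixed coverprofile entry alongside any
//	// non-zero suite and reporter settings prefixed with "ginkgo.".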
|
||||
// GenerateGoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled non-Ginkgo test binary
|
||||
func GenerateGoTestRunArgs(goFlagsConfig GoFlagsConfig) ([]string, error) {
|
||||
flags := GoRunFlags.WithPrefix("test")
|
||||
bindings := map[string]interface{}{
|
||||
"Go": &goFlagsConfig,
|
||||
}
|
||||
|
||||
args, err := GenerateFlagArgs(flags, bindings)
|
||||
if err != nil {
|
||||
return args, err
|
||||
}
|
||||
args = append(args, "--test.v")
|
||||
return args, nil
|
||||
}
|
||||
|
||||
// BuildRunCommandFlagSet builds the FlagSet for the `ginkgo run` command
|
||||
func BuildRunCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig, cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) {
|
||||
flags := SuiteConfigFlags
|
||||
flags = flags.CopyAppend(ReporterConfigFlags...)
|
||||
flags = flags.CopyAppend(GinkgoCLISharedFlags...)
|
||||
flags = flags.CopyAppend(GinkgoCLIRunAndWatchFlags...)
|
||||
flags = flags.CopyAppend(GinkgoCLIRunFlags...)
|
||||
flags = flags.CopyAppend(GoBuildFlags...)
|
||||
flags = flags.CopyAppend(GoRunFlags...)
|
||||
|
||||
bindings := map[string]interface{}{
|
||||
"S": suiteConfig,
|
||||
"R": reporterConfig,
|
||||
"C": cliConfig,
|
||||
"Go": goFlagsConfig,
|
||||
"D": &deprecatedConfig{},
|
||||
}
|
||||
|
||||
return NewGinkgoFlagSet(flags, bindings, FlagSections)
|
||||
}
|
||||
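// Illustrative sketch (not part of the vendored file): the CLI would hold the four
// config structs and hand pointers to them to the flag set so that parsing
// populates them in place.
//
//	var (
//		suiteConfig    SuiteConfig
//		reporterConfig ReporterConfig
//		cliConfig      CLIConfig
//		goFlagsConfig  GoFlagsConfig
//	)
//	flagSet, err := BuildRunCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig)
//	// flagSet can then parse the command line; the bound structs receive the values.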
|
||||
// BuildWatchCommandFlagSet builds the FlagSet for the `ginkgo watch` command
|
||||
func BuildWatchCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig, cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) {
|
||||
flags := SuiteConfigFlags
|
||||
flags = flags.CopyAppend(ReporterConfigFlags...)
|
||||
flags = flags.CopyAppend(GinkgoCLISharedFlags...)
|
||||
flags = flags.CopyAppend(GinkgoCLIRunAndWatchFlags...)
|
||||
flags = flags.CopyAppend(GinkgoCLIWatchFlags...)
|
||||
flags = flags.CopyAppend(GoBuildFlags...)
|
||||
flags = flags.CopyAppend(GoRunFlags...)
|
||||
|
||||
bindings := map[string]interface{}{
|
||||
"S": suiteConfig,
|
||||
"R": reporterConfig,
|
||||
"C": cliConfig,
|
||||
"Go": goFlagsConfig,
|
||||
"D": &deprecatedConfig{},
|
||||
}
|
||||
|
||||
return NewGinkgoFlagSet(flags, bindings, FlagSections)
|
||||
}
|
||||
|
||||
// BuildBuildCommandFlagSet builds the FlagSet for the `ginkgo build` command
|
||||
func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) {
|
||||
flags := GinkgoCLISharedFlags
|
||||
flags = flags.CopyAppend(GoBuildFlags...)
|
||||
|
||||
bindings := map[string]interface{}{
|
||||
"C": cliConfig,
|
||||
"Go": goFlagsConfig,
|
||||
"D": &deprecatedConfig{},
|
||||
}
|
||||
|
||||
flagSections := make(GinkgoFlagSections, len(FlagSections))
|
||||
copy(flagSections, FlagSections)
|
||||
for i := range flagSections {
|
||||
if flagSections[i].Key == "multiple-suites" {
|
||||
flagSections[i].Heading = "Building Multiple Suites"
|
||||
}
|
||||
if flagSections[i].Key == "go-build" {
|
||||
flagSections[i] = GinkgoFlagSection{Key: "go-build", Style: "{{/}}", Heading: "Go Build Flags",
|
||||
Description: "These flags are inherited from go build."}
|
||||
}
|
||||
}
|
||||
|
||||
return NewGinkgoFlagSet(flags, bindings, flagSections)
|
||||
}
|
||||
|
||||
func BuildLabelsCommandFlagSet(cliConfig *CLIConfig) (GinkgoFlagSet, error) {
|
||||
flags := GinkgoCLISharedFlags.SubsetWithNames("r", "skip-package")
|
||||
|
||||
bindings := map[string]interface{}{
|
||||
"C": cliConfig,
|
||||
}
|
||||
|
||||
flagSections := make(GinkgoFlagSections, len(FlagSections))
|
||||
copy(flagSections, FlagSections)
|
||||
for i := range flagSections {
|
||||
if flagSections[i].Key == "multiple-suites" {
|
||||
flagSections[i].Heading = "Fetching Labels from Multiple Suites"
|
||||
}
|
||||
}
|
||||
|
||||
return NewGinkgoFlagSet(flags, bindings, flagSections)
|
||||
}
|
141
vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go
generated
vendored
Normal file
@@ -0,0 +1,141 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
/*
|
||||
A set of deprecations to make the transition from v1 to v2 easier for users who have written custom reporters.
|
||||
*/
|
||||
|
||||
type SuiteSummary = DeprecatedSuiteSummary
|
||||
type SetupSummary = DeprecatedSetupSummary
|
||||
type SpecSummary = DeprecatedSpecSummary
|
||||
type SpecMeasurement = DeprecatedSpecMeasurement
|
||||
type SpecComponentType = NodeType
|
||||
type SpecFailure = DeprecatedSpecFailure
|
||||
|
||||
var (
|
||||
SpecComponentTypeInvalid = NodeTypeInvalid
|
||||
SpecComponentTypeContainer = NodeTypeContainer
|
||||
SpecComponentTypeIt = NodeTypeIt
|
||||
SpecComponentTypeBeforeEach = NodeTypeBeforeEach
|
||||
SpecComponentTypeJustBeforeEach = NodeTypeJustBeforeEach
|
||||
SpecComponentTypeAfterEach = NodeTypeAfterEach
|
||||
SpecComponentTypeJustAfterEach = NodeTypeJustAfterEach
|
||||
SpecComponentTypeBeforeSuite = NodeTypeBeforeSuite
|
||||
SpecComponentTypeSynchronizedBeforeSuite = NodeTypeSynchronizedBeforeSuite
|
||||
SpecComponentTypeAfterSuite = NodeTypeAfterSuite
|
||||
SpecComponentTypeSynchronizedAfterSuite = NodeTypeSynchronizedAfterSuite
|
||||
)
|
||||
|
||||
type DeprecatedSuiteSummary struct {
|
||||
SuiteDescription string
|
||||
SuiteSucceeded bool
|
||||
SuiteID string
|
||||
|
||||
NumberOfSpecsBeforeParallelization int
|
||||
NumberOfTotalSpecs int
|
||||
NumberOfSpecsThatWillBeRun int
|
||||
NumberOfPendingSpecs int
|
||||
NumberOfSkippedSpecs int
|
||||
NumberOfPassedSpecs int
|
||||
NumberOfFailedSpecs int
|
||||
NumberOfFlakedSpecs int
|
||||
RunTime time.Duration
|
||||
}
|
||||
|
||||
type DeprecatedSetupSummary struct {
|
||||
ComponentType SpecComponentType
|
||||
CodeLocation CodeLocation
|
||||
|
||||
State SpecState
|
||||
RunTime time.Duration
|
||||
Failure SpecFailure
|
||||
|
||||
CapturedOutput string
|
||||
SuiteID string
|
||||
}
|
||||
|
||||
type DeprecatedSpecSummary struct {
|
||||
ComponentTexts []string
|
||||
ComponentCodeLocations []CodeLocation
|
||||
|
||||
State SpecState
|
||||
RunTime time.Duration
|
||||
Failure SpecFailure
|
||||
IsMeasurement bool
|
||||
NumberOfSamples int
|
||||
Measurements map[string]*DeprecatedSpecMeasurement
|
||||
|
||||
CapturedOutput string
|
||||
SuiteID string
|
||||
}
|
||||
|
||||
func (s DeprecatedSpecSummary) HasFailureState() bool {
|
||||
return s.State.Is(SpecStateFailureStates)
|
||||
}
|
||||
|
||||
func (s DeprecatedSpecSummary) TimedOut() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (s DeprecatedSpecSummary) Panicked() bool {
|
||||
return s.State == SpecStatePanicked
|
||||
}
|
||||
|
||||
func (s DeprecatedSpecSummary) Failed() bool {
|
||||
return s.State == SpecStateFailed
|
||||
}
|
||||
|
||||
func (s DeprecatedSpecSummary) Passed() bool {
|
||||
return s.State == SpecStatePassed
|
||||
}
|
||||
|
||||
func (s DeprecatedSpecSummary) Skipped() bool {
|
||||
return s.State == SpecStateSkipped
|
||||
}
|
||||
|
||||
func (s DeprecatedSpecSummary) Pending() bool {
|
||||
return s.State == SpecStatePending
|
||||
}
|
||||
|
||||
type DeprecatedSpecFailure struct {
|
||||
Message string
|
||||
Location CodeLocation
|
||||
ForwardedPanic string
|
||||
|
||||
ComponentIndex int
|
||||
ComponentType SpecComponentType
|
||||
ComponentCodeLocation CodeLocation
|
||||
}
|
||||
|
||||
type DeprecatedSpecMeasurement struct {
|
||||
Name string
|
||||
Info interface{}
|
||||
Order int
|
||||
|
||||
Results []float64
|
||||
|
||||
Smallest float64
|
||||
Largest float64
|
||||
Average float64
|
||||
StdDeviation float64
|
||||
|
||||
SmallestLabel string
|
||||
LargestLabel string
|
||||
AverageLabel string
|
||||
Units string
|
||||
Precision int
|
||||
}
|
||||
|
||||
func (s DeprecatedSpecMeasurement) PrecisionFmt() string {
|
||||
if s.Precision == 0 {
|
||||
return "%f"
|
||||
}
|
||||
|
||||
str := strconv.Itoa(s.Precision)
|
||||
|
||||
return "%." + str + "f"
|
||||
}
|
170
vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go
generated
vendored
Normal file
@@ -0,0 +1,170 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"unicode"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/formatter"
|
||||
)
|
||||
|
||||
type Deprecation struct {
|
||||
Message string
|
||||
DocLink string
|
||||
Version string
|
||||
}
|
||||
|
||||
type deprecations struct{}
|
||||
|
||||
var Deprecations = deprecations{}
|
||||
|
||||
func (d deprecations) CustomReporter() Deprecation {
|
||||
return Deprecation{
|
||||
Message: "Support for custom reporters has been removed in V2. Please read the documentation linked to below for Ginkgo's new behavior and for a migration path:",
|
||||
DocLink: "removed-custom-reporters",
|
||||
Version: "1.16.0",
|
||||
}
|
||||
}
|
||||
|
||||
func (d deprecations) Async() Deprecation {
|
||||
return Deprecation{
|
||||
Message: "You are passing a Done channel to a test node to test asynchronous behavior. This is deprecated in Ginkgo V2. Your test will run synchronously and the timeout will be ignored.",
|
||||
DocLink: "removed-async-testing",
|
||||
Version: "1.16.0",
|
||||
}
|
||||
}
|
||||
|
||||
func (d deprecations) Measure() Deprecation {
|
||||
return Deprecation{
|
||||
Message: "Measure is deprecated and will be removed in Ginkgo V2. Please migrate to gomega/gmeasure.",
|
||||
DocLink: "removed-measure",
|
||||
Version: "1.16.3",
|
||||
}
|
||||
}
|
||||
|
||||
func (d deprecations) ParallelNode() Deprecation {
|
||||
return Deprecation{
|
||||
Message: "GinkgoParallelNode is deprecated and will be removed in Ginkgo V2. Please use GinkgoParallelProcess instead.",
|
||||
DocLink: "renamed-ginkgoparallelnode",
|
||||
Version: "1.16.4",
|
||||
}
|
||||
}
|
||||
|
||||
func (d deprecations) CurrentGinkgoTestDescription() Deprecation {
|
||||
return Deprecation{
|
||||
Message: "CurrentGinkgoTestDescription() is deprecated in Ginkgo V2. Use CurrentSpecReport() instead.",
|
||||
DocLink: "changed-currentginkgotestdescription",
|
||||
Version: "1.16.0",
|
||||
}
|
||||
}
|
||||
|
||||
func (d deprecations) Convert() Deprecation {
|
||||
return Deprecation{
|
||||
Message: "The convert command is deprecated in Ginkgo V2",
|
||||
DocLink: "removed-ginkgo-convert",
|
||||
Version: "1.16.0",
|
||||
}
|
||||
}
|
||||
|
||||
func (d deprecations) Blur() Deprecation {
|
||||
return Deprecation{
|
||||
Message: "The blur command is deprecated in Ginkgo V2. Use 'ginkgo unfocus' instead.",
|
||||
Version: "1.16.0",
|
||||
}
|
||||
}
|
||||
|
||||
func (d deprecations) Nodot() Deprecation {
|
||||
return Deprecation{
|
||||
Message: "The nodot command is deprecated in Ginkgo V2. Please either dot-import Ginkgo or use the package identifier in your code to references objects and types provided by Ginkgo and Gomega.",
|
||||
DocLink: "removed-ginkgo-nodot",
|
||||
Version: "1.16.0",
|
||||
}
|
||||
}
|
||||
|
||||
type DeprecationTracker struct {
|
||||
deprecations map[Deprecation][]CodeLocation
|
||||
lock *sync.Mutex
|
||||
}
|
||||
|
||||
func NewDeprecationTracker() *DeprecationTracker {
|
||||
return &DeprecationTracker{
|
||||
deprecations: map[Deprecation][]CodeLocation{},
|
||||
lock: &sync.Mutex{},
|
||||
}
|
||||
}
|
||||
|
||||
func (d *DeprecationTracker) TrackDeprecation(deprecation Deprecation, cl ...CodeLocation) {
|
||||
ackVersion := os.Getenv("ACK_GINKGO_DEPRECATIONS")
|
||||
if deprecation.Version != "" && ackVersion != "" {
|
||||
ack := ParseSemVer(ackVersion)
|
||||
version := ParseSemVer(deprecation.Version)
|
||||
if ack.GreaterThanOrEqualTo(version) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
d.lock.Lock()
|
||||
defer d.lock.Unlock()
|
||||
if len(cl) == 1 {
|
||||
d.deprecations[deprecation] = append(d.deprecations[deprecation], cl[0])
|
||||
} else {
|
||||
d.deprecations[deprecation] = []CodeLocation{}
|
||||
}
|
||||
}
|
||||
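// Illustrative sketch (not part of the vendored file): typical use of the tracker.
//
//	tracker := NewDeprecationTracker()
//	tracker.TrackDeprecation(Deprecations.Measure()) // the CodeLocation argument is optional
//	if tracker.DidTrackDeprecations() {
//		fmt.Print(tracker.DeprecationsReport())
//	}
//	// Setting ACK_GINKGO_DEPRECATIONS to a version >= the deprecation's Version
//	// (here 1.16.3) suppresses the corresponding TrackDeprecation call entirely.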
|
||||
func (d *DeprecationTracker) DidTrackDeprecations() bool {
|
||||
d.lock.Lock()
|
||||
defer d.lock.Unlock()
|
||||
return len(d.deprecations) > 0
|
||||
}
|
||||
|
||||
func (d *DeprecationTracker) DeprecationsReport() string {
|
||||
d.lock.Lock()
|
||||
defer d.lock.Unlock()
|
||||
out := formatter.F("{{light-yellow}}You're using deprecated Ginkgo functionality:{{/}}\n")
|
||||
out += formatter.F("{{light-yellow}}============================================={{/}}\n")
|
||||
for deprecation, locations := range d.deprecations {
|
||||
out += formatter.Fi(1, "{{yellow}}"+deprecation.Message+"{{/}}\n")
|
||||
if deprecation.DocLink != "" {
|
||||
out += formatter.Fi(1, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}https://onsi.github.io/ginkgo/MIGRATING_TO_V2#%s{{/}}\n", deprecation.DocLink)
|
||||
}
|
||||
for _, location := range locations {
|
||||
out += formatter.Fi(2, "{{gray}}%s{{/}}\n", location)
|
||||
}
|
||||
}
|
||||
out += formatter.F("\n{{gray}}To silence deprecations that can be silenced set the following environment variable:{{/}}\n")
|
||||
out += formatter.Fi(1, "{{gray}}ACK_GINKGO_DEPRECATIONS=%s{{/}}\n", VERSION)
|
||||
return out
|
||||
}
|
||||
|
||||
type SemVer struct {
|
||||
Major int
|
||||
Minor int
|
||||
Patch int
|
||||
}
|
||||
|
||||
func (s SemVer) GreaterThanOrEqualTo(o SemVer) bool {
|
||||
return (s.Major > o.Major) ||
|
||||
(s.Major == o.Major && s.Minor > o.Minor) ||
|
||||
(s.Major == o.Major && s.Minor == o.Minor && s.Patch >= o.Patch)
|
||||
}
|
||||
|
||||
func ParseSemVer(semver string) SemVer {
|
||||
out := SemVer{}
|
||||
semver = strings.TrimFunc(semver, func(r rune) bool {
|
||||
return !(unicode.IsNumber(r) || r == '.')
|
||||
})
|
||||
components := strings.Split(semver, ".")
|
||||
if len(components) > 0 {
|
||||
out.Major, _ = strconv.Atoi(components[0])
|
||||
}
|
||||
if len(components) > 1 {
|
||||
out.Minor, _ = strconv.Atoi(components[1])
|
||||
}
|
||||
if len(components) > 2 {
|
||||
out.Patch, _ = strconv.Atoi(components[2])
|
||||
}
|
||||
return out
|
||||
}
|
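// Illustrative sketch (not part of the vendored file): how the ACK_GINKGO_DEPRECATIONS
// comparison above behaves.
//
//	ack := ParseSemVer("v2.3.1")  // leading "v" is trimmed -> SemVer{Major: 2, Minor: 3, Patch: 1}
//	dep := ParseSemVer("1.16.4")  // -> SemVer{Major: 1, Minor: 16, Patch: 4}
//	ack.GreaterThanOrEqualTo(dep) // true, so the deprecation would be silenced
//	dep.GreaterThanOrEqualTo(ack) // false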
43
vendor/github.com/onsi/ginkgo/v2/types/enum_support.go
generated
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
package types
|
||||
|
||||
import "encoding/json"
|
||||
|
||||
type EnumSupport struct {
|
||||
toString map[uint]string
|
||||
toEnum map[string]uint
|
||||
maxEnum uint
|
||||
}
|
||||
|
||||
func NewEnumSupport(toString map[uint]string) EnumSupport {
|
||||
toEnum, maxEnum := map[string]uint{}, uint(0)
|
||||
for k, v := range toString {
|
||||
toEnum[v] = k
|
||||
if maxEnum < k {
|
||||
maxEnum = k
|
||||
}
|
||||
}
|
||||
return EnumSupport{toString: toString, toEnum: toEnum, maxEnum: maxEnum}
|
||||
}
|
||||
|
||||
func (es EnumSupport) String(e uint) string {
|
||||
if e > es.maxEnum {
|
||||
return es.toString[0]
|
||||
}
|
||||
return es.toString[e]
|
||||
}
|
||||
|
||||
func (es EnumSupport) UnmarshJSON(b []byte) (uint, error) {
|
||||
var dec string
|
||||
if err := json.Unmarshal(b, &dec); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
out := es.toEnum[dec] // if we miss we get 0 which is what we want anyway
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (es EnumSupport) MarshJSON(e uint) ([]byte, error) {
|
||||
if e == 0 || e > es.maxEnum {
|
||||
return json.Marshal(nil)
|
||||
}
|
||||
return json.Marshal(es.toString[e])
|
||||
}
|
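// Illustrative sketch (not part of the vendored file): round-tripping an enum.
// The names chosen for the enum values are assumptions.
//
//	es := NewEnumSupport(map[uint]string{0: "INVALID", 1: "passed", 2: "failed"})
//	es.String(2)                               // "failed"
//	es.String(99)                              // out of range -> "INVALID" (toString[0])
//	b, _ := es.MarshJSON(1)                    // []byte(`"passed"`)
//	v, _ := es.UnmarshJSON([]byte(`"failed"`)) // 2; unknown strings decode to 0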
621
vendor/github.com/onsi/ginkgo/v2/types/errors.go
generated
vendored
Normal file
@@ -0,0 +1,621 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/formatter"
|
||||
)
|
||||
|
||||
type GinkgoError struct {
|
||||
Heading string
|
||||
Message string
|
||||
DocLink string
|
||||
CodeLocation CodeLocation
|
||||
}
|
||||
|
||||
func (g GinkgoError) Error() string {
|
||||
out := formatter.F("{{bold}}{{red}}%s{{/}}\n", g.Heading)
|
||||
if (g.CodeLocation != CodeLocation{}) {
|
||||
contentsOfLine := strings.TrimLeft(g.CodeLocation.ContentsOfLine(), "\t ")
|
||||
if contentsOfLine != "" {
|
||||
out += formatter.F("{{light-gray}}%s{{/}}\n", contentsOfLine)
|
||||
}
|
||||
out += formatter.F("{{gray}}%s{{/}}\n", g.CodeLocation)
|
||||
}
|
||||
if g.Message != "" {
|
||||
out += formatter.Fiw(1, formatter.COLS, g.Message)
|
||||
out += "\n\n"
|
||||
}
|
||||
if g.DocLink != "" {
|
||||
out += formatter.Fiw(1, formatter.COLS, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}http://onsi.github.io/ginkgo/#%s{{/}}\n", g.DocLink)
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
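// Illustrative sketch (not part of the vendored file): errors in this package are
// built as GinkgoError values so the CLI can render a heading, an optional code
// location, and a documentation link. The values below are assumptions.
//
//	err := GinkgoError{
//		Heading: "Something went wrong",
//		Message: "A longer explanation of what went wrong and how to fix it.",
//		DocLink: "mental-model-how-ginkgo-handles-failure",
//	}
//	fmt.Println(err.Error()) // rendered with the styling markup used above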
|
||||
type ginkgoErrors struct{}
|
||||
|
||||
var GinkgoErrors = ginkgoErrors{}
|
||||
|
||||
func (g ginkgoErrors) UncaughtGinkgoPanic(cl CodeLocation) error {
|
||||
return GinkgoError{
|
||||
Heading: "Your Test Panicked",
|
||||
Message: `When you, or your assertion library, calls Ginkgo's Fail(),
|
||||
Ginkgo panics to prevent subsequent assertions from running.
|
||||
|
||||
Normally Ginkgo rescues this panic so you shouldn't see it.
|
||||
|
||||
However, if you make an assertion in a goroutine, Ginkgo can't capture the panic.
|
||||
To circumvent this, you should call
|
||||
|
||||
defer GinkgoRecover()
|
||||
|
||||
at the top of the goroutine that caused this panic.
|
||||
|
||||
Alternatively, you may have made an assertion outside of a Ginkgo
|
||||
leaf node (e.g. in a container node or some out-of-band function) - please move your assertion to
|
||||
an appropriate Ginkgo node (e.g. a BeforeSuite, BeforeEach, It, etc...).`,
|
||||
DocLink: "mental-model-how-ginkgo-handles-failure",
|
||||
CodeLocation: cl,
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) RerunningSuite() error {
|
||||
return GinkgoError{
|
||||
Heading: "Rerunning Suite",
|
||||
Message: formatter.F(`It looks like you are calling RunSpecs more than once. Ginkgo does not support rerunning suites. If you want to rerun a suite try {{bold}}ginkgo --repeat=N{{/}} or {{bold}}ginkgo --until-it-fails{{/}}`),
|
||||
DocLink: "repeating-spec-runs-and-managing-flaky-specs",
|
||||
}
|
||||
}
|
||||
|
||||
/* Tree construction errors */
|
||||
|
||||
func (g ginkgoErrors) PushingNodeInRunPhase(nodeType NodeType, cl CodeLocation) error {
|
||||
return GinkgoError{
|
||||
Heading: "Ginkgo detected an issue with your spec structure",
|
||||
Message: formatter.F(
|
||||
`It looks like you are trying to add a {{bold}}[%s]{{/}} node
|
||||
to the Ginkgo spec tree in a leaf node {{bold}}after{{/}} the specs started running.
|
||||
|
||||
To enable randomization and parallelization Ginkgo requires the spec tree
|
||||
to be fully constructed up front. In practice, this means that you can
|
||||
only create nodes like {{bold}}[%s]{{/}} at the top-level or within the
|
||||
body of a {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}.`, nodeType, nodeType),
|
||||
CodeLocation: cl,
|
||||
DocLink: "mental-model-how-ginkgo-traverses-the-spec-hierarchy",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) CaughtPanicDuringABuildPhase(caughtPanic interface{}, cl CodeLocation) error {
|
||||
return GinkgoError{
|
||||
Heading: "Assertion or Panic detected during tree construction",
|
||||
Message: formatter.F(
|
||||
`Ginkgo detected a panic while constructing the spec tree.
|
||||
You may be trying to make an assertion in the body of a container node
|
||||
(i.e. {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}).
|
||||
|
||||
Please ensure all assertions are inside leaf nodes such as {{bold}}BeforeEach{{/}},
|
||||
{{bold}}It{{/}}, etc.
|
||||
|
||||
{{bold}}Here's the content of the panic that was caught:{{/}}
|
||||
%v`, caughtPanic),
|
||||
CodeLocation: cl,
|
||||
DocLink: "no-assertions-in-container-nodes",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) SuiteNodeInNestedContext(nodeType NodeType, cl CodeLocation) error {
|
||||
docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite"
|
||||
if nodeType.Is(NodeTypeReportAfterSuite) {
|
||||
docLink = "reporting-nodes---reportaftersuite"
|
||||
}
|
||||
|
||||
return GinkgoError{
|
||||
Heading: "Ginkgo detected an issue with your spec structure",
|
||||
Message: formatter.F(
|
||||
`It looks like you are trying to add a {{bold}}[%s]{{/}} node within a container node.
|
||||
|
||||
{{bold}}%s{{/}} can only be called at the top level.`, nodeType, nodeType),
|
||||
CodeLocation: cl,
|
||||
DocLink: docLink,
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) SuiteNodeDuringRunPhase(nodeType NodeType, cl CodeLocation) error {
|
||||
docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite"
|
||||
if nodeType.Is(NodeTypeReportAfterSuite) {
|
||||
docLink = "reporting-nodes---reportaftersuite"
|
||||
}
|
||||
|
||||
return GinkgoError{
|
||||
Heading: "Ginkgo detected an issue with your spec structure",
|
||||
Message: formatter.F(
|
||||
`It looks like you are trying to add a {{bold}}[%s]{{/}} node within a leaf node after the spec started running.
|
||||
|
||||
{{bold}}%s{{/}} can only be called at the top level.`, nodeType, nodeType),
|
||||
CodeLocation: cl,
|
||||
DocLink: docLink,
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) MultipleBeforeSuiteNodes(nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error {
|
||||
return ginkgoErrorMultipleSuiteNodes("setup", nodeType, cl, earlierNodeType, earlierCodeLocation)
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) MultipleAfterSuiteNodes(nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error {
|
||||
return ginkgoErrorMultipleSuiteNodes("teardown", nodeType, cl, earlierNodeType, earlierCodeLocation)
|
||||
}
|
||||
|
||||
func ginkgoErrorMultipleSuiteNodes(setupOrTeardown string, nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error {
|
||||
return GinkgoError{
|
||||
Heading: "Ginkgo detected an issue with your spec structure",
|
||||
Message: formatter.F(
|
||||
`It looks like you are trying to add a {{bold}}[%s]{{/}} node but
|
||||
you already have a {{bold}}[%s]{{/}} node defined at: {{gray}}%s{{/}}.
|
||||
|
||||
Ginkgo only allows you to define one suite %s node.`, nodeType, earlierNodeType, earlierCodeLocation, setupOrTeardown),
|
||||
CodeLocation: cl,
|
||||
DocLink: "suite-setup-and-cleanup-beforesuite-and-aftersuite",
|
||||
}
|
||||
}
|
||||
|
||||
/* Decorator errors */
|
||||
func (g ginkgoErrors) InvalidDecoratorForNodeType(cl CodeLocation, nodeType NodeType, decorator string) error {
|
||||
return GinkgoError{
|
||||
Heading: "Invalid Decorator",
|
||||
Message: formatter.F(`[%s] node cannot be passed a(n) '%s' decorator`, nodeType, decorator),
|
||||
CodeLocation: cl,
|
||||
DocLink: "node-decorators-overview",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) InvalidDeclarationOfFocusedAndPending(cl CodeLocation, nodeType NodeType) error {
|
||||
return GinkgoError{
|
||||
Heading: "Invalid Combination of Decorators: Focused and Pending",
|
||||
Message: formatter.F(`[%s] node was decorated with both Focus and Pending. At most one is allowed.`, nodeType),
|
||||
CodeLocation: cl,
|
||||
DocLink: "node-decorators-overview",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly(cl CodeLocation, nodeType NodeType) error {
|
||||
return GinkgoError{
|
||||
Heading: "Invalid Combination of Decorators: FlakeAttempts and MustPassRepeatedly",
|
||||
Message: formatter.F(`[%s] node was decorated with both FlakeAttempts and MustPassRepeatedly. At most one is allowed.`, nodeType),
|
||||
CodeLocation: cl,
|
||||
DocLink: "node-decorators-overview",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) UnknownDecorator(cl CodeLocation, nodeType NodeType, decorator interface{}) error {
|
||||
return GinkgoError{
|
||||
Heading: "Unknown Decorator",
|
||||
Message: formatter.F(`[%s] node was passed an unknown decorator: '%#v'`, nodeType, decorator),
|
||||
CodeLocation: cl,
|
||||
DocLink: "node-decorators-overview",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) InvalidBodyTypeForContainer(t reflect.Type, cl CodeLocation, nodeType NodeType) error {
|
||||
return GinkgoError{
|
||||
Heading: "Invalid Function",
|
||||
Message: formatter.F(`[%s] node must be passed {{bold}}func(){{/}} - i.e. functions that take nothing and return nothing. You passed {{bold}}%s{{/}} instead.`, nodeType, t),
|
||||
CodeLocation: cl,
|
||||
DocLink: "node-decorators-overview",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) InvalidBodyType(t reflect.Type, cl CodeLocation, nodeType NodeType) error {
|
||||
mustGet := "{{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}"
|
||||
if nodeType.Is(NodeTypeContainer) {
|
||||
mustGet = "{{bold}}func(){{/}}"
|
||||
}
|
||||
return GinkgoError{
|
||||
Heading: "Invalid Function",
|
||||
Message: formatter.F(`[%s] node must be passed `+mustGet+`.
|
||||
You passed {{bold}}%s{{/}} instead.`, nodeType, t),
|
||||
CodeLocation: cl,
|
||||
DocLink: "node-decorators-overview",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) InvalidBodyTypeForSynchronizedBeforeSuiteProc1(t reflect.Type, cl CodeLocation) error {
|
||||
mustGet := "{{bold}}func() []byte{{/}}, {{bold}}func(ctx SpecContext) []byte{{/}}, or {{bold}}func(ctx context.Context) []byte{{/}}, {{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}"
|
||||
return GinkgoError{
|
||||
Heading: "Invalid Function",
|
||||
Message: formatter.F(`[SynchronizedBeforeSuite] node must be passed `+mustGet+` for its first function.
|
||||
You passed {{bold}}%s{{/}} instead.`, t),
|
||||
CodeLocation: cl,
|
||||
DocLink: "node-decorators-overview",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) InvalidBodyTypeForSynchronizedBeforeSuiteAllProcs(t reflect.Type, cl CodeLocation) error {
|
||||
mustGet := "{{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}, {{bold}}func([]byte){{/}}, {{bold}}func(ctx SpecContext, []byte){{/}}, or {{bold}}func(ctx context.Context, []byte){{/}}"
|
||||
return GinkgoError{
|
||||
Heading: "Invalid Function",
|
||||
Message: formatter.F(`[SynchronizedBeforeSuite] node must be passed `+mustGet+` for its second function.
|
||||
You passed {{bold}}%s{{/}} instead.`, t),
|
||||
CodeLocation: cl,
|
||||
DocLink: "node-decorators-overview",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) MultipleBodyFunctions(cl CodeLocation, nodeType NodeType) error {
|
||||
return GinkgoError{
|
||||
Heading: "Multiple Functions",
|
||||
Message: formatter.F(`[%s] node must be passed a single function - but more than one was passed in.`, nodeType),
|
||||
CodeLocation: cl,
|
||||
DocLink: "node-decorators-overview",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) MissingBodyFunction(cl CodeLocation, nodeType NodeType) error {
|
||||
return GinkgoError{
|
||||
Heading: "Missing Functions",
|
||||
Message: formatter.F(`[%s] node must be passed a single function - but none was passed in.`, nodeType),
|
||||
CodeLocation: cl,
|
||||
DocLink: "node-decorators-overview",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) InvalidTimeoutOrGracePeriodForNonContextNode(cl CodeLocation, nodeType NodeType) error {
|
||||
return GinkgoError{
|
||||
Heading: "Invalid NodeTimeout SpecTimeout, or GracePeriod",
|
||||
Message: formatter.F(`[%s] was passed NodeTimeout, SpecTimeout, or GracePeriod but does not have a callback that accepts a {{bold}}SpecContext{{/}} or {{bold}}context.Context{{/}}. You must accept a context to enable timeouts and grace periods`, nodeType),
|
||||
CodeLocation: cl,
|
||||
DocLink: "spec-timeouts-and-interruptible-nodes",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) InvalidTimeoutOrGracePeriodForNonContextCleanupNode(cl CodeLocation) error {
|
||||
return GinkgoError{
|
||||
Heading: "Invalid NodeTimeout SpecTimeout, or GracePeriod",
|
||||
Message: formatter.F(`[DeferCleanup] was passed NodeTimeout or GracePeriod but does not have a callback that accepts a {{bold}}SpecContext{{/}} or {{bold}}context.Context{{/}}. You must accept a context to enable timeouts and grace periods`),
|
||||
CodeLocation: cl,
|
||||
DocLink: "spec-timeouts-and-interruptible-nodes",
|
||||
}
|
||||
}
|
||||
|
||||
/* Ordered Container errors */
|
||||
func (g ginkgoErrors) InvalidSerialNodeInNonSerialOrderedContainer(cl CodeLocation, nodeType NodeType) error {
|
||||
return GinkgoError{
|
||||
Heading: "Invalid Serial Node in Non-Serial Ordered Container",
|
||||
Message: formatter.F(`[%s] node was decorated with Serial but occurs in an Ordered container that is not marked Serial. Move the Serial decorator to the outer-most Ordered container to mark all ordered specs within the container as serial.`, nodeType),
|
||||
CodeLocation: cl,
|
||||
DocLink: "node-decorators-overview",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) SetupNodeNotInOrderedContainer(cl CodeLocation, nodeType NodeType) error {
|
||||
return GinkgoError{
|
||||
Heading: "Setup Node not in Ordered Container",
|
||||
Message: fmt.Sprintf("[%s] setup nodes must appear inside an Ordered container. They cannot be nested within other containers, even containers in an ordered container.", nodeType),
|
||||
CodeLocation: cl,
|
||||
DocLink: "ordered-containers",
|
||||
}
|
||||
}
|
||||
|
||||
/* DeferCleanup errors */
|
||||
func (g ginkgoErrors) DeferCleanupInvalidFunction(cl CodeLocation) error {
|
||||
return GinkgoError{
|
||||
Heading: "DeferCleanup requires a valid function",
|
||||
Message: "You must pass DeferCleanup a function to invoke. This function must return zero or one values - if it does return, it must return an error. The function can take arbitrarily many arguments and you should provide these to DeferCleanup to pass along to the function.",
|
||||
CodeLocation: cl,
|
||||
DocLink: "cleaning-up-our-cleanup-code-defercleanup",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) PushingCleanupNodeDuringTreeConstruction(cl CodeLocation) error {
|
||||
return GinkgoError{
|
||||
Heading: "DeferCleanup must be called inside a setup or subject node",
|
||||
Message: "You must call DeferCleanup inside a setup node (e.g. BeforeEach, BeforeSuite, AfterAll...) or a subject node (i.e. It). You can't call DeferCleanup at the top-level or in a container node - use the After* family of setup nodes instead.",
|
||||
CodeLocation: cl,
|
||||
DocLink: "cleaning-up-our-cleanup-code-defercleanup",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) PushingCleanupInReportingNode(cl CodeLocation, nodeType NodeType) error {
|
||||
return GinkgoError{
|
||||
Heading: fmt.Sprintf("DeferCleanup cannot be called in %s", nodeType),
|
||||
Message: "Please inline your cleanup code - Ginkgo won't run cleanup code after a ReportAfterEach or ReportAfterSuite.",
|
||||
CodeLocation: cl,
|
||||
DocLink: "cleaning-up-our-cleanup-code-defercleanup",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) PushingCleanupInCleanupNode(cl CodeLocation) error {
|
||||
return GinkgoError{
|
||||
Heading: "DeferCleanup cannot be called in a DeferCleanup callback",
|
||||
Message: "Please inline your cleanup code - Ginkgo doesn't let you call DeferCleanup from within DeferCleanup",
|
||||
CodeLocation: cl,
|
||||
DocLink: "cleaning-up-our-cleanup-code-defercleanup",
|
||||
}
|
||||
}
|
||||
|
||||
/* ReportEntry errors */
|
||||
func (g ginkgoErrors) TooManyReportEntryValues(cl CodeLocation, arg interface{}) error {
|
||||
return GinkgoError{
|
||||
Heading: "Too Many ReportEntry Values",
|
||||
Message: formatter.F(`{{bold}}AddGinkgoReport{{/}} can only be given one value. Got unexpected value: %#v`, arg),
|
||||
CodeLocation: cl,
|
||||
DocLink: "attaching-data-to-reports",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) AddReportEntryNotDuringRunPhase(cl CodeLocation) error {
|
||||
return GinkgoError{
|
||||
Heading: "Ginkgo detected an issue with your spec structure",
|
||||
Message: formatter.F(`It looks like you are calling {{bold}}AddGinkgoReport{{/}} outside of a running spec. Make sure you call {{bold}}AddGinkgoReport{{/}} inside a runnable node such as It or BeforeEach and not inside the body of a container such as Describe or Context.`),
|
||||
CodeLocation: cl,
|
||||
DocLink: "attaching-data-to-reports",
|
||||
}
|
||||
}
|
||||
|
||||
/* By errors */
|
||||
func (g ginkgoErrors) ByNotDuringRunPhase(cl CodeLocation) error {
|
||||
return GinkgoError{
|
||||
Heading: "Ginkgo detected an issue with your spec structure",
|
||||
Message: formatter.F(`It looks like you are calling {{bold}}By{{/}} outside of a running spec. Make sure you call {{bold}}By{{/}} inside a runnable node such as It or BeforeEach and not inside the body of a container such as Describe or Context.`),
|
||||
CodeLocation: cl,
|
||||
DocLink: "documenting-complex-specs-by",
|
||||
}
|
||||
}
|
||||
|
||||
/* FileFilter and SkipFilter errors */
|
||||
func (g ginkgoErrors) InvalidFileFilter(filter string) error {
|
||||
return GinkgoError{
|
||||
Heading: "Invalid File Filter",
|
||||
Message: fmt.Sprintf(`The provided file filter: "%s" is invalid. File filters must have the format "file", "file:lines" where "file" is a regular expression that will match against the file path and lines is a comma-separated list of integers (e.g. file:1,5,7) or line-ranges (e.g. file:1-3,5-9) or both (e.g. file:1,5-9)`, filter),
|
||||
DocLink: "filtering-specs",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) InvalidFileFilterRegularExpression(filter string, err error) error {
|
||||
return GinkgoError{
|
||||
Heading: "Invalid File Filter Regular Expression",
|
||||
Message: fmt.Sprintf(`The provided file filter: "%s" included an invalid regular expression. regexp.Compile error: %s`, filter, err),
|
||||
DocLink: "filtering-specs",
|
||||
}
|
||||
}
|
||||
|
||||
/* Label Errors */
|
||||
func (g ginkgoErrors) SyntaxErrorParsingLabelFilter(input string, location int, error string) error {
|
||||
var message string
|
||||
if location >= 0 {
|
||||
for i, r := range input {
|
||||
if i == location {
|
||||
message += "{{red}}{{bold}}{{underline}}"
|
||||
}
|
||||
message += string(r)
|
||||
if i == location {
|
||||
message += "{{/}}"
|
||||
}
|
||||
}
|
||||
} else {
|
||||
message = input
|
||||
}
|
||||
message += "\n" + error
|
||||
return GinkgoError{
|
||||
Heading: "Syntax Error Parsing Label Filter",
|
||||
Message: message,
|
||||
DocLink: "spec-labels",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) InvalidLabel(label string, cl CodeLocation) error {
|
||||
return GinkgoError{
|
||||
Heading: "Invalid Label",
|
||||
Message: fmt.Sprintf("'%s' is an invalid label. Labels cannot contain of the following characters: '&|!,()/'", label),
|
||||
CodeLocation: cl,
|
||||
DocLink: "spec-labels",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) InvalidEmptyLabel(cl CodeLocation) error {
|
||||
return GinkgoError{
|
||||
Heading: "Invalid Empty Label",
|
||||
Message: "Labels cannot be empty",
|
||||
CodeLocation: cl,
|
||||
DocLink: "spec-labels",
|
||||
}
|
||||
}
|
||||
|
||||
/* Table errors */
|
||||
func (g ginkgoErrors) MultipleEntryBodyFunctionsForTable(cl CodeLocation) error {
|
||||
return GinkgoError{
|
||||
Heading: "DescribeTable passed multiple functions",
|
||||
Message: "It looks like you are passing multiple functions into DescribeTable. Only one function can be passed in. This function will be called for each Entry in the table.",
|
||||
CodeLocation: cl,
|
||||
DocLink: "table-specs",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) InvalidEntryDescription(cl CodeLocation) error {
|
||||
return GinkgoError{
|
||||
Heading: "Invalid Entry description",
|
||||
Message: "Entry description functions must be a string, a function that accepts the entry parameters and returns a string, or nil.",
|
||||
CodeLocation: cl,
|
||||
DocLink: "table-specs",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) MissingParametersForTableFunction(cl CodeLocation) error {
|
||||
return GinkgoError{
|
||||
Heading: fmt.Sprintf("No parameters have been passed to the Table Function"),
|
||||
Message: fmt.Sprintf("The Table Function expected at least 1 parameter"),
|
||||
CodeLocation: cl,
|
||||
DocLink: "table-specs",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) IncorrectParameterTypeForTable(i int, name string, cl CodeLocation) error {
|
||||
return GinkgoError{
|
||||
Heading: "DescribeTable passed incorrect parameter type",
|
||||
Message: fmt.Sprintf("Parameter #%d passed to DescribeTable is of incorrect type <%s>", i, name),
|
||||
CodeLocation: cl,
|
||||
DocLink: "table-specs",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) TooFewParametersToTableFunction(expected, actual int, kind string, cl CodeLocation) error {
|
||||
return GinkgoError{
|
||||
Heading: fmt.Sprintf("Too few parameters passed in to %s", kind),
|
||||
Message: fmt.Sprintf("The %s expected %d parameters but you passed in %d", kind, expected, actual),
|
||||
CodeLocation: cl,
|
||||
DocLink: "table-specs",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) TooManyParametersToTableFunction(expected, actual int, kind string, cl CodeLocation) error {
|
||||
return GinkgoError{
|
||||
Heading: fmt.Sprintf("Too many parameters passed in to %s", kind),
|
||||
Message: fmt.Sprintf("The %s expected %d parameters but you passed in %d", kind, expected, actual),
|
||||
CodeLocation: cl,
|
||||
DocLink: "table-specs",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) IncorrectParameterTypeToTableFunction(i int, expected, actual reflect.Type, kind string, cl CodeLocation) error {
|
||||
return GinkgoError{
|
||||
Heading: fmt.Sprintf("Incorrect parameters type passed to %s", kind),
|
||||
Message: fmt.Sprintf("The %s expected parameter #%d to be of type <%s> but you passed in <%s>", kind, i, expected, actual),
|
||||
CodeLocation: cl,
|
||||
DocLink: "table-specs",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) IncorrectVariadicParameterTypeToTableFunction(expected, actual reflect.Type, kind string, cl CodeLocation) error {
|
||||
return GinkgoError{
|
||||
Heading: fmt.Sprintf("Incorrect parameters type passed to %s", kind),
|
||||
Message: fmt.Sprintf("The %s expected its variadic parameters to be of type <%s> but you passed in <%s>", kind, expected, actual),
|
||||
CodeLocation: cl,
|
||||
DocLink: "table-specs",
|
||||
}
|
||||
}
|
||||
|
||||
/* Parallel Synchronization errors */
|
||||
|
||||
func (g ginkgoErrors) AggregatedReportUnavailableDueToNodeDisappearing() error {
|
||||
return GinkgoError{
|
||||
Heading: "Test Report unavailable because a Ginkgo parallel process disappeared",
|
||||
Message: "The aggregated report could not be fetched for a ReportAfterSuite node. A Ginkgo parallel process disappeared before it could finish reporting.",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) SynchronizedBeforeSuiteFailedOnProc1() error {
|
||||
return GinkgoError{
|
||||
Heading: "SynchronizedBeforeSuite failed on Ginkgo parallel process #1",
|
||||
Message: "The first SynchronizedBeforeSuite function running on Ginkgo parallel process #1 failed. This suite will now abort.",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) SynchronizedBeforeSuiteDisappearedOnProc1() error {
|
||||
return GinkgoError{
|
||||
Heading: "Process #1 disappeared before SynchronizedBeforeSuite could report back",
|
||||
Message: "Ginkgo parallel process #1 disappeared before the first SynchronizedBeforeSuite function completed. This suite will now abort.",
|
||||
}
|
||||
}
|
||||
|
||||
/* Configuration errors */
|
||||
|
||||
func (g ginkgoErrors) UnknownTypePassedToRunSpecs(value interface{}) error {
|
||||
return GinkgoError{
|
||||
Heading: "Unknown Type passed to RunSpecs",
|
||||
Message: fmt.Sprintf("RunSpecs() accepts labels, and configuration of type types.SuiteConfig and/or types.ReporterConfig.\n You passed in: %v", value),
|
||||
}
|
||||
}
|
||||
|
||||
var sharedParallelErrorMessage = "It looks like you are trying to run specs in parallel with go test.\nThis is unsupported and you should use the ginkgo CLI instead."
|
||||
|
||||
func (g ginkgoErrors) InvalidParallelTotalConfiguration() error {
|
||||
return GinkgoError{
|
||||
Heading: "-ginkgo.parallel.total must be >= 1",
|
||||
Message: sharedParallelErrorMessage,
|
||||
DocLink: "spec-parallelization",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) InvalidParallelProcessConfiguration() error {
|
||||
return GinkgoError{
|
||||
Heading: "-ginkgo.parallel.process is one-indexed and must be <= ginkgo.parallel.total",
|
||||
Message: sharedParallelErrorMessage,
|
||||
DocLink: "spec-parallelization",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) MissingParallelHostConfiguration() error {
|
||||
return GinkgoError{
|
||||
Heading: "-ginkgo.parallel.host is missing",
|
||||
Message: sharedParallelErrorMessage,
|
||||
DocLink: "spec-parallelization",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) UnreachableParallelHost(host string) error {
|
||||
return GinkgoError{
|
||||
Heading: "Could not reach ginkgo.parallel.host:" + host,
|
||||
Message: sharedParallelErrorMessage,
|
||||
DocLink: "spec-parallelization",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) DryRunInParallelConfiguration() error {
|
||||
return GinkgoError{
|
||||
Heading: "Ginkgo only performs -dryRun in serial mode.",
|
||||
Message: "Please try running ginkgo -dryRun again, but without -p or -procs to ensure the suite is running in series.",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) GracePeriodCannotBeZero() error {
|
||||
return GinkgoError{
|
||||
Heading: "Ginkgo requires a positive --grace-period.",
|
||||
Message: "Please set --grace-period to a positive duration. The default is 30s.",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) ConflictingVerbosityConfiguration() error {
|
||||
return GinkgoError{
|
||||
Heading: "Conflicting reporter verbosity settings.",
|
||||
Message: "You can't set more than one of -v, -vv and --succinct. Please pick one!",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) InvalidOutputInterceptorModeConfiguration(value string) error {
|
||||
return GinkgoError{
|
||||
Heading: fmt.Sprintf("Invalid value '%s' for --output-interceptor-mode.", value),
|
||||
Message: "You must choose one of 'dup', 'swap', or 'none'.",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) InvalidGoFlagCount() error {
|
||||
return GinkgoError{
|
||||
Heading: "Use of go test -count",
|
||||
Message: "Ginkgo does not support using go test -count to rerun suites. Only -count=1 is allowed. To repeat suite runs, please use the ginkgo cli and `ginkgo -until-it-fails` or `ginkgo -repeat=N`.",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) InvalidGoFlagParallel() error {
|
||||
return GinkgoError{
|
||||
Heading: "Use of go test -parallel",
|
||||
Message: "Go test's implementation of parallelization does not actually parallelize Ginkgo specs. Please use the ginkgo cli and `ginkgo -p` or `ginkgo -procs=N` instead.",
|
||||
}
|
||||
}
|
||||
|
||||
func (g ginkgoErrors) BothRepeatAndUntilItFails() error {
|
||||
return GinkgoError{
|
||||
Heading: "--repeat and --until-it-fails are both set",
|
||||
Message: "--until-it-fails directs Ginkgo to rerun specs indefinitely until they fail. --repeat directs Ginkgo to rerun specs a set number of times. You can't set both... which would you like?",
|
||||
}
|
||||
}
|
||||
|
||||
/* Stack-Trace parsing errors */
|
||||
|
||||
func (g ginkgoErrors) FailedToParseStackTrace(message string) error {
|
||||
return GinkgoError{
|
||||
Heading: "Failed to Parse Stack Trace",
|
||||
Message: message,
|
||||
}
|
||||
}
|
106
vendor/github.com/onsi/ginkgo/v2/types/file_filter.go
generated
vendored
Normal file
@@ -0,0 +1,106 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func ParseFileFilters(filters []string) (FileFilters, error) {
|
||||
ffs := FileFilters{}
|
||||
for _, filter := range filters {
|
||||
ff := FileFilter{}
|
||||
if filter == "" {
|
||||
return nil, GinkgoErrors.InvalidFileFilter(filter)
|
||||
}
|
||||
components := strings.Split(filter, ":")
|
||||
if !(len(components) == 1 || len(components) == 2) {
|
||||
return nil, GinkgoErrors.InvalidFileFilter(filter)
|
||||
}
|
||||
|
||||
var err error
|
||||
ff.Filename, err = regexp.Compile(components[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(components) == 2 {
|
||||
lineFilters := strings.Split(components[1], ",")
|
||||
for _, lineFilter := range lineFilters {
|
||||
components := strings.Split(lineFilter, "-")
|
||||
if len(components) == 1 {
|
||||
line, err := strconv.Atoi(strings.TrimSpace(components[0]))
|
||||
if err != nil {
|
||||
return nil, GinkgoErrors.InvalidFileFilter(filter)
|
||||
}
|
||||
ff.LineFilters = append(ff.LineFilters, LineFilter{line, line + 1})
|
||||
} else if len(components) == 2 {
|
||||
line1, err := strconv.Atoi(strings.TrimSpace(components[0]))
|
||||
if err != nil {
|
||||
return nil, GinkgoErrors.InvalidFileFilter(filter)
|
||||
}
|
||||
line2, err := strconv.Atoi(strings.TrimSpace(components[1]))
|
||||
if err != nil {
|
||||
return nil, GinkgoErrors.InvalidFileFilter(filter)
|
||||
}
|
||||
ff.LineFilters = append(ff.LineFilters, LineFilter{line1, line2})
|
||||
} else {
|
||||
return nil, GinkgoErrors.InvalidFileFilter(filter)
|
||||
}
|
||||
}
|
||||
}
|
||||
ffs = append(ffs, ff)
|
||||
}
|
||||
return ffs, nil
|
||||
}
|
||||
|
||||
type FileFilter struct {
|
||||
Filename *regexp.Regexp
|
||||
LineFilters LineFilters
|
||||
}
|
||||
|
||||
func (f FileFilter) Matches(locations []CodeLocation) bool {
|
||||
for _, location := range locations {
|
||||
if f.Filename.MatchString(location.FileName) &&
|
||||
f.LineFilters.Matches(location.LineNumber) {
|
||||
return true
|
||||
}
|
||||
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type FileFilters []FileFilter
|
||||
|
||||
func (ffs FileFilters) Matches(locations []CodeLocation) bool {
|
||||
for _, ff := range ffs {
|
||||
if ff.Matches(locations) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
type LineFilter struct {
|
||||
Min int
|
||||
Max int
|
||||
}
|
||||
|
||||
func (lf LineFilter) Matches(line int) bool {
|
||||
return lf.Min <= line && line < lf.Max
|
||||
}
|
||||
|
||||
type LineFilters []LineFilter
|
||||
|
||||
func (lfs LineFilters) Matches(line int) bool {
|
||||
if len(lfs) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
for _, lf := range lfs {
|
||||
if lf.Matches(line) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
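A quick usage sketch for the file-filter API above, assuming only the exported functions in this file plus the FileName/LineNumber fields of types.CodeLocation; the file path and line numbers are invented for illustration. A filter string of the form "regexp:line1,line2-line3" compiles the regexp part and treats each N-M range as half-open, i.e. Min <= line < Max.

package main

import (
	"fmt"

	"github.com/onsi/ginkgo/v2/types"
)

func main() {
	// "suite_test:10-20" => filename regexp "suite_test", lines 10 (inclusive) through 20 (exclusive).
	ffs, err := types.ParseFileFilters([]string{"suite_test:10-20"})
	if err != nil {
		panic(err)
	}

	loc := types.CodeLocation{FileName: "/repo/pkg/suite_test.go", LineNumber: 15}
	fmt.Println(ffs.Matches([]types.CodeLocation{loc})) // true: the regexp matches and 10 <= 15 < 20
}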
489
vendor/github.com/onsi/ginkgo/v2/types/flags.go
generated
vendored
Normal file
@@ -0,0 +1,489 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/v2/formatter"
|
||||
)
|
||||
|
||||
type GinkgoFlag struct {
|
||||
Name string
|
||||
KeyPath string
|
||||
SectionKey string
|
||||
|
||||
Usage string
|
||||
UsageArgument string
|
||||
UsageDefaultValue string
|
||||
|
||||
DeprecatedName string
|
||||
DeprecatedDocLink string
|
||||
DeprecatedVersion string
|
||||
|
||||
ExportAs string
|
||||
}
|
||||
|
||||
type GinkgoFlags []GinkgoFlag
|
||||
|
||||
func (f GinkgoFlags) CopyAppend(flags ...GinkgoFlag) GinkgoFlags {
|
||||
out := GinkgoFlags{}
|
||||
out = append(out, f...)
|
||||
out = append(out, flags...)
|
||||
return out
|
||||
}
|
||||
|
||||
func (f GinkgoFlags) WithPrefix(prefix string) GinkgoFlags {
|
||||
if prefix == "" {
|
||||
return f
|
||||
}
|
||||
out := GinkgoFlags{}
|
||||
for _, flag := range f {
|
||||
if flag.Name != "" {
|
||||
flag.Name = prefix + "." + flag.Name
|
||||
}
|
||||
if flag.DeprecatedName != "" {
|
||||
flag.DeprecatedName = prefix + "." + flag.DeprecatedName
|
||||
}
|
||||
if flag.ExportAs != "" {
|
||||
flag.ExportAs = prefix + "." + flag.ExportAs
|
||||
}
|
||||
out = append(out, flag)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func (f GinkgoFlags) SubsetWithNames(names ...string) GinkgoFlags {
|
||||
out := GinkgoFlags{}
|
||||
for _, flag := range f {
|
||||
for _, name := range names {
|
||||
if flag.Name == name {
|
||||
out = append(out, flag)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
type GinkgoFlagSection struct {
|
||||
Key string
|
||||
Style string
|
||||
Succinct bool
|
||||
Heading string
|
||||
Description string
|
||||
}
|
||||
|
||||
type GinkgoFlagSections []GinkgoFlagSection
|
||||
|
||||
func (gfs GinkgoFlagSections) Lookup(key string) (GinkgoFlagSection, bool) {
|
||||
for _, section := range gfs {
|
||||
if section.Key == key {
|
||||
return section, true
|
||||
}
|
||||
}
|
||||
|
||||
return GinkgoFlagSection{}, false
|
||||
}
|
||||
|
||||
type GinkgoFlagSet struct {
|
||||
flags GinkgoFlags
|
||||
bindings interface{}
|
||||
|
||||
sections GinkgoFlagSections
|
||||
extraGoFlagsSection GinkgoFlagSection
|
||||
|
||||
flagSet *flag.FlagSet
|
||||
}
|
||||
|
||||
// Call NewGinkgoFlagSet to create a GinkgoFlagSet that creates and binds to its own *flag.FlagSet
|
||||
func NewGinkgoFlagSet(flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections) (GinkgoFlagSet, error) {
|
||||
return bindFlagSet(GinkgoFlagSet{
|
||||
flags: flags,
|
||||
bindings: bindings,
|
||||
sections: sections,
|
||||
}, nil)
|
||||
}
|
||||
|
||||
// Call NewAttachedGinkgoFlagSet to create a GinkgoFlagSet that extends an existing *flag.FlagSet
|
||||
func NewAttachedGinkgoFlagSet(flagSet *flag.FlagSet, flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections, extraGoFlagsSection GinkgoFlagSection) (GinkgoFlagSet, error) {
|
||||
return bindFlagSet(GinkgoFlagSet{
|
||||
flags: flags,
|
||||
bindings: bindings,
|
||||
sections: sections,
|
||||
extraGoFlagsSection: extraGoFlagsSection,
|
||||
}, flagSet)
|
||||
}
|
||||
|
||||
func bindFlagSet(f GinkgoFlagSet, flagSet *flag.FlagSet) (GinkgoFlagSet, error) {
|
||||
if flagSet == nil {
|
||||
f.flagSet = flag.NewFlagSet("", flag.ContinueOnError)
|
||||
//suppress all output as Ginkgo is responsible for formatting usage
|
||||
f.flagSet.SetOutput(io.Discard)
|
||||
} else {
|
||||
f.flagSet = flagSet
|
||||
//we're piggybacking on an existing flagset (typically go test) so we have limited control
|
||||
//on user feedback
|
||||
f.flagSet.Usage = f.substituteUsage
|
||||
}
|
||||
|
||||
for _, flag := range f.flags {
|
||||
name := flag.Name
|
||||
|
||||
deprecatedUsage := "[DEPRECATED]"
|
||||
deprecatedName := flag.DeprecatedName
|
||||
if name != "" {
|
||||
deprecatedUsage = fmt.Sprintf("[DEPRECATED] use --%s instead", name)
|
||||
} else if flag.Usage != "" {
|
||||
deprecatedUsage += " " + flag.Usage
|
||||
}
|
||||
|
||||
value, ok := valueAtKeyPath(f.bindings, flag.KeyPath)
|
||||
if !ok {
|
||||
return GinkgoFlagSet{}, fmt.Errorf("could not load KeyPath: %s", flag.KeyPath)
|
||||
}
|
||||
|
||||
iface, addr := value.Interface(), value.Addr().Interface()
|
||||
|
||||
switch value.Type() {
|
||||
case reflect.TypeOf(string("")):
|
||||
if name != "" {
|
||||
f.flagSet.StringVar(addr.(*string), name, iface.(string), flag.Usage)
|
||||
}
|
||||
if deprecatedName != "" {
|
||||
f.flagSet.StringVar(addr.(*string), deprecatedName, iface.(string), deprecatedUsage)
|
||||
}
|
||||
case reflect.TypeOf(int64(0)):
|
||||
if name != "" {
|
||||
f.flagSet.Int64Var(addr.(*int64), name, iface.(int64), flag.Usage)
|
||||
}
|
||||
if deprecatedName != "" {
|
||||
f.flagSet.Int64Var(addr.(*int64), deprecatedName, iface.(int64), deprecatedUsage)
|
||||
}
|
||||
case reflect.TypeOf(float64(0)):
|
||||
if name != "" {
|
||||
f.flagSet.Float64Var(addr.(*float64), name, iface.(float64), flag.Usage)
|
||||
}
|
||||
if deprecatedName != "" {
|
||||
f.flagSet.Float64Var(addr.(*float64), deprecatedName, iface.(float64), deprecatedUsage)
|
||||
}
|
||||
case reflect.TypeOf(int(0)):
|
||||
if name != "" {
|
||||
f.flagSet.IntVar(addr.(*int), name, iface.(int), flag.Usage)
|
||||
}
|
||||
if deprecatedName != "" {
|
||||
f.flagSet.IntVar(addr.(*int), deprecatedName, iface.(int), deprecatedUsage)
|
||||
}
|
||||
case reflect.TypeOf(bool(true)):
|
||||
if name != "" {
|
||||
f.flagSet.BoolVar(addr.(*bool), name, iface.(bool), flag.Usage)
|
||||
}
|
||||
if deprecatedName != "" {
|
||||
f.flagSet.BoolVar(addr.(*bool), deprecatedName, iface.(bool), deprecatedUsage)
|
||||
}
|
||||
case reflect.TypeOf(time.Duration(0)):
|
||||
if name != "" {
|
||||
f.flagSet.DurationVar(addr.(*time.Duration), name, iface.(time.Duration), flag.Usage)
|
||||
}
|
||||
if deprecatedName != "" {
|
||||
f.flagSet.DurationVar(addr.(*time.Duration), deprecatedName, iface.(time.Duration), deprecatedUsage)
|
||||
}
|
||||
|
||||
case reflect.TypeOf([]string{}):
|
||||
if name != "" {
|
||||
f.flagSet.Var(stringSliceVar{value}, name, flag.Usage)
|
||||
}
|
||||
if deprecatedName != "" {
|
||||
f.flagSet.Var(stringSliceVar{value}, deprecatedName, deprecatedUsage)
|
||||
}
|
||||
default:
|
||||
return GinkgoFlagSet{}, fmt.Errorf("unsupported type %T", iface)
|
||||
}
|
||||
}
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func (f GinkgoFlagSet) IsZero() bool {
|
||||
return f.flagSet == nil
|
||||
}
|
||||
|
||||
func (f GinkgoFlagSet) WasSet(name string) bool {
|
||||
found := false
|
||||
f.flagSet.Visit(func(f *flag.Flag) {
|
||||
if f.Name == name {
|
||||
found = true
|
||||
}
|
||||
})
|
||||
|
||||
return found
|
||||
}
|
||||
|
||||
func (f GinkgoFlagSet) Lookup(name string) *flag.Flag {
|
||||
return f.flagSet.Lookup(name)
|
||||
}
|
||||
|
||||
func (f GinkgoFlagSet) Parse(args []string) ([]string, error) {
|
||||
if f.IsZero() {
|
||||
return args, nil
|
||||
}
|
||||
err := f.flagSet.Parse(args)
|
||||
if err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
return f.flagSet.Args(), nil
|
||||
}
|
||||
|
||||
func (f GinkgoFlagSet) ValidateDeprecations(deprecationTracker *DeprecationTracker) {
|
||||
if f.IsZero() {
|
||||
return
|
||||
}
|
||||
f.flagSet.Visit(func(flag *flag.Flag) {
|
||||
for _, ginkgoFlag := range f.flags {
|
||||
if ginkgoFlag.DeprecatedName != "" && strings.HasSuffix(flag.Name, ginkgoFlag.DeprecatedName) {
|
||||
message := fmt.Sprintf("--%s is deprecated", ginkgoFlag.DeprecatedName)
|
||||
if ginkgoFlag.Name != "" {
|
||||
message = fmt.Sprintf("--%s is deprecated, use --%s instead", ginkgoFlag.DeprecatedName, ginkgoFlag.Name)
|
||||
} else if ginkgoFlag.Usage != "" {
|
||||
message += " " + ginkgoFlag.Usage
|
||||
}
|
||||
|
||||
deprecationTracker.TrackDeprecation(Deprecation{
|
||||
Message: message,
|
||||
DocLink: ginkgoFlag.DeprecatedDocLink,
|
||||
Version: ginkgoFlag.DeprecatedVersion,
|
||||
})
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (f GinkgoFlagSet) Usage() string {
|
||||
if f.IsZero() {
|
||||
return ""
|
||||
}
|
||||
groupedFlags := map[GinkgoFlagSection]GinkgoFlags{}
|
||||
ungroupedFlags := GinkgoFlags{}
|
||||
managedFlags := map[string]bool{}
|
||||
extraGoFlags := []*flag.Flag{}
|
||||
|
||||
for _, flag := range f.flags {
|
||||
managedFlags[flag.Name] = true
|
||||
managedFlags[flag.DeprecatedName] = true
|
||||
|
||||
if flag.Name == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
section, ok := f.sections.Lookup(flag.SectionKey)
|
||||
if ok {
|
||||
groupedFlags[section] = append(groupedFlags[section], flag)
|
||||
} else {
|
||||
ungroupedFlags = append(ungroupedFlags, flag)
|
||||
}
|
||||
}
|
||||
|
||||
f.flagSet.VisitAll(func(flag *flag.Flag) {
|
||||
if !managedFlags[flag.Name] {
|
||||
extraGoFlags = append(extraGoFlags, flag)
|
||||
}
|
||||
})
|
||||
|
||||
out := ""
|
||||
for _, section := range f.sections {
|
||||
flags := groupedFlags[section]
|
||||
if len(flags) == 0 {
|
||||
continue
|
||||
}
|
||||
out += f.usageForSection(section)
|
||||
if section.Succinct {
|
||||
succinctFlags := []string{}
|
||||
for _, flag := range flags {
|
||||
if flag.Name != "" {
|
||||
succinctFlags = append(succinctFlags, fmt.Sprintf("--%s", flag.Name))
|
||||
}
|
||||
}
|
||||
out += formatter.Fiw(1, formatter.COLS, section.Style+strings.Join(succinctFlags, ", ")+"{{/}}\n")
|
||||
} else {
|
||||
for _, flag := range flags {
|
||||
out += f.usageForFlag(flag, section.Style)
|
||||
}
|
||||
}
|
||||
out += "\n"
|
||||
}
|
||||
if len(ungroupedFlags) > 0 {
|
||||
for _, flag := range ungroupedFlags {
|
||||
out += f.usageForFlag(flag, "")
|
||||
}
|
||||
out += "\n"
|
||||
}
|
||||
if len(extraGoFlags) > 0 {
|
||||
out += f.usageForSection(f.extraGoFlagsSection)
|
||||
for _, goFlag := range extraGoFlags {
|
||||
out += f.usageForGoFlag(goFlag)
|
||||
}
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
func (f GinkgoFlagSet) substituteUsage() {
|
||||
fmt.Fprintln(f.flagSet.Output(), f.Usage())
|
||||
}
|
||||
|
||||
func valueAtKeyPath(root interface{}, keyPath string) (reflect.Value, bool) {
|
||||
if len(keyPath) == 0 {
|
||||
return reflect.Value{}, false
|
||||
}
|
||||
|
||||
val := reflect.ValueOf(root)
|
||||
components := strings.Split(keyPath, ".")
|
||||
for _, component := range components {
|
||||
val = reflect.Indirect(val)
|
||||
switch val.Kind() {
|
||||
case reflect.Map:
|
||||
val = val.MapIndex(reflect.ValueOf(component))
|
||||
if val.Kind() == reflect.Interface {
|
||||
val = reflect.ValueOf(val.Interface())
|
||||
}
|
||||
case reflect.Struct:
|
||||
val = val.FieldByName(component)
|
||||
default:
|
||||
return reflect.Value{}, false
|
||||
}
|
||||
if (val == reflect.Value{}) {
|
||||
return reflect.Value{}, false
|
||||
}
|
||||
}
|
||||
|
||||
return val, true
|
||||
}
|
||||
|
||||
func (f GinkgoFlagSet) usageForSection(section GinkgoFlagSection) string {
|
||||
out := formatter.F(section.Style + "{{bold}}{{underline}}" + section.Heading + "{{/}}\n")
|
||||
if section.Description != "" {
|
||||
out += formatter.Fiw(0, formatter.COLS, section.Description+"\n")
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func (f GinkgoFlagSet) usageForFlag(flag GinkgoFlag, style string) string {
|
||||
argument := flag.UsageArgument
|
||||
defValue := flag.UsageDefaultValue
|
||||
if argument == "" {
|
||||
value, _ := valueAtKeyPath(f.bindings, flag.KeyPath)
|
||||
switch value.Type() {
|
||||
case reflect.TypeOf(string("")):
|
||||
argument = "string"
|
||||
case reflect.TypeOf(int64(0)), reflect.TypeOf(int(0)):
|
||||
argument = "int"
|
||||
case reflect.TypeOf(time.Duration(0)):
|
||||
argument = "duration"
|
||||
case reflect.TypeOf(float64(0)):
|
||||
argument = "float"
|
||||
case reflect.TypeOf([]string{}):
|
||||
argument = "string"
|
||||
}
|
||||
}
|
||||
if argument != "" {
|
||||
argument = "[" + argument + "] "
|
||||
}
|
||||
if defValue != "" {
|
||||
defValue = fmt.Sprintf("(default: %s)", defValue)
|
||||
}
|
||||
hyphens := "--"
|
||||
if len(flag.Name) == 1 {
|
||||
hyphens = "-"
|
||||
}
|
||||
|
||||
out := formatter.Fi(1, style+"%s%s{{/}} %s{{gray}}%s{{/}}\n", hyphens, flag.Name, argument, defValue)
|
||||
out += formatter.Fiw(2, formatter.COLS, "{{light-gray}}%s{{/}}\n", flag.Usage)
|
||||
return out
|
||||
}
|
||||
|
||||
func (f GinkgoFlagSet) usageForGoFlag(goFlag *flag.Flag) string {
|
||||
//Taken directly from the flag package
|
||||
out := fmt.Sprintf(" -%s", goFlag.Name)
|
||||
name, usage := flag.UnquoteUsage(goFlag)
|
||||
if len(name) > 0 {
|
||||
out += " " + name
|
||||
}
|
||||
if len(out) <= 4 {
|
||||
out += "\t"
|
||||
} else {
|
||||
out += "\n \t"
|
||||
}
|
||||
out += strings.ReplaceAll(usage, "\n", "\n \t")
|
||||
out += "\n"
|
||||
return out
|
||||
}
|
||||
|
||||
type stringSliceVar struct {
|
||||
slice reflect.Value
|
||||
}
|
||||
|
||||
func (ssv stringSliceVar) String() string { return "" }
|
||||
func (ssv stringSliceVar) Set(s string) error {
|
||||
ssv.slice.Set(reflect.AppendSlice(ssv.slice, reflect.ValueOf([]string{s})))
|
||||
return nil
|
||||
}
|
||||
|
||||
//given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured.
|
||||
func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) {
|
||||
result := []string{}
|
||||
for _, flag := range flags {
|
||||
name := flag.ExportAs
|
||||
if name == "" {
|
||||
name = flag.Name
|
||||
}
|
||||
if name == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
value, ok := valueAtKeyPath(bindings, flag.KeyPath)
|
||||
if !ok {
|
||||
return []string{}, fmt.Errorf("could not load KeyPath: %s", flag.KeyPath)
|
||||
}
|
||||
|
||||
iface := value.Interface()
|
||||
switch value.Type() {
|
||||
case reflect.TypeOf(string("")):
|
||||
if iface.(string) != "" {
|
||||
result = append(result, fmt.Sprintf("--%s=%s", name, iface))
|
||||
}
|
||||
case reflect.TypeOf(int64(0)):
|
||||
if iface.(int64) != 0 {
|
||||
result = append(result, fmt.Sprintf("--%s=%d", name, iface))
|
||||
}
|
||||
case reflect.TypeOf(float64(0)):
|
||||
if iface.(float64) != 0 {
|
||||
result = append(result, fmt.Sprintf("--%s=%f", name, iface))
|
||||
}
|
||||
case reflect.TypeOf(int(0)):
|
||||
if iface.(int) != 0 {
|
||||
result = append(result, fmt.Sprintf("--%s=%d", name, iface))
|
||||
}
|
||||
case reflect.TypeOf(bool(true)):
|
||||
if iface.(bool) {
|
||||
result = append(result, fmt.Sprintf("--%s", name))
|
||||
}
|
||||
case reflect.TypeOf(time.Duration(0)):
|
||||
if iface.(time.Duration) != time.Duration(0) {
|
||||
result = append(result, fmt.Sprintf("--%s=%s", name, iface))
|
||||
}
|
||||
|
||||
case reflect.TypeOf([]string{}):
|
||||
strings := iface.([]string)
|
||||
for _, s := range strings {
|
||||
result = append(result, fmt.Sprintf("--%s=%s", name, s))
|
||||
}
|
||||
default:
|
||||
return []string{}, fmt.Errorf("unsupported type %T", iface)
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
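For orientation, the flag machinery above binds flag names to struct fields by reflecting on KeyPath, so bindings must be a pointer for the fields to be addressable. The following is a minimal, hypothetical sketch (the demoConfig struct and flag names are invented); it uses only NewGinkgoFlagSet and Parse as defined in this file.

package main

import (
	"fmt"

	"github.com/onsi/ginkgo/v2/types"
)

type demoConfig struct {
	Verbose bool
	Procs   int
}

func main() {
	conf := &demoConfig{Procs: 1}
	flags := types.GinkgoFlags{
		{Name: "verbose", KeyPath: "Verbose", Usage: "emit more output"},
		{Name: "procs", KeyPath: "Procs", Usage: "number of parallel processes"},
	}

	flagSet, err := types.NewGinkgoFlagSet(flags, conf, types.GinkgoFlagSections{})
	if err != nil {
		panic(err)
	}

	// Parse fills in the bound fields and returns any remaining positional arguments.
	rest, err := flagSet.Parse([]string{"--verbose", "--procs=4", "positional-arg"})
	if err != nil {
		panic(err)
	}
	fmt.Println(conf.Verbose, conf.Procs, rest) // true 4 [positional-arg]
}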
347
vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
generated
vendored
Normal file
@@ -0,0 +1,347 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var DEBUG_LABEL_FILTER_PARSING = false
|
||||
|
||||
type LabelFilter func([]string) bool
|
||||
|
||||
func matchLabelAction(label string) LabelFilter {
|
||||
expected := strings.ToLower(label)
|
||||
return func(labels []string) bool {
|
||||
for i := range labels {
|
||||
if strings.ToLower(labels[i]) == expected {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func matchLabelRegexAction(regex *regexp.Regexp) LabelFilter {
|
||||
return func(labels []string) bool {
|
||||
for i := range labels {
|
||||
if regex.MatchString(labels[i]) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func notAction(filter LabelFilter) LabelFilter {
|
||||
return func(labels []string) bool { return !filter(labels) }
|
||||
}
|
||||
|
||||
func andAction(a, b LabelFilter) LabelFilter {
|
||||
return func(labels []string) bool { return a(labels) && b(labels) }
|
||||
}
|
||||
|
||||
func orAction(a, b LabelFilter) LabelFilter {
|
||||
return func(labels []string) bool { return a(labels) || b(labels) }
|
||||
}
|
||||
|
||||
type lfToken uint
|
||||
|
||||
const (
|
||||
lfTokenInvalid lfToken = iota
|
||||
|
||||
lfTokenRoot
|
||||
lfTokenOpenGroup
|
||||
lfTokenCloseGroup
|
||||
lfTokenNot
|
||||
lfTokenAnd
|
||||
lfTokenOr
|
||||
lfTokenRegexp
|
||||
lfTokenLabel
|
||||
lfTokenEOF
|
||||
)
|
||||
|
||||
func (l lfToken) Precedence() int {
|
||||
switch l {
|
||||
case lfTokenRoot, lfTokenOpenGroup:
|
||||
return 0
|
||||
case lfTokenOr:
|
||||
return 1
|
||||
case lfTokenAnd:
|
||||
return 2
|
||||
case lfTokenNot:
|
||||
return 3
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
func (l lfToken) String() string {
|
||||
switch l {
|
||||
case lfTokenRoot:
|
||||
return "ROOT"
|
||||
case lfTokenOpenGroup:
|
||||
return "("
|
||||
case lfTokenCloseGroup:
|
||||
return ")"
|
||||
case lfTokenNot:
|
||||
return "!"
|
||||
case lfTokenAnd:
|
||||
return "&&"
|
||||
case lfTokenOr:
|
||||
return "||"
|
||||
case lfTokenRegexp:
|
||||
return "/regexp/"
|
||||
case lfTokenLabel:
|
||||
return "label"
|
||||
case lfTokenEOF:
|
||||
return "EOF"
|
||||
}
|
||||
return "INVALID"
|
||||
}
|
||||
|
||||
type treeNode struct {
|
||||
token lfToken
|
||||
location int
|
||||
value string
|
||||
|
||||
parent *treeNode
|
||||
leftNode *treeNode
|
||||
rightNode *treeNode
|
||||
}
|
||||
|
||||
func (tn *treeNode) setRightNode(node *treeNode) {
|
||||
tn.rightNode = node
|
||||
node.parent = tn
|
||||
}
|
||||
|
||||
func (tn *treeNode) setLeftNode(node *treeNode) {
|
||||
tn.leftNode = node
|
||||
node.parent = tn
|
||||
}
|
||||
|
||||
func (tn *treeNode) firstAncestorWithPrecedenceLEQ(precedence int) *treeNode {
|
||||
if tn.token.Precedence() <= precedence {
|
||||
return tn
|
||||
}
|
||||
return tn.parent.firstAncestorWithPrecedenceLEQ(precedence)
|
||||
}
|
||||
|
||||
func (tn *treeNode) firstUnmatchedOpenNode() *treeNode {
|
||||
if tn.token == lfTokenOpenGroup {
|
||||
return tn
|
||||
}
|
||||
if tn.parent == nil {
|
||||
return nil
|
||||
}
|
||||
return tn.parent.firstUnmatchedOpenNode()
|
||||
}
|
||||
|
||||
func (tn *treeNode) constructLabelFilter(input string) (LabelFilter, error) {
|
||||
switch tn.token {
|
||||
case lfTokenOpenGroup:
|
||||
return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, "Mismatched '(' - could not find matching ')'.")
|
||||
case lfTokenLabel:
|
||||
return matchLabelAction(tn.value), nil
|
||||
case lfTokenRegexp:
|
||||
re, err := regexp.Compile(tn.value)
|
||||
if err != nil {
|
||||
return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("RegExp compilation error: %s", err))
|
||||
}
|
||||
return matchLabelRegexAction(re), nil
|
||||
}
|
||||
|
||||
if tn.rightNode == nil {
|
||||
return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, -1, "Unexpected EOF.")
|
||||
}
|
||||
rightLF, err := tn.rightNode.constructLabelFilter(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch tn.token {
|
||||
case lfTokenRoot, lfTokenCloseGroup:
|
||||
return rightLF, nil
|
||||
case lfTokenNot:
|
||||
return notAction(rightLF), nil
|
||||
}
|
||||
|
||||
if tn.leftNode == nil {
|
||||
return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Malformed tree - '%s' is missing left operand.", tn.token))
|
||||
}
|
||||
leftLF, err := tn.leftNode.constructLabelFilter(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch tn.token {
|
||||
case lfTokenAnd:
|
||||
return andAction(leftLF, rightLF), nil
|
||||
case lfTokenOr:
|
||||
return orAction(leftLF, rightLF), nil
|
||||
}
|
||||
|
||||
return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Invalid token '%s'.", tn.token))
|
||||
}
|
||||
|
||||
func (tn *treeNode) tokenString() string {
|
||||
out := fmt.Sprintf("<%s", tn.token)
|
||||
if tn.value != "" {
|
||||
out += " | " + tn.value
|
||||
}
|
||||
out += ">"
|
||||
return out
|
||||
}
|
||||
|
||||
func (tn *treeNode) toString(indent int) string {
|
||||
out := tn.tokenString() + "\n"
|
||||
if tn.leftNode != nil {
|
||||
out += fmt.Sprintf("%s |_(L)_%s", strings.Repeat(" ", indent), tn.leftNode.toString(indent+1))
|
||||
}
|
||||
if tn.rightNode != nil {
|
||||
out += fmt.Sprintf("%s |_(R)_%s", strings.Repeat(" ", indent), tn.rightNode.toString(indent+1))
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func tokenize(input string) func() (*treeNode, error) {
|
||||
runes, i := []rune(input), 0
|
||||
|
||||
peekIs := func(r rune) bool {
|
||||
if i+1 < len(runes) {
|
||||
return runes[i+1] == r
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
consumeUntil := func(cutset string) (string, int) {
|
||||
j := i
|
||||
for ; j < len(runes); j++ {
|
||||
if strings.IndexRune(cutset, runes[j]) >= 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return string(runes[i:j]), j - i
|
||||
}
|
||||
|
||||
return func() (*treeNode, error) {
|
||||
for i < len(runes) && runes[i] == ' ' {
|
||||
i += 1
|
||||
}
|
||||
|
||||
if i >= len(runes) {
|
||||
return &treeNode{token: lfTokenEOF}, nil
|
||||
}
|
||||
|
||||
node := &treeNode{location: i}
|
||||
switch runes[i] {
|
||||
case '&':
|
||||
if !peekIs('&') {
|
||||
return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Invalid token '&'. Did you mean '&&'?")
|
||||
}
|
||||
i += 2
|
||||
node.token = lfTokenAnd
|
||||
case '|':
|
||||
if !peekIs('|') {
|
||||
return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Invalid token '|'. Did you mean '||'?")
|
||||
}
|
||||
i += 2
|
||||
node.token = lfTokenOr
|
||||
case '!':
|
||||
i += 1
|
||||
node.token = lfTokenNot
|
||||
case ',':
|
||||
i += 1
|
||||
node.token = lfTokenOr
|
||||
case '(':
|
||||
i += 1
|
||||
node.token = lfTokenOpenGroup
|
||||
case ')':
|
||||
i += 1
|
||||
node.token = lfTokenCloseGroup
|
||||
case '/':
|
||||
i += 1
|
||||
value, n := consumeUntil("/")
|
||||
i += n + 1
|
||||
node.token, node.value = lfTokenRegexp, value
|
||||
default:
|
||||
value, n := consumeUntil("&|!,()/")
|
||||
i += n
|
||||
node.token, node.value = lfTokenLabel, strings.TrimSpace(value)
|
||||
}
|
||||
return node, nil
|
||||
}
|
||||
}
|
||||
|
||||
func ParseLabelFilter(input string) (LabelFilter, error) {
|
||||
if DEBUG_LABEL_FILTER_PARSING {
|
||||
fmt.Println("\n==============")
|
||||
fmt.Println("Input: ", input)
|
||||
fmt.Print("Tokens: ")
|
||||
}
|
||||
nextToken := tokenize(input)
|
||||
|
||||
root := &treeNode{token: lfTokenRoot}
|
||||
current := root
|
||||
LOOP:
|
||||
for {
|
||||
node, err := nextToken()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if DEBUG_LABEL_FILTER_PARSING {
|
||||
fmt.Print(node.tokenString() + " ")
|
||||
}
|
||||
|
||||
switch node.token {
|
||||
case lfTokenEOF:
|
||||
break LOOP
|
||||
case lfTokenLabel, lfTokenRegexp:
|
||||
if current.rightNode != nil {
|
||||
return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found two adjacent labels. You need an operator between them.")
|
||||
}
|
||||
current.setRightNode(node)
|
||||
case lfTokenNot, lfTokenOpenGroup:
|
||||
if current.rightNode != nil {
|
||||
return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Invalid token '%s'.", node.token))
|
||||
}
|
||||
current.setRightNode(node)
|
||||
current = node
|
||||
case lfTokenAnd, lfTokenOr:
|
||||
if current.rightNode == nil {
|
||||
return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Operator '%s' missing left hand operand.", node.token))
|
||||
}
|
||||
nodeToStealFrom := current.firstAncestorWithPrecedenceLEQ(node.token.Precedence())
|
||||
node.setLeftNode(nodeToStealFrom.rightNode)
|
||||
nodeToStealFrom.setRightNode(node)
|
||||
current = node
|
||||
case lfTokenCloseGroup:
|
||||
firstUnmatchedOpenNode := current.firstUnmatchedOpenNode()
|
||||
if firstUnmatchedOpenNode == nil {
|
||||
return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Mismatched ')' - could not find matching '('.")
|
||||
}
|
||||
if firstUnmatchedOpenNode == current && current.rightNode == nil {
|
||||
return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found empty '()' group.")
|
||||
}
|
||||
firstUnmatchedOpenNode.token = lfTokenCloseGroup //signify the group is now closed
|
||||
current = firstUnmatchedOpenNode.parent
|
||||
default:
|
||||
return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unknown token '%s'.", node.token))
|
||||
}
|
||||
}
|
||||
if DEBUG_LABEL_FILTER_PARSING {
|
||||
fmt.Printf("\n Tree:\n%s", root.toString(0))
|
||||
}
|
||||
return root.constructLabelFilter(input)
|
||||
}
|
||||
|
||||
func ValidateAndCleanupLabel(label string, cl CodeLocation) (string, error) {
|
||||
out := strings.TrimSpace(label)
|
||||
if out == "" {
|
||||
return "", GinkgoErrors.InvalidEmptyLabel(cl)
|
||||
}
|
||||
if strings.ContainsAny(out, "&|!,()/") {
|
||||
return "", GinkgoErrors.InvalidLabel(label, cl)
|
||||
}
|
||||
return out, nil
|
||||
}
|
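To make the parser above concrete: ParseLabelFilter compiles a query into a LabelFilter closure that is applied to a spec's label set, with ',' acting as '||' and labels matched case-insensitively. A small sketch follows, using only ParseLabelFilter from this file; the labels themselves are invented.

package main

import (
	"fmt"

	"github.com/onsi/ginkgo/v2/types"
)

func main() {
	filter, err := types.ParseLabelFilter("(integration && !slow) || urgent")
	if err != nil {
		panic(err)
	}

	fmt.Println(filter([]string{"integration", "fast"})) // true: integration present, slow absent
	fmt.Println(filter([]string{"integration", "slow"})) // false: neither branch matches
	fmt.Println(filter([]string{"urgent"}))              // true: the right-hand || branch matches
}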
186
vendor/github.com/onsi/ginkgo/v2/types/report_entry.go
generated
vendored
Normal file
@@ -0,0 +1,186 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
//ReportEntryValue wraps a report entry's value ensuring it can be encoded and decoded safely into reports
|
||||
//and across the network connection when running in parallel
|
||||
type ReportEntryValue struct {
|
||||
raw interface{} //unexported to prevent gob from freaking out about unregistered structs
|
||||
AsJSON string
|
||||
Representation string
|
||||
}
|
||||
|
||||
func WrapEntryValue(value interface{}) ReportEntryValue {
|
||||
return ReportEntryValue{
|
||||
raw: value,
|
||||
}
|
||||
}
|
||||
|
||||
func (rev ReportEntryValue) GetRawValue() interface{} {
|
||||
return rev.raw
|
||||
}
|
||||
|
||||
func (rev ReportEntryValue) String() string {
|
||||
if rev.raw == nil {
|
||||
return ""
|
||||
}
|
||||
if colorableStringer, ok := rev.raw.(ColorableStringer); ok {
|
||||
return colorableStringer.ColorableString()
|
||||
}
|
||||
|
||||
if stringer, ok := rev.raw.(fmt.Stringer); ok {
|
||||
return stringer.String()
|
||||
}
|
||||
if rev.Representation != "" {
|
||||
return rev.Representation
|
||||
}
|
||||
return fmt.Sprintf("%+v", rev.raw)
|
||||
}
|
||||
|
||||
func (rev ReportEntryValue) MarshalJSON() ([]byte, error) {
|
||||
//All this to capture the representation at encoding time, not at creation time
|
||||
//This way users can Report on pointers and get their final values at reporting-time
|
||||
out := struct {
|
||||
AsJSON string
|
||||
Representation string
|
||||
}{
|
||||
Representation: rev.String(),
|
||||
}
|
||||
asJSON, err := json.Marshal(rev.raw)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out.AsJSON = string(asJSON)
|
||||
|
||||
return json.Marshal(out)
|
||||
}
|
||||
|
||||
func (rev *ReportEntryValue) UnmarshalJSON(data []byte) error {
|
||||
in := struct {
|
||||
AsJSON string
|
||||
Representation string
|
||||
}{}
|
||||
err := json.Unmarshal(data, &in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rev.AsJSON = in.AsJSON
|
||||
rev.Representation = in.Representation
|
||||
return json.Unmarshal([]byte(in.AsJSON), &(rev.raw))
|
||||
}
|
||||
|
||||
func (rev ReportEntryValue) GobEncode() ([]byte, error) {
|
||||
return rev.MarshalJSON()
|
||||
}
|
||||
|
||||
func (rev *ReportEntryValue) GobDecode(data []byte) error {
|
||||
return rev.UnmarshalJSON(data)
|
||||
}
|
||||
|
||||
// ReportEntry captures information attached to `SpecReport` via `AddReportEntry`
|
||||
type ReportEntry struct {
|
||||
// Visibility captures the visibility policy for this ReportEntry
|
||||
Visibility ReportEntryVisibility
|
||||
// Time captures the time the AddReportEntry was called
|
||||
Time time.Time
|
||||
// Location captures the location of the AddReportEntry call
|
||||
Location CodeLocation
|
||||
// Name captures the name of this report
|
||||
Name string
|
||||
// Value captures the (optional) object passed into AddReportEntry - this can be
|
||||
// anything the user wants. The value passed to AddReportEntry is wrapped in a ReportEntryValue to make
|
||||
// encoding/decoding the value easier. To access the raw value call entry.GetRawValue()
|
||||
Value ReportEntryValue
|
||||
}
|
||||
|
||||
// ColorableStringer is an interface that ReportEntry values can satisfy. If they do then ColorableString() is used to generate their representation.
|
||||
type ColorableStringer interface {
|
||||
ColorableString() string
|
||||
}
|
||||
|
||||
// StringRepresentation() returns the string representation of the value associated with the ReportEntry --
|
||||
// if value is nil, empty string is returned
|
||||
// if value is a `ColorableStringer` then `Value.ColorableString()` is returned
|
||||
// if value is a `fmt.Stringer` then `Value.String()` is returned
|
||||
// otherwise the value is formatted with "%+v"
|
||||
func (entry ReportEntry) StringRepresentation() string {
|
||||
return entry.Value.String()
|
||||
}
|
||||
|
||||
// GetRawValue returns the Value object that was passed to AddReportEntry
|
||||
// If called in-process this will be the same object that was passed into AddReportEntry.
|
||||
// If used from a rehydrated JSON file _or_ in a ReportAfterSuite when running in parallel this will be
|
||||
// a JSON-decoded interface{}. If you want to reconstitute your original object you can decode the entry.Value.AsJSON
|
||||
// field yourself.
|
||||
func (entry ReportEntry) GetRawValue() interface{} {
|
||||
return entry.Value.GetRawValue()
|
||||
}
|
||||
|
||||
|
||||
|
||||
type ReportEntries []ReportEntry
|
||||
|
||||
func (re ReportEntries) HasVisibility(visibilities ...ReportEntryVisibility) bool {
|
||||
for _, entry := range re {
|
||||
if entry.Visibility.Is(visibilities...) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (re ReportEntries) WithVisibility(visibilities ...ReportEntryVisibility) ReportEntries {
|
||||
out := ReportEntries{}
|
||||
|
||||
for _, entry := range re {
|
||||
if entry.Visibility.Is(visibilities...) {
|
||||
out = append(out, entry)
|
||||
}
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter
|
||||
type ReportEntryVisibility uint
|
||||
|
||||
const (
|
||||
// Always print out this ReportEntry
|
||||
ReportEntryVisibilityAlways ReportEntryVisibility = iota
|
||||
// Only print out this ReportEntry if the spec fails or if the test is run with -v
|
||||
ReportEntryVisibilityFailureOrVerbose
|
||||
// Never print out this ReportEntry (note that ReportEntries are always encoded in machine-readable reports (e.g. JSON, JUnit, etc.))
|
||||
ReportEntryVisibilityNever
|
||||
)
|
||||
|
||||
var revEnumSupport = NewEnumSupport(map[uint]string{
|
||||
uint(ReportEntryVisibilityAlways): "always",
|
||||
uint(ReportEntryVisibilityFailureOrVerbose): "failure-or-verbose",
|
||||
uint(ReportEntryVisibilityNever): "never",
|
||||
})
|
||||
|
||||
func (rev ReportEntryVisibility) String() string {
|
||||
return revEnumSupport.String(uint(rev))
|
||||
}
|
||||
func (rev *ReportEntryVisibility) UnmarshalJSON(b []byte) error {
|
||||
out, err := revEnumSupport.UnmarshJSON(b)
|
||||
*rev = ReportEntryVisibility(out)
|
||||
return err
|
||||
}
|
||||
func (rev ReportEntryVisibility) MarshalJSON() ([]byte, error) {
|
||||
return revEnumSupport.MarshJSON(uint(rev))
|
||||
}
|
||||
|
||||
func (v ReportEntryVisibility) Is(visibilities ...ReportEntryVisibility) bool {
|
||||
for _, visibility := range visibilities {
|
||||
if v == visibility {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
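For context on how the visibility helpers above are typically exercised: a console reporter filters ReportEntries by visibility before rendering them. The snippet below is a hypothetical sketch (entry names are invented) that relies only on the ReportEntry, ReportEntries and ReportEntryVisibility definitions in this file.

package main

import (
	"fmt"

	"github.com/onsi/ginkgo/v2/types"
)

func main() {
	entries := types.ReportEntries{
		{Name: "db-snapshot", Visibility: types.ReportEntryVisibilityAlways},
		{Name: "debug-dump", Visibility: types.ReportEntryVisibilityFailureOrVerbose},
		{Name: "internal-only", Visibility: types.ReportEntryVisibilityNever},
	}

	// Keep only the entries that would be shown for a passing, non-verbose run.
	for _, entry := range entries.WithVisibility(types.ReportEntryVisibilityAlways) {
		fmt.Println(entry.Name, entry.Visibility) // db-snapshot always
	}
}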
696
vendor/github.com/onsi/ginkgo/v2/types/types.go
generated
vendored
Normal file
@@ -0,0 +1,696 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const GINKGO_FOCUS_EXIT_CODE = 197
|
||||
const GINKGO_TIME_FORMAT = "01/02/06 15:04:05.999"
|
||||
|
||||
// Report captures information about a Ginkgo test run
|
||||
type Report struct {
|
||||
//SuitePath captures the absolute path to the test suite
|
||||
SuitePath string
|
||||
|
||||
//SuiteDescription captures the description string passed to the DSL's RunSpecs() function
|
||||
SuiteDescription string
|
||||
|
||||
//SuiteLabels captures any labels attached to the suite by the DSL's RunSpecs() function
|
||||
SuiteLabels []string
|
||||
|
||||
//SuiteSucceeded captures the success or failure status of the test run
|
||||
//If true, the test run is considered successful.
|
||||
//If false, the test run is considered unsuccessful
|
||||
SuiteSucceeded bool
|
||||
|
||||
//SuiteHasProgrammaticFocus captures whether the test suite has a test or set of tests that are programmatically focused
|
||||
//(i.e. an `FIt` or an `FDescribe`)
|
||||
SuiteHasProgrammaticFocus bool
|
||||
|
||||
//SpecialSuiteFailureReasons may contain special failure reasons
|
||||
//For example, a test suite might be considered "failed" even if none of the individual specs
|
||||
//have a failure state. For example, if the user has configured --fail-on-pending the test suite
|
||||
//will have failed if there are pending tests even though all non-pending tests may have passed. In such
|
||||
//cases, Ginkgo populates SpecialSuiteFailureReasons with a clear message indicating the reason for the failure.
|
||||
//SpecialSuiteFailureReasons is also populated if the test suite is interrupted by the user.
|
||||
//Since multiple special failure reasons can occur, this field is a slice.
|
||||
SpecialSuiteFailureReasons []string
|
||||
|
||||
//PreRunStats contains a set of stats captured before the test run begins. This is primarily used
|
||||
//by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs)
|
||||
//and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters.
|
||||
PreRunStats PreRunStats
|
||||
|
||||
//StartTime and EndTime capture the start and end time of the test run
|
||||
StartTime time.Time
|
||||
EndTime time.Time
|
||||
|
||||
//RunTime captures the duration of the test run
|
||||
RunTime time.Duration
|
||||
|
||||
//SuiteConfig captures the Ginkgo configuration governing this test run
|
||||
//SuiteConfig includes information necessary for reproducing an identical test run,
|
||||
//such as the random seed and any filters applied during the test run
|
||||
SuiteConfig SuiteConfig
|
||||
|
||||
//SpecReports is a list of all SpecReports generated by this test run
|
||||
SpecReports SpecReports
|
||||
}
|
||||
|
||||
//PreRunStats contains a set of stats captured before the test run begins. This is primarily used
|
||||
//by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs)
|
||||
//and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters.
|
||||
type PreRunStats struct {
|
||||
TotalSpecs int
|
||||
SpecsThatWillRun int
|
||||
}
|
||||
|
||||
//Add is used by Ginkgo's parallel aggregation mechanisms to combine test run reports from individual parallel processes
|
||||
//to form a complete final report.
|
||||
func (report Report) Add(other Report) Report {
|
||||
report.SuiteSucceeded = report.SuiteSucceeded && other.SuiteSucceeded
|
||||
|
||||
if other.StartTime.Before(report.StartTime) {
|
||||
report.StartTime = other.StartTime
|
||||
}
|
||||
|
||||
if other.EndTime.After(report.EndTime) {
|
||||
report.EndTime = other.EndTime
|
||||
}
|
||||
|
||||
specialSuiteFailureReasons := []string{}
|
||||
reasonsLookup := map[string]bool{}
|
||||
for _, reasons := range [][]string{report.SpecialSuiteFailureReasons, other.SpecialSuiteFailureReasons} {
|
||||
for _, reason := range reasons {
|
||||
if !reasonsLookup[reason] {
|
||||
reasonsLookup[reason] = true
|
||||
specialSuiteFailureReasons = append(specialSuiteFailureReasons, reason)
|
||||
}
|
||||
}
|
||||
}
|
||||
report.SpecialSuiteFailureReasons = specialSuiteFailureReasons
|
||||
report.RunTime = report.EndTime.Sub(report.StartTime)
|
||||
|
||||
reports := make(SpecReports, len(report.SpecReports)+len(other.SpecReports))
|
||||
for i := range report.SpecReports {
|
||||
reports[i] = report.SpecReports[i]
|
||||
}
|
||||
offset := len(report.SpecReports)
|
||||
for i := range other.SpecReports {
|
||||
reports[i+offset] = other.SpecReports[i]
|
||||
}
|
||||
|
||||
report.SpecReports = reports
|
||||
return report
|
||||
}
|
||||
|
||||
// SpecReport captures information about a Ginkgo spec.
|
||||
type SpecReport struct {
|
||||
// ContainerHierarchyTexts is a slice containing the text strings of
|
||||
// all Describe/Context/When containers in this spec's hierarchy.
|
||||
ContainerHierarchyTexts []string
|
||||
|
||||
// ContainerHierarchyLocations is a slice containing the CodeLocations of
|
||||
// all Describe/Context/When containers in this spec's hierarchy.
|
||||
ContainerHierarchyLocations []CodeLocation
|
||||
|
||||
// ContainerHierarchyLabels is a slice containing the labels of
|
||||
// all Describe/Context/When containers in this spec's hierarchy
|
||||
ContainerHierarchyLabels [][]string
|
||||
|
||||
// LeafNodeType, LeafNodeLocation, LeafNodeLabels and LeafNodeText capture the NodeType, CodeLocation, labels, and text
|
||||
// of the Ginkgo node being tested (typically a NodeTypeIt node, though this can also be
|
||||
// one of the NodeTypesForSuiteLevelNodes node types)
|
||||
LeafNodeType NodeType
|
||||
LeafNodeLocation CodeLocation
|
||||
LeafNodeLabels []string
|
||||
LeafNodeText string
|
||||
|
||||
// State captures whether the spec has passed, failed, etc.
|
||||
State SpecState
|
||||
|
||||
// IsSerial captures whether the spec has the Serial decorator
|
||||
IsSerial bool
|
||||
|
||||
// IsInOrderedContainer captures whether the spec appears in an Ordered container
|
||||
IsInOrderedContainer bool
|
||||
|
||||
// StartTime and EndTime capture the start and end time of the spec
|
||||
StartTime time.Time
|
||||
EndTime time.Time
|
||||
|
||||
// RunTime captures the duration of the spec
|
||||
RunTime time.Duration
|
||||
|
||||
// ParallelProcess captures the parallel process that this spec ran on
|
||||
ParallelProcess int
|
||||
|
||||
//Failure is populated if a spec has failed, panicked, been interrupted, or skipped by the user (e.g. calling Skip())
|
||||
//It includes detailed information about the Failure
|
||||
Failure Failure
|
||||
|
||||
// NumAttempts captures the number of times this Spec was run.
|
||||
// Flakey specs can be retried with ginkgo --flake-attempts=N or the use of the FlakeAttempts decorator.
|
||||
// Repeated specs can be retried with the use of the MustPassRepeatedly decorator
|
||||
NumAttempts int
|
||||
|
||||
// MaxFlakeAttempts captures whether the spec has been retried with ginkgo --flake-attempts=N or the use of the FlakeAttempts decorator.
|
||||
MaxFlakeAttempts int
|
||||
|
||||
// MaxMustPassRepeatedly captures whether the spec has the MustPassRepeatedly decorator
|
||||
MaxMustPassRepeatedly int
|
||||
|
||||
// CapturedGinkgoWriterOutput contains text printed to the GinkgoWriter
|
||||
CapturedGinkgoWriterOutput string
|
||||
|
||||
// CapturedStdOutErr contains text printed to stdout/stderr (when running in parallel)
|
||||
// This is always empty when running in series or calling CurrentSpecReport()
|
||||
// It is used internally by Ginkgo's reporter
|
||||
CapturedStdOutErr string
|
||||
|
||||
// ReportEntries contains any reports added via `AddReportEntry`
|
||||
ReportEntries ReportEntries
|
||||
|
||||
// ProgressReports contains any progress reports generated during this spec. These can either be manually triggered, or automatically generated by Ginkgo via the PollProgressAfter() decorator
|
||||
ProgressReports []ProgressReport
|
||||
|
||||
// AdditionalFailures contains any failures that occurred after the initial spec failure. These typically occur in cleanup nodes after the initial failure and are only emitted when running in verbose mode.
|
||||
AdditionalFailures []AdditionalFailure
|
||||
}
|
||||
|
||||
func (report SpecReport) MarshalJSON() ([]byte, error) {
|
||||
//All this to avoid emitting an empty Failure struct in the JSON
|
||||
out := struct {
|
||||
ContainerHierarchyTexts []string
|
||||
ContainerHierarchyLocations []CodeLocation
|
||||
ContainerHierarchyLabels [][]string
|
||||
LeafNodeType NodeType
|
||||
LeafNodeLocation CodeLocation
|
||||
LeafNodeLabels []string
|
||||
LeafNodeText string
|
||||
State SpecState
|
||||
StartTime time.Time
|
||||
EndTime time.Time
|
||||
RunTime time.Duration
|
||||
ParallelProcess int
|
||||
Failure *Failure `json:",omitempty"`
|
||||
NumAttempts int
|
||||
MaxFlakeAttempts int
|
||||
MaxMustPassRepeatedly int
|
||||
CapturedGinkgoWriterOutput string `json:",omitempty"`
|
||||
CapturedStdOutErr string `json:",omitempty"`
|
||||
ReportEntries ReportEntries `json:",omitempty"`
|
||||
ProgressReports []ProgressReport `json:",omitempty"`
|
||||
AdditionalFailures []AdditionalFailure `json:",omitempty"`
|
||||
}{
|
||||
ContainerHierarchyTexts: report.ContainerHierarchyTexts,
|
||||
ContainerHierarchyLocations: report.ContainerHierarchyLocations,
|
||||
ContainerHierarchyLabels: report.ContainerHierarchyLabels,
|
||||
LeafNodeType: report.LeafNodeType,
|
||||
LeafNodeLocation: report.LeafNodeLocation,
|
||||
LeafNodeLabels: report.LeafNodeLabels,
|
||||
LeafNodeText: report.LeafNodeText,
|
||||
State: report.State,
|
||||
StartTime: report.StartTime,
|
||||
EndTime: report.EndTime,
|
||||
RunTime: report.RunTime,
|
||||
ParallelProcess: report.ParallelProcess,
|
||||
Failure: nil,
|
||||
ReportEntries: nil,
|
||||
NumAttempts: report.NumAttempts,
|
||||
MaxFlakeAttempts: report.MaxFlakeAttempts,
|
||||
MaxMustPassRepeatedly: report.MaxMustPassRepeatedly,
|
||||
CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput,
|
||||
CapturedStdOutErr: report.CapturedStdOutErr,
|
||||
}
|
||||
|
||||
if !report.Failure.IsZero() {
|
||||
out.Failure = &(report.Failure)
|
||||
}
|
||||
if len(report.ReportEntries) > 0 {
|
||||
out.ReportEntries = report.ReportEntries
|
||||
}
|
||||
if len(report.ProgressReports) > 0 {
|
||||
out.ProgressReports = report.ProgressReports
|
||||
}
|
||||
if len(report.AdditionalFailures) > 0 {
|
||||
out.AdditionalFailures = report.AdditionalFailures
|
||||
}
|
||||
|
||||
return json.Marshal(out)
|
||||
}
|
||||
|
||||
// CombinedOutput returns a single string representation of both CapturedStdOutErr and CapturedGinkgoWriterOutput
|
||||
// Note that both are empty when using CurrentSpecReport() so CurrentSpecReport().CombinedOutput() will always be empty.
|
||||
// CombinedOutput() is used internally by Ginkgo's reporter.
|
||||
func (report SpecReport) CombinedOutput() string {
|
||||
if report.CapturedStdOutErr == "" {
|
||||
return report.CapturedGinkgoWriterOutput
|
||||
}
|
||||
if report.CapturedGinkgoWriterOutput == "" {
|
||||
return report.CapturedStdOutErr
|
||||
}
|
||||
return report.CapturedStdOutErr + "\n" + report.CapturedGinkgoWriterOutput
|
||||
}
|
||||
|
||||
//Failed returns true if report.State is one of the SpecStateFailureStates
|
||||
// (SpecStateFailed, SpecStatePanicked, SpecStateInterrupted, SpecStateAborted, SpecStateTimedout)
|
||||
func (report SpecReport) Failed() bool {
|
||||
return report.State.Is(SpecStateFailureStates)
|
||||
}
|
||||
|
||||
//FullText returns a concatenation of all the report.ContainerHierarchyTexts and report.LeafNodeText
|
||||
func (report SpecReport) FullText() string {
|
||||
texts := []string{}
|
||||
texts = append(texts, report.ContainerHierarchyTexts...)
|
||||
if report.LeafNodeText != "" {
|
||||
texts = append(texts, report.LeafNodeText)
|
||||
}
|
||||
return strings.Join(texts, " ")
|
||||
}
|
||||
|
||||
//Labels returns a deduped set of all the spec's Labels.
|
||||
func (report SpecReport) Labels() []string {
|
||||
out := []string{}
|
||||
seen := map[string]bool{}
|
||||
for _, labels := range report.ContainerHierarchyLabels {
|
||||
for _, label := range labels {
|
||||
if !seen[label] {
|
||||
seen[label] = true
|
||||
out = append(out, label)
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, label := range report.LeafNodeLabels {
|
||||
if !seen[label] {
|
||||
seen[label] = true
|
||||
out = append(out, label)
|
||||
}
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
//MatchesLabelFilter returns true if the spec satisfies the passed in label filter query
|
||||
func (report SpecReport) MatchesLabelFilter(query string) (bool, error) {
|
||||
filter, err := ParseLabelFilter(query)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return filter(report.Labels()), nil
|
||||
}
|
||||
|
||||
//FileName() returns the name of the file containing the spec
|
||||
func (report SpecReport) FileName() string {
|
||||
return report.LeafNodeLocation.FileName
|
||||
}
|
||||
|
||||
//LineNumber() returns the line number of the leaf node
|
||||
func (report SpecReport) LineNumber() int {
|
||||
return report.LeafNodeLocation.LineNumber
|
||||
}
|
||||
|
||||
//FailureMessage() returns the failure message (or empty string if the test hasn't failed)
|
||||
func (report SpecReport) FailureMessage() string {
|
||||
return report.Failure.Message
|
||||
}
|
||||
|
||||
//FailureLocation() returns the location of the failure (or an empty CodeLocation if the test hasn't failed)
|
||||
func (report SpecReport) FailureLocation() CodeLocation {
|
||||
return report.Failure.Location
|
||||
}
|
||||
|
||||
type SpecReports []SpecReport
|
||||
|
||||
//WithLeafNodeType returns the subset of SpecReports with LeafNodeType matching one of the requested NodeTypes
|
||||
func (reports SpecReports) WithLeafNodeType(nodeTypes NodeType) SpecReports {
|
||||
count := 0
|
||||
for i := range reports {
|
||||
if reports[i].LeafNodeType.Is(nodeTypes) {
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
out := make(SpecReports, count)
|
||||
j := 0
|
||||
for i := range reports {
|
||||
if reports[i].LeafNodeType.Is(nodeTypes) {
|
||||
out[j] = reports[i]
|
||||
j++
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
//WithState returns the subset of SpecReports with State matching one of the requested SpecStates
|
||||
func (reports SpecReports) WithState(states SpecState) SpecReports {
|
||||
count := 0
|
||||
for i := range reports {
|
||||
if reports[i].State.Is(states) {
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
out, j := make(SpecReports, count), 0
|
||||
for i := range reports {
|
||||
if reports[i].State.Is(states) {
|
||||
out[j] = reports[i]
|
||||
j++
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
//CountWithState returns the number of SpecReports with State matching one of the requested SpecStates
|
||||
func (reports SpecReports) CountWithState(states SpecState) int {
|
||||
n := 0
|
||||
for i := range reports {
|
||||
if reports[i].State.Is(states) {
|
||||
n += 1
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
//CountOfFlakedSpecs returns the number of SpecReports that eventually passed but failed on one or more earlier attempts (i.e. flaky specs retried via FlakeAttempts).
|
||||
func (reports SpecReports) CountOfFlakedSpecs() int {
|
||||
n := 0
|
||||
for i := range reports {
|
||||
if reports[i].MaxFlakeAttempts > 1 && reports[i].State.Is(SpecStatePassed) && reports[i].NumAttempts > 1 {
|
||||
n += 1
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
//CountOfRepeatedSpecs returns the number of SpecReports that were rerun via MustPassRepeatedly and ended in a failure state after multiple attempts
|
||||
func (reports SpecReports) CountOfRepeatedSpecs() int {
|
||||
n := 0
|
||||
for i := range reports {
|
||||
if reports[i].MaxMustPassRepeatedly > 1 && reports[i].State.Is(SpecStateFailureStates) && reports[i].NumAttempts > 1 {
|
||||
n += 1
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// Failure captures failure information for an individual test
|
||||
type Failure struct {
|
||||
// Message - the failure message passed into Fail(...). When using a matcher library
|
||||
// like Gomega, this will contain the failure message generated by Gomega.
|
||||
//
|
||||
// Message is also populated if the user has called Skip(...).
|
||||
Message string
|
||||
|
||||
// Location - the CodeLocation where the failure occurred
|
||||
// This CodeLocation will include a fully-populated StackTrace
|
||||
Location CodeLocation
|
||||
|
||||
// ForwardedPanic - if the failure represents a captured panic (i.e. Summary.State == SpecStatePanicked)
|
||||
// then ForwardedPanic will be populated with a string representation of the captured panic.
|
||||
ForwardedPanic string `json:",omitempty"`
|
||||
|
||||
// FailureNodeContext - one of three contexts describing the node in which the failure occurred:
|
||||
// FailureNodeIsLeafNode means the failure occurred in the leaf node of the associated SpecReport. None of the other FailureNode fields will be populated
|
||||
// FailureNodeAtTopLevel means the failure occurred in a non-leaf node that is defined at the top-level of the spec (i.e. not in a container). FailureNodeType and FailureNodeLocation will be populated.
|
||||
// FailureNodeInContainer means the failure occurred in a non-leaf node that is defined within a container. FailureNodeType, FailureNodeLocation, and FailureNodeContainerIndex will be populated.
|
||||
//
|
||||
// FailureNodeType will contain the NodeType of the node in which the failure occurred.
|
||||
// FailureNodeLocation will contain the CodeLocation of the node in which the failure occurred.
|
||||
// If populated, FailureNodeContainerIndex will be the index into SpecReport.ContainerHierarchyTexts and SpecReport.ContainerHierarchyLocations that represents the parent container of the node in which the failure occurred.
|
||||
FailureNodeContext FailureNodeContext
|
||||
FailureNodeType NodeType
|
||||
FailureNodeLocation CodeLocation
|
||||
FailureNodeContainerIndex int
|
||||
|
||||
//ProgressReport is populated if the spec was interrupted or timed out
|
||||
ProgressReport ProgressReport
|
||||
}
|
||||
|
||||
func (f Failure) IsZero() bool {
|
||||
return f.Message == "" && (f.Location == CodeLocation{})
|
||||
}
|
||||
|
||||
// FailureNodeContext captures the location context for the node containing the failing line of code
|
||||
type FailureNodeContext uint
|
||||
|
||||
const (
|
||||
FailureNodeContextInvalid FailureNodeContext = iota
|
||||
|
||||
FailureNodeIsLeafNode
|
||||
FailureNodeAtTopLevel
|
||||
FailureNodeInContainer
|
||||
)
|
||||
|
||||
var fncEnumSupport = NewEnumSupport(map[uint]string{
|
||||
uint(FailureNodeContextInvalid): "INVALID FAILURE NODE CONTEXT",
|
||||
uint(FailureNodeIsLeafNode): "leaf-node",
|
||||
uint(FailureNodeAtTopLevel): "top-level",
|
||||
uint(FailureNodeInContainer): "in-container",
|
||||
})
|
||||
|
||||
func (fnc FailureNodeContext) String() string {
|
||||
return fncEnumSupport.String(uint(fnc))
|
||||
}
|
||||
func (fnc *FailureNodeContext) UnmarshalJSON(b []byte) error {
|
||||
out, err := fncEnumSupport.UnmarshJSON(b)
|
||||
*fnc = FailureNodeContext(out)
|
||||
return err
|
||||
}
|
||||
func (fnc FailureNodeContext) MarshalJSON() ([]byte, error) {
|
||||
return fncEnumSupport.MarshJSON(uint(fnc))
|
||||
}
|
||||
|
||||
// AdditionalFailure captures any additional failures that occur after the initial failure of a spec.
|
||||
// These typically occur in cleanup nodes after the spec has failed.
|
||||
// We can't simply use Failure as we want to track the SpecState to know what kind of failure this is
|
||||
type AdditionalFailure struct {
|
||||
State SpecState
|
||||
Failure Failure
|
||||
}

// SpecState captures the state of a spec
// To determine if a given `state` represents a failure state, use `state.Is(SpecStateFailureStates)`
type SpecState uint

const (
	SpecStateInvalid SpecState = 0

	SpecStatePending SpecState = 1 << iota
	SpecStateSkipped
	SpecStatePassed
	SpecStateFailed
	SpecStateAborted
	SpecStatePanicked
	SpecStateInterrupted
	SpecStateTimedout
)

var ssEnumSupport = NewEnumSupport(map[uint]string{
	uint(SpecStateInvalid):     "INVALID SPEC STATE",
	uint(SpecStatePending):     "pending",
	uint(SpecStateSkipped):     "skipped",
	uint(SpecStatePassed):      "passed",
	uint(SpecStateFailed):      "failed",
	uint(SpecStateAborted):     "aborted",
	uint(SpecStatePanicked):    "panicked",
	uint(SpecStateInterrupted): "interrupted",
	uint(SpecStateTimedout):    "timedout",
})

func (ss SpecState) String() string {
	return ssEnumSupport.String(uint(ss))
}
func (ss *SpecState) UnmarshalJSON(b []byte) error {
	out, err := ssEnumSupport.UnmarshJSON(b)
	*ss = SpecState(out)
	return err
}
func (ss SpecState) MarshalJSON() ([]byte, error) {
	return ssEnumSupport.MarshJSON(uint(ss))
}

var SpecStateFailureStates = SpecStateFailed | SpecStateTimedout | SpecStateAborted | SpecStatePanicked | SpecStateInterrupted

func (ss SpecState) Is(states SpecState) bool {
	return ss&states != 0
}
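
// Illustrative sketch (editor's addition, not part of the vendored upstream
// source): SpecState values are bit flags, so Is performs a bitwise AND and a
// combined mask such as SpecStateFailureStates matches any of its member
// states. specOutcome is a hypothetical helper assumed to sit alongside the
// definitions above.
func specOutcome(state SpecState) string {
	switch {
	case state.Is(SpecStateFailureStates):
		// Matches Failed, Timedout, Aborted, Panicked, or Interrupted.
		return "failure: " + state.String()
	case state.Is(SpecStatePassed):
		return "passed"
	case state.Is(SpecStatePending | SpecStateSkipped):
		return "not run: " + state.String()
	default:
		return "unknown: " + state.String()
	}
}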

// ProgressReport captures the progress of the current spec. It is, effectively, a structured Ginkgo-aware stack trace
type ProgressReport struct {
	Message           string
	ParallelProcess   int
	RunningInParallel bool

	Time time.Time

	ContainerHierarchyTexts []string
	LeafNodeText            string
	LeafNodeLocation        CodeLocation
	SpecStartTime           time.Time

	CurrentNodeType      NodeType
	CurrentNodeText      string
	CurrentNodeLocation  CodeLocation
	CurrentNodeStartTime time.Time

	CurrentStepText      string
	CurrentStepLocation  CodeLocation
	CurrentStepStartTime time.Time

	AdditionalReports []string

	CapturedGinkgoWriterOutput string `json:",omitempty"`
	GinkgoWriterOffset         int

	Goroutines []Goroutine
}

func (pr ProgressReport) IsZero() bool {
	return pr.CurrentNodeType == NodeTypeInvalid
}

func (pr ProgressReport) SpecGoroutine() Goroutine {
	for _, goroutine := range pr.Goroutines {
		if goroutine.IsSpecGoroutine {
			return goroutine
		}
	}
	return Goroutine{}
}

func (pr ProgressReport) HighlightedGoroutines() []Goroutine {
	out := []Goroutine{}
	for _, goroutine := range pr.Goroutines {
		if goroutine.IsSpecGoroutine || !goroutine.HasHighlights() {
			continue
		}
		out = append(out, goroutine)
	}
	return out
}

func (pr ProgressReport) OtherGoroutines() []Goroutine {
	out := []Goroutine{}
	for _, goroutine := range pr.Goroutines {
		if goroutine.IsSpecGoroutine || goroutine.HasHighlights() {
			continue
		}
		out = append(out, goroutine)
	}
	return out
}

func (pr ProgressReport) WithoutCapturedGinkgoWriterOutput() ProgressReport {
	out := pr
	out.CapturedGinkgoWriterOutput = ""
	return out
}

type Goroutine struct {
	ID              uint64
	State           string
	Stack           []FunctionCall
	IsSpecGoroutine bool
}

func (g Goroutine) IsZero() bool {
	return g.ID == 0
}

func (g Goroutine) HasHighlights() bool {
	for _, fc := range g.Stack {
		if fc.Highlight {
			return true
		}
	}

	return false
}

type FunctionCall struct {
	Function        string
	Filename        string
	Line            int
	Highlight       bool     `json:",omitempty"`
	Source          []string `json:",omitempty"`
	SourceHighlight int      `json:",omitempty"`
}
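
// Illustrative sketch (editor's addition, not part of the vendored upstream
// source): a minimal walk over a ProgressReport that collects the highlighted
// stack frames of the spec goroutine, using the SpecGoroutine, IsZero, and
// Highlight definitions above. highlightedSpecFrames is a hypothetical helper.
func highlightedSpecFrames(pr ProgressReport) []FunctionCall {
	out := []FunctionCall{}
	spec := pr.SpecGoroutine()
	if spec.IsZero() {
		// No spec goroutine was captured, e.g. for an empty report.
		return out
	}
	for _, fc := range spec.Stack {
		if fc.Highlight {
			// Keep only the frames the report marks as highlights.
			out = append(out, fc)
		}
	}
	return out
}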

// NodeType captures the type of a given Ginkgo Node
type NodeType uint

const (
	NodeTypeInvalid NodeType = 0

	NodeTypeContainer NodeType = 1 << iota
	NodeTypeIt

	NodeTypeBeforeEach
	NodeTypeJustBeforeEach
	NodeTypeAfterEach
	NodeTypeJustAfterEach

	NodeTypeBeforeAll
	NodeTypeAfterAll

	NodeTypeBeforeSuite
	NodeTypeSynchronizedBeforeSuite
	NodeTypeAfterSuite
	NodeTypeSynchronizedAfterSuite

	NodeTypeReportBeforeEach
	NodeTypeReportAfterEach
	NodeTypeReportAfterSuite

	NodeTypeCleanupInvalid
	NodeTypeCleanupAfterEach
	NodeTypeCleanupAfterAll
	NodeTypeCleanupAfterSuite
)

var NodeTypesForContainerAndIt = NodeTypeContainer | NodeTypeIt
var NodeTypesForSuiteLevelNodes = NodeTypeBeforeSuite | NodeTypeSynchronizedBeforeSuite | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeReportAfterSuite | NodeTypeCleanupAfterSuite
var NodeTypesAllowedDuringCleanupInterrupt = NodeTypeAfterEach | NodeTypeJustAfterEach | NodeTypeAfterAll | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeCleanupAfterEach | NodeTypeCleanupAfterAll | NodeTypeCleanupAfterSuite
var NodeTypesAllowedDuringReportInterrupt = NodeTypeReportBeforeEach | NodeTypeReportAfterEach | NodeTypeReportAfterSuite

var ntEnumSupport = NewEnumSupport(map[uint]string{
	uint(NodeTypeInvalid):                 "INVALID NODE TYPE",
	uint(NodeTypeContainer):               "Container",
	uint(NodeTypeIt):                      "It",
	uint(NodeTypeBeforeEach):              "BeforeEach",
	uint(NodeTypeJustBeforeEach):          "JustBeforeEach",
	uint(NodeTypeAfterEach):               "AfterEach",
	uint(NodeTypeJustAfterEach):           "JustAfterEach",
	uint(NodeTypeBeforeAll):               "BeforeAll",
	uint(NodeTypeAfterAll):                "AfterAll",
	uint(NodeTypeBeforeSuite):             "BeforeSuite",
	uint(NodeTypeSynchronizedBeforeSuite): "SynchronizedBeforeSuite",
	uint(NodeTypeAfterSuite):              "AfterSuite",
	uint(NodeTypeSynchronizedAfterSuite):  "SynchronizedAfterSuite",
	uint(NodeTypeReportBeforeEach):        "ReportBeforeEach",
	uint(NodeTypeReportAfterEach):         "ReportAfterEach",
	uint(NodeTypeReportAfterSuite):        "ReportAfterSuite",
	uint(NodeTypeCleanupInvalid):          "DeferCleanup",
	uint(NodeTypeCleanupAfterEach):        "DeferCleanup (Each)",
	uint(NodeTypeCleanupAfterAll):         "DeferCleanup (All)",
	uint(NodeTypeCleanupAfterSuite):       "DeferCleanup (Suite)",
})

func (nt NodeType) String() string {
	return ntEnumSupport.String(uint(nt))
}
func (nt *NodeType) UnmarshalJSON(b []byte) error {
	out, err := ntEnumSupport.UnmarshJSON(b)
	*nt = NodeType(out)
	return err
}
func (nt NodeType) MarshalJSON() ([]byte, error) {
	return ntEnumSupport.MarshJSON(uint(nt))
}

func (nt NodeType) Is(nodeTypes NodeType) bool {
	return nt&nodeTypes != 0
}
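
// Illustrative sketch (editor's addition, not part of the vendored upstream
// source): NodeType is also a bit flag, so Is can test a node against the
// combined masks defined above. Both helpers below are hypothetical.
func mayRunDuringCleanupInterrupt(nt NodeType) bool {
	// True for the After*/DeferCleanup node types listed in the mask.
	return nt.Is(NodeTypesAllowedDuringCleanupInterrupt)
}

func describeNode(nt NodeType) string {
	switch {
	case nt.Is(NodeTypesForContainerAndIt):
		return nt.String() + " (container or leaf node)"
	case nt.Is(NodeTypesForSuiteLevelNodes):
		return nt.String() + " (suite-level node)"
	default:
		return nt.String()
	}
}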
3
vendor/github.com/onsi/ginkgo/v2/types/version.go
generated
vendored
Normal file
@@ -0,0 +1,3 @@
package types

const VERSION = "2.4.0"