-
Notifications
You must be signed in to change notification settings - Fork 10
/
opts.go
107 lines (93 loc) · 3.34 KB
/
opts.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
package testcase
import (
"fmt"
"time"
"go.llib.dev/testcase/assert"
)
// Flaky will mark the spec/testCase as unstable.
// Flaky testCase execution is tolerant towards failing assertion
// and these tests will be rerun in case of a failure.
// A Wait Timeout for a successful flaky testCase must be provided.
//
// The primary use-case is when a team focuses on shipping out value,
// and time is short till deadlines.
// These flaky tests prevent CI/CD pipelines, which are often turned off in the heat of the moment to let the latest changes pass.
// The motivation behind it is to gain time for the team to revisit these tests after the release and then learn from them.
// At the same time, they intend to fix it as well.
// These tests, however, are often forgotten, and while they are not the greatest assets of the CI pipeline,
// they often still serve essential value.
//
// As a least wrong solution, instead of skipping these tests, you can mark them as flaky, so at a later time,
// finding these flaky tests in the project should be easy.
// When you flag a testCase as flaky, you must provide a timeout value that will define a testing time window
// where the testCase can be rerun multiple times by the framework.
// If the testCase can't run successfully within this time-window, the testCase will fail.
// This failure potentially means that the underlying functionality is broken,
// and the committer should reevaluate the changes in the last commit.
//
// While this functionality might help in tough times,
// it is advised to pair the usage with a scheduled monthly CI pipeline job.
// The Job should check the testing code base for the flaky flag.
//
// Accepted argument types: time.Duration (retry time window), int (retry count),
// assert.RetryStrategy, or assert.Retry. Any other type panics, since a flaky
// flag with an unusable retry policy is a programmer error.
func Flaky(countOrTimeout any) SpecOption {
	retry, ok := makeEventually(countOrTimeout)
	if !ok {
		panic(fmt.Errorf(`%T is not supported by Flaky flag`, countOrTimeout))
	}
	return specOptionFunc(func(s *Spec) {
		s.flaky = &retry
	})
}
// makeEventually converts a supported retry configuration value into an
// assert.Retry. The second return value reports whether the input type was
// supported.
//
// Case order is intentional: concrete types (time.Duration, int, assert.Retry)
// are matched before the assert.RetryStrategy interface case.
func makeEventually(v any) (assert.Retry, bool) {
	switch value := v.(type) {
	case time.Duration:
		// A duration defines a time window in which reruns are attempted.
		return assert.Retry{Strategy: assert.Waiter{Timeout: value}}, true
	case int:
		// An int defines a fixed number of rerun attempts.
		return assert.Retry{Strategy: assert.RetryCount(value)}, true
	case assert.RetryStrategy:
		return assert.Retry{Strategy: value}, true
	case assert.Retry:
		return value, true
	default:
		return assert.Retry{}, false
	}
}
// WithRetryStrategy configures the retry strategy used for eventually-style
// assertions within the Spec.
func WithRetryStrategy(strategy assert.RetryStrategy) SpecOption {
	return specOptionFunc(func(s *Spec) {
		s.eventually = &assert.Retry{Strategy: strategy}
	})
}
//func Timeout(duration time.Duration) SpecOption {}
//func OrderWith(orderer) SpecOption {}
// SkipBenchmark marks the spec so it is excluded from benchmark execution.
// Tests flagged this way still run as normal tests.
func SkipBenchmark() SpecOption {
	// Parameter renamed c -> s for consistency with the other options in this file.
	return specOptionFunc(func(s *Spec) {
		s.skipBenchmark = true
	})
}
// Group creates a testing group in the specification.
// During testCase execution, a group will be bundled together,
// and parallel tests will run concurrently within the testing group.
func Group(name string) SpecOption {
	return specOptionFunc(func(s *Spec) {
		s.group = &struct{ name string }{name: name}
	})
}
// parallel flags the spec so its tests may run concurrently.
func parallel() SpecOption {
	return specOptionFunc(func(spec *Spec) {
		spec.parallel = true
	})
}
// sequential flags the spec so its tests run one after another.
func sequential() SpecOption {
	return specOptionFunc(func(spec *Spec) {
		spec.sequential = true
	})
}
// benchmark flags the spec as being executed in benchmark mode.
func benchmark() SpecOption {
	return specOptionFunc(func(spec *Spec) {
		spec.isBenchmark = true
	})
}
// SpecOption configures a Spec. Implementations apply their setting to the
// given *Spec during Spec construction.
type SpecOption interface {
	setup(*Spec)
}

// specOptionFunc adapts a plain function to the SpecOption interface.
type specOptionFunc func(s *Spec)

func (fn specOptionFunc) setup(s *Spec) { fn(s) }