forked from sourcegraph/conc
result_pool.go
package pool

import (
	"context"
	"sort"
	"sync"
)

// NewWithResults creates a new ResultPool for tasks with a result of type T.
//
// The configuration methods (With*) will panic if they are used after calling
// Go() for the first time.
func NewWithResults[T any]() *ResultPool[T] {
	return &ResultPool[T]{
		pool: *New(),
	}
}

// ResultPool is a pool that executes tasks that return a generic result type.
// Tasks are executed in the pool with Go(), then the results of the tasks are
// returned by Wait().
//
// The order of the results is guaranteed to be the same as the order the
// tasks were submitted.
type ResultPool[T any] struct {
	pool Pool
	agg  resultAggregator[T]
}
// Go submits a task to the pool. If all goroutines in the pool
// are busy, a call to Go() will block until the task can be started.
func (p *ResultPool[T]) Go(f func() T) {
	idx := p.agg.nextIndex()
	p.pool.Go(func() {
		p.agg.save(idx, f(), false)
	})
}

// Wait cleans up all spawned goroutines, propagating any panics, and returning
// a slice of results from tasks that did not panic.
func (p *ResultPool[T]) Wait() []T {
	p.pool.Wait()
	results := p.agg.collect(true)
	p.agg = resultAggregator[T]{} // reset for reuse
	return results
}
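
// exampleSquares is a usage sketch (an addition for illustration, not part of
// the upstream file): it submits ten tasks that each return i*i and collects
// the results in submission order. The pool size of 4 is an arbitrary choice.
func exampleSquares() []int {
	p := NewWithResults[int]().WithMaxGoroutines(4)
	for i := 0; i < 10; i++ {
		i := i // capture the loop variable for the closure (pre-Go 1.22 semantics)
		p.Go(func() int {
			return i * i
		})
	}
	return p.Wait() // [0 1 4 9 16 25 36 49 64 81]
}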
// MaxGoroutines returns the maximum size of the pool.
func (p *ResultPool[T]) MaxGoroutines() int {
	return p.pool.MaxGoroutines()
}
// WithErrors converts the pool to a ResultErrorPool so the submitted tasks
// can return errors.
func (p *ResultPool[T]) WithErrors() *ResultErrorPool[T] {
	p.panicIfInitialized()
	return &ResultErrorPool[T]{
		errorPool: *p.pool.WithErrors(),
	}
}
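
// exampleParseAll is a usage sketch (an addition for illustration, not part of
// the upstream file); parse is a hypothetical parameter. Each task returns
// (int, error), and Wait returns the successful results together with any
// task errors (combined into one error unless configured otherwise).
func exampleParseAll(parse func(string) (int, error), inputs []string) ([]int, error) {
	p := NewWithResults[int]().WithErrors()
	for _, in := range inputs {
		in := in // capture the loop variable for the closure
		p.Go(func() (int, error) {
			return parse(in)
		})
	}
	return p.Wait()
}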
// WithContext converts the pool to a ResultContextPool for tasks that should
// run under the same context, such that they each respect shared cancellation.
// For example, WithCancelOnError can be configured on the returned pool to
// signal that all goroutines should be cancelled upon the first error.
func (p *ResultPool[T]) WithContext(ctx context.Context) *ResultContextPool[T] {
	p.panicIfInitialized()
	return &ResultContextPool[T]{
		contextPool: *p.pool.WithContext(ctx),
	}
}
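
// exampleFetchAll is a usage sketch (an addition for illustration, not part of
// the upstream file); fetch is a hypothetical parameter. Every task receives
// the shared context, so pairing this with WithCancelOnError stops the
// remaining work as soon as one fetch fails.
func exampleFetchAll(ctx context.Context, fetch func(context.Context, string) (string, error), urls []string) ([]string, error) {
	p := NewWithResults[string]().WithContext(ctx).WithCancelOnError()
	for _, u := range urls {
		u := u // capture the loop variable for the closure
		p.Go(func(ctx context.Context) (string, error) {
			return fetch(ctx, u)
		})
	}
	return p.Wait()
}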
// WithMaxGoroutines limits the number of goroutines in a pool.
// Defaults to unlimited. Panics if n < 1.
func (p *ResultPool[T]) WithMaxGoroutines(n int) *ResultPool[T] {
	p.panicIfInitialized()
	p.pool.WithMaxGoroutines(n)
	return p
}

func (p *ResultPool[T]) panicIfInitialized() {
	p.pool.panicIfInitialized()
}
// resultAggregator is a utility type that lets us safely append from multiple
// goroutines. The zero value is valid and ready to use.
type resultAggregator[T any] struct {
	mu      sync.Mutex
	len     int
	results []T
	errored []int
}

// nextIndex reserves a slot for a result. The returned value should be passed
// to save() when adding a result to the aggregator.
func (r *resultAggregator[T]) nextIndex() int {
	r.mu.Lock()
	defer r.mu.Unlock()

	nextIdx := r.len
	r.len += 1
	return nextIdx
}
func (r *resultAggregator[T]) save(i int, res T, errored bool) {
	r.mu.Lock()
	defer r.mu.Unlock()

	if i >= len(r.results) {
		old := r.results
		r.results = make([]T, r.len)
		copy(r.results, old)
	}

	r.results[i] = res

	if errored {
		r.errored = append(r.errored, i)
	}
}
// collect returns the set of aggregated results.
func (r *resultAggregator[T]) collect(collectErrored bool) []T {
	if !r.mu.TryLock() {
		panic("collect should not be called until all goroutines have exited")
	}

	if collectErrored || len(r.errored) == 0 {
		return r.results
	}

	filtered := r.results[:0]
	sort.Ints(r.errored)
	for i, e := range r.errored {
		if i == 0 {
			filtered = append(filtered, r.results[:e]...)
		} else {
			filtered = append(filtered, r.results[r.errored[i-1]+1:e]...)
		}
	}
	// Include the results that come after the last errored index, which the
	// loop above does not cover.
	filtered = append(filtered, r.results[r.errored[len(r.errored)-1]+1:]...)
	return filtered
}
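
// exampleAggregatorFiltering is a sketch (an addition for illustration, not
// part of the upstream file) of how errored slots are dropped: the slots saved
// with errored=true are skipped, so collect(false) returns only the values at
// indices 0, 2, and 4, in submission order.
func exampleAggregatorFiltering() []string {
	var agg resultAggregator[string]
	inputs := []string{"a", "bad", "c", "bad", "e"}
	for _, v := range inputs {
		idx := agg.nextIndex()
		agg.save(idx, v, v == "bad")
	}
	return agg.collect(false) // ["a", "c", "e"]
}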