Remove unused dependency
This commit is contained in:
parent
a8d2f0244e
commit
3dfbf385e4
18 changed files with 1 additions and 2355 deletions
8
Gopkg.lock
generated
8
Gopkg.lock
generated
|
@ -397,12 +397,6 @@
|
|||
revision = "629574ca2a5df945712d3079857300b5e4da0236"
|
||||
version = "v1.4.2"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/go-playground/pool.v3"
|
||||
packages = ["."]
|
||||
revision = "e73cd3a5ded835540c5cf4778488579c5b357d68"
|
||||
version = "v3.1.1"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/inf.v0"
|
||||
packages = ["."]
|
||||
|
@ -466,6 +460,6 @@
|
|||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "cb7f34d9108be87b2601c0d4acbd6faa41bdf211a082760fa1b4d4300bf3e959"
|
||||
inputs-digest = "a9b639a7cd7adfd469612e825628014f2deacaa54a35e65255db098794a0992c"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
|
|
27
vendor/gopkg.in/go-playground/pool.v3/.gitignore
generated
vendored
27
vendor/gopkg.in/go-playground/pool.v3/.gitignore
generated
vendored
|
@ -1,27 +0,0 @@
|
|||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
||||
pool
|
||||
old.txt
|
||||
new.txt
|
22
vendor/gopkg.in/go-playground/pool.v3/LICENSE
generated
vendored
22
vendor/gopkg.in/go-playground/pool.v3/LICENSE
generated
vendored
|
@ -1,22 +0,0 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015 Dean Karn
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
276
vendor/gopkg.in/go-playground/pool.v3/README.md
generated
vendored
276
vendor/gopkg.in/go-playground/pool.v3/README.md
generated
vendored
|
@ -1,276 +0,0 @@
|
|||
Package pool
|
||||
============
|
||||
|
||||

|
||||
[](https://semaphoreci.com/joeybloggs/pool)
|
||||
[](https://coveralls.io/github/go-playground/pool?branch=v3)
|
||||
[](https://goreportcard.com/report/gopkg.in/go-playground/pool.v3)
|
||||
[](https://godoc.org/gopkg.in/go-playground/pool.v3)
|
||||

|
||||
|
||||
Package pool implements a limited consumer goroutine or unlimited goroutine pool for easier goroutine handling and cancellation.
|
||||
|
||||
Features:
|
||||
|
||||
- Dead simple to use and makes no assumptions about how you will use it.
|
||||
- Automatic recovery from consumer goroutines which returns an error to the results
|
||||
|
||||
Pool v2 advantages over Pool v1:
|
||||
|
||||
- Up to 300% faster due to lower contention ( BenchmarkSmallRun used to take 3 seconds, now 1 second )
|
||||
- Cancels are much faster
|
||||
- Easier to use, no longer need to know the # of Work Units to be processed.
|
||||
- Pool can now be used as a long running/globally defined pool if desired ( v1 Pool was only good for one run )
|
||||
- Supports single units of work as well as batching
|
||||
- Pool can easily be reset after a Close() or Cancel() for reuse.
|
||||
- Multiple Batches can be run and even cancelled on the same Pool.
|
||||
- Supports individual Work Unit cancellation.
|
||||
|
||||
Pool v3 advantages over Pool v2:
|
||||
|
||||
- Objects are not interfaces allowing for less breaking changes going forward.
|
||||
- Now there are 2 Pool types, both completely interchangeable, a limited worker pool and unlimited pool.
|
||||
- Simpler usage of Work Units, instead of `<-work.Done` now can do `work.Wait()`
|
||||
|
||||
Installation
|
||||
------------
|
||||
|
||||
Use go get.
|
||||
|
||||
go get gopkg.in/go-playground/pool.v3
|
||||
|
||||
Then import the pool package into your own code.
|
||||
|
||||
import "gopkg.in/go-playground/pool.v3"
|
||||
|
||||
|
||||
Important Information READ THIS!
|
||||
------
|
||||
|
||||
- It is recommended that you cancel a pool or batch from the calling function and not inside of the Unit of Work, it will work fine, however because of the goroutine scheduler and context switching it may not cancel as soon as if called from outside.
|
||||
- When Batching DO NOT FORGET TO CALL batch.QueueComplete(), if you do the Batch WILL deadlock
|
||||
- It is your responsibility to call WorkUnit.IsCancelled() to check if it's cancelled after a blocking operation like waiting for a connection from a pool. (optional)
|
||||
|
||||
Usage and documentation
|
||||
------
|
||||
|
||||
Please see http://godoc.org/gopkg.in/go-playground/pool.v3 for detailed usage docs.
|
||||
|
||||
##### Examples:
|
||||
|
||||
both Limited Pool and Unlimited Pool have the same signatures and are completely interchangeable.
|
||||
|
||||
Per Unit Work
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"gopkg.in/go-playground/pool.v3"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
||||
p := pool.NewLimited(10)
|
||||
defer p.Close()
|
||||
|
||||
user := p.Queue(getUser(13))
|
||||
other := p.Queue(getOtherInfo(13))
|
||||
|
||||
user.Wait()
|
||||
if err := user.Error(); err != nil {
|
||||
// handle error
|
||||
}
|
||||
|
||||
// do stuff with user
|
||||
username := user.Value().(string)
|
||||
fmt.Println(username)
|
||||
|
||||
other.Wait()
|
||||
if err := other.Error(); err != nil {
|
||||
// handle error
|
||||
}
|
||||
|
||||
// do stuff with other
|
||||
otherInfo := other.Value().(string)
|
||||
fmt.Println(otherInfo)
|
||||
}
|
||||
|
||||
func getUser(id int) pool.WorkFunc {
|
||||
|
||||
return func(wu pool.WorkUnit) (interface{}, error) {
|
||||
|
||||
// simulate waiting for something, like TCP connection to be established
|
||||
// or connection from pool grabbed
|
||||
time.Sleep(time.Second * 1)
|
||||
|
||||
if wu.IsCancelled() {
|
||||
// return values not used
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// ready for processing...
|
||||
|
||||
return "Joeybloggs", nil
|
||||
}
|
||||
}
|
||||
|
||||
func getOtherInfo(id int) pool.WorkFunc {
|
||||
|
||||
return func(wu pool.WorkUnit) (interface{}, error) {
|
||||
|
||||
// simulate waiting for something, like TCP connection to be established
|
||||
// or connection from pool grabbed
|
||||
time.Sleep(time.Second * 1)
|
||||
|
||||
if wu.IsCancelled() {
|
||||
// return values not used
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// ready for processing...
|
||||
|
||||
return "Other Info", nil
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Batch Work
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"gopkg.in/go-playground/pool.v3"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
||||
p := pool.NewLimited(10)
|
||||
defer p.Close()
|
||||
|
||||
batch := p.Batch()
|
||||
|
||||
// for max speed Queue in another goroutine
|
||||
// but it is not required, just can't start reading results
|
||||
// until all items are Queued.
|
||||
|
||||
go func() {
|
||||
for i := 0; i < 10; i++ {
|
||||
batch.Queue(sendEmail("email content"))
|
||||
}
|
||||
|
||||
// DO NOT FORGET THIS OR GOROUTINES WILL DEADLOCK
|
||||
// if calling Cancel() it calles QueueComplete() internally
|
||||
batch.QueueComplete()
|
||||
}()
|
||||
|
||||
for email := range batch.Results() {
|
||||
|
||||
if err := email.Error(); err != nil {
|
||||
// handle error
|
||||
// maybe call batch.Cancel()
|
||||
}
|
||||
|
||||
// use return value
|
||||
fmt.Println(email.Value().(bool))
|
||||
}
|
||||
}
|
||||
|
||||
func sendEmail(email string) pool.WorkFunc {
|
||||
return func(wu pool.WorkUnit) (interface{}, error) {
|
||||
|
||||
// simulate waiting for something, like TCP connection to be established
|
||||
// or connection from pool grabbed
|
||||
time.Sleep(time.Second * 1)
|
||||
|
||||
if wu.IsCancelled() {
|
||||
// return values not used
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// ready for processing...
|
||||
|
||||
return true, nil // everything ok, send nil, error if not
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Benchmarks
|
||||
------
|
||||
###### Run on MacBook Pro (Retina, 15-inch, Late 2013) 2.6 GHz Intel Core i7 16 GB 1600 MHz DDR3 using Go 1.6.2
|
||||
|
||||
run with 1, 2, 4,8 and 16 cpu to show it scales well...16 is double the # of logical cores on this machine.
|
||||
|
||||
NOTE: Cancellation times CAN vary depending how busy your system is and how the goroutine scheduler is but
|
||||
worse case I've seen is 1s to cancel instead of 0ns
|
||||
|
||||
```go
|
||||
go test -cpu=1,2,4,8,16 -bench=. -benchmem=true
|
||||
PASS
|
||||
BenchmarkLimitedSmallRun 1 1002492008 ns/op 3552 B/op 55 allocs/op
|
||||
BenchmarkLimitedSmallRun-2 1 1002347196 ns/op 3568 B/op 55 allocs/op
|
||||
BenchmarkLimitedSmallRun-4 1 1010533571 ns/op 4720 B/op 73 allocs/op
|
||||
BenchmarkLimitedSmallRun-8 1 1008883324 ns/op 4080 B/op 63 allocs/op
|
||||
BenchmarkLimitedSmallRun-16 1 1002317677 ns/op 3632 B/op 56 allocs/op
|
||||
BenchmarkLimitedSmallCancel 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkLimitedSmallCancel-2 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkLimitedSmallCancel-4 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkLimitedSmallCancel-8 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkLimitedSmallCancel-16 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkLimitedLargeCancel 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkLimitedLargeCancel-2 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkLimitedLargeCancel-4 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkLimitedLargeCancel-8 1000000 1006 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkLimitedLargeCancel-16 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkLimitedOverconsumeLargeRun 1 4027153081 ns/op 36176 B/op 572 allocs/op
|
||||
BenchmarkLimitedOverconsumeLargeRun-2 1 4003489261 ns/op 32336 B/op 512 allocs/op
|
||||
BenchmarkLimitedOverconsumeLargeRun-4 1 4005579847 ns/op 34128 B/op 540 allocs/op
|
||||
BenchmarkLimitedOverconsumeLargeRun-8 1 4004639857 ns/op 34992 B/op 553 allocs/op
|
||||
BenchmarkLimitedOverconsumeLargeRun-16 1 4022695297 ns/op 36864 B/op 532 allocs/op
|
||||
BenchmarkLimitedBatchSmallRun 1 1000785511 ns/op 6336 B/op 94 allocs/op
|
||||
BenchmarkLimitedBatchSmallRun-2 1 1001459945 ns/op 4480 B/op 65 allocs/op
|
||||
BenchmarkLimitedBatchSmallRun-4 1 1002475371 ns/op 6672 B/op 99 allocs/op
|
||||
BenchmarkLimitedBatchSmallRun-8 1 1002498902 ns/op 4624 B/op 67 allocs/op
|
||||
BenchmarkLimitedBatchSmallRun-16 1 1002202273 ns/op 5344 B/op 78 allocs/op
|
||||
BenchmarkUnlimitedSmallRun 1 1002361538 ns/op 3696 B/op 59 allocs/op
|
||||
BenchmarkUnlimitedSmallRun-2 1 1002230293 ns/op 3776 B/op 60 allocs/op
|
||||
BenchmarkUnlimitedSmallRun-4 1 1002148953 ns/op 3776 B/op 60 allocs/op
|
||||
BenchmarkUnlimitedSmallRun-8 1 1002120679 ns/op 3584 B/op 57 allocs/op
|
||||
BenchmarkUnlimitedSmallRun-16 1 1001698519 ns/op 3968 B/op 63 allocs/op
|
||||
BenchmarkUnlimitedSmallCancel 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkUnlimitedSmallCancel-2 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkUnlimitedSmallCancel-4 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkUnlimitedSmallCancel-8 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkUnlimitedSmallCancel-16 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkUnlimitedLargeCancel 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkUnlimitedLargeCancel-2 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkUnlimitedLargeCancel-4 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkUnlimitedLargeCancel-8 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkUnlimitedLargeCancel-16 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkUnlimitedLargeRun 1 1001631711 ns/op 40352 B/op 603 allocs/op
|
||||
BenchmarkUnlimitedLargeRun-2 1 1002603908 ns/op 38304 B/op 586 allocs/op
|
||||
BenchmarkUnlimitedLargeRun-4 1 1001452975 ns/op 38192 B/op 584 allocs/op
|
||||
BenchmarkUnlimitedLargeRun-8 1 1005382882 ns/op 35200 B/op 537 allocs/op
|
||||
BenchmarkUnlimitedLargeRun-16 1 1001818482 ns/op 37056 B/op 566 allocs/op
|
||||
BenchmarkUnlimitedBatchSmallRun 1 1002391247 ns/op 4240 B/op 63 allocs/op
|
||||
BenchmarkUnlimitedBatchSmallRun-2 1 1010313222 ns/op 4688 B/op 70 allocs/op
|
||||
BenchmarkUnlimitedBatchSmallRun-4 1 1008364651 ns/op 4304 B/op 64 allocs/op
|
||||
BenchmarkUnlimitedBatchSmallRun-8 1 1001858192 ns/op 4448 B/op 66 allocs/op
|
||||
BenchmarkUnlimitedBatchSmallRun-16 1 1001228000 ns/op 4320 B/op 64 allocs/op
|
||||
```
|
||||
To put some of these benchmarks in perspective:
|
||||
|
||||
- BenchmarkLimitedSmallRun did 10 seconds worth of processing in 1.002492008s
|
||||
- BenchmarkLimitedSmallCancel ran 20 jobs, cancelled on job 6 and and ran in 0s
|
||||
- BenchmarkLimitedLargeCancel ran 1000 jobs, cancelled on job 6 and and ran in 0s
|
||||
- BenchmarkLimitedOverconsumeLargeRun ran 100 jobs using 25 workers in 4.027153081s
|
||||
|
||||
|
||||
License
|
||||
------
|
||||
Distributed under MIT License, please see license file in code for more details.
|
131
vendor/gopkg.in/go-playground/pool.v3/batch.go
generated
vendored
131
vendor/gopkg.in/go-playground/pool.v3/batch.go
generated
vendored
|
@ -1,131 +0,0 @@
|
|||
package pool
|
||||
|
||||
import "sync"
|
||||
|
||||
// Batch contains all information for a batch run of WorkUnits
|
||||
type Batch interface {
|
||||
|
||||
// Queue queues the work to be run in the pool and starts processing immediately
|
||||
// and also retains a reference for Cancellation and outputting to results.
|
||||
// WARNING be sure to call QueueComplete() once all work has been Queued.
|
||||
Queue(fn WorkFunc)
|
||||
|
||||
// QueueComplete lets the batch know that there will be no more Work Units Queued
|
||||
// so that it may close the results channels once all work is completed.
|
||||
// WARNING: if this function is not called the results channel will never exhaust,
|
||||
// but block forever listening for more results.
|
||||
QueueComplete()
|
||||
|
||||
// Cancel cancels the Work Units belonging to this Batch
|
||||
Cancel()
|
||||
|
||||
// Results returns a Work Unit result channel that will output all
|
||||
// completed units of work.
|
||||
Results() <-chan WorkUnit
|
||||
|
||||
// WaitAll is an alternative to Results() where you
|
||||
// may want/need to wait until all work has been
|
||||
// processed, but don't need to check results.
|
||||
// eg. individual units of work may handle their own
|
||||
// errors, logging...
|
||||
WaitAll()
|
||||
}
|
||||
|
||||
// batch contains all information for a batch run of WorkUnits
|
||||
type batch struct {
|
||||
pool Pool
|
||||
m sync.Mutex
|
||||
units []WorkUnit
|
||||
results chan WorkUnit
|
||||
done chan struct{}
|
||||
closed bool
|
||||
wg *sync.WaitGroup
|
||||
}
|
||||
|
||||
func newBatch(p Pool) Batch {
|
||||
return &batch{
|
||||
pool: p,
|
||||
units: make([]WorkUnit, 0, 4), // capacity it to 4 so it doesn't grow and allocate too many times.
|
||||
results: make(chan WorkUnit),
|
||||
done: make(chan struct{}),
|
||||
wg: new(sync.WaitGroup),
|
||||
}
|
||||
}
|
||||
|
||||
// Queue queues the work to be run in the pool and starts processing immediately
|
||||
// and also retains a reference for Cancellation and outputting to results.
|
||||
// WARNING be sure to call QueueComplete() once all work has been Queued.
|
||||
func (b *batch) Queue(fn WorkFunc) {
|
||||
|
||||
b.m.Lock()
|
||||
|
||||
if b.closed {
|
||||
b.m.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
wu := b.pool.Queue(fn)
|
||||
|
||||
b.units = append(b.units, wu) // keeping a reference for cancellation purposes
|
||||
b.wg.Add(1)
|
||||
b.m.Unlock()
|
||||
|
||||
go func(b *batch, wu WorkUnit) {
|
||||
wu.Wait()
|
||||
b.results <- wu
|
||||
b.wg.Done()
|
||||
}(b, wu)
|
||||
}
|
||||
|
||||
// QueueComplete lets the batch know that there will be no more Work Units Queued
|
||||
// so that it may close the results channels once all work is completed.
|
||||
// WARNING: if this function is not called the results channel will never exhaust,
|
||||
// but block forever listening for more results.
|
||||
func (b *batch) QueueComplete() {
|
||||
b.m.Lock()
|
||||
b.closed = true
|
||||
close(b.done)
|
||||
b.m.Unlock()
|
||||
}
|
||||
|
||||
// Cancel cancels the Work Units belonging to this Batch
|
||||
func (b *batch) Cancel() {
|
||||
|
||||
b.QueueComplete() // no more to be added
|
||||
|
||||
b.m.Lock()
|
||||
|
||||
// go in reverse order to try and cancel as many as possbile
|
||||
// one at end are less likely to have run than those at the beginning
|
||||
for i := len(b.units) - 1; i >= 0; i-- {
|
||||
b.units[i].Cancel()
|
||||
}
|
||||
|
||||
b.m.Unlock()
|
||||
}
|
||||
|
||||
// Results returns a Work Unit result channel that will output all
|
||||
// completed units of work.
|
||||
func (b *batch) Results() <-chan WorkUnit {
|
||||
|
||||
go func(b *batch) {
|
||||
<-b.done
|
||||
b.m.Lock()
|
||||
b.wg.Wait()
|
||||
b.m.Unlock()
|
||||
close(b.results)
|
||||
}(b)
|
||||
|
||||
return b.results
|
||||
}
|
||||
|
||||
// WaitAll is an alternative to Results() where you
|
||||
// may want/need to wait until all work has been
|
||||
// processed, but don't need to check results.
|
||||
// eg. individual units of work may handle their own
|
||||
// errors and logging...
|
||||
func (b *batch) WaitAll() {
|
||||
|
||||
for range b.Results() {
|
||||
}
|
||||
}
|
172
vendor/gopkg.in/go-playground/pool.v3/batch_limited_test.go
generated
vendored
172
vendor/gopkg.in/go-playground/pool.v3/batch_limited_test.go
generated
vendored
|
@ -1,172 +0,0 @@
|
|||
package pool
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
. "gopkg.in/go-playground/assert.v1"
|
||||
)
|
||||
|
||||
// NOTES:
|
||||
// - Run "go test" to run tests
|
||||
// - Run "gocov test | gocov report" to report on test converage by file
|
||||
// - Run "gocov test | gocov annotate -" to report on all code and functions, those ,marked with "MISS" were never called
|
||||
//
|
||||
// or
|
||||
//
|
||||
// -- may be a good idea to change to output path to somewherelike /tmp
|
||||
// go test -coverprofile cover.out && go tool cover -html=cover.out -o cover.html
|
||||
//
|
||||
|
||||
func TestLimitedBatch(t *testing.T) {
|
||||
|
||||
newFunc := func(i int) func(WorkUnit) (interface{}, error) {
|
||||
return func(WorkUnit) (interface{}, error) {
|
||||
time.Sleep(time.Second * 1)
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
|
||||
pool := NewLimited(4)
|
||||
defer pool.Close()
|
||||
|
||||
batch := pool.Batch()
|
||||
|
||||
for i := 0; i < 4; i++ {
|
||||
batch.Queue(newFunc(i))
|
||||
}
|
||||
|
||||
batch.QueueComplete()
|
||||
|
||||
var count int
|
||||
|
||||
for range batch.Results() {
|
||||
count++
|
||||
}
|
||||
|
||||
Equal(t, count, 4)
|
||||
}
|
||||
|
||||
func TestLimitedBatchGlobalPool(t *testing.T) {
|
||||
|
||||
newFunc := func(i int) func(WorkUnit) (interface{}, error) {
|
||||
return func(WorkUnit) (interface{}, error) {
|
||||
time.Sleep(time.Second * 1)
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
|
||||
batch := limitedGpool.Batch()
|
||||
|
||||
for i := 0; i < 4; i++ {
|
||||
batch.Queue(newFunc(i))
|
||||
}
|
||||
|
||||
batch.QueueComplete()
|
||||
|
||||
var count int
|
||||
|
||||
for range batch.Results() {
|
||||
count++
|
||||
}
|
||||
|
||||
Equal(t, count, 4)
|
||||
}
|
||||
|
||||
func TestLimitedBatchCancelItemsThrownAway(t *testing.T) {
|
||||
|
||||
newFunc := func(i int) func(WorkUnit) (interface{}, error) {
|
||||
return func(WorkUnit) (interface{}, error) {
|
||||
time.Sleep(time.Second * 1)
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
|
||||
pool := NewLimited(4)
|
||||
defer pool.Close()
|
||||
|
||||
batch := pool.Batch()
|
||||
|
||||
go func() {
|
||||
for i := 0; i < 40; i++ {
|
||||
batch.Queue(newFunc(i))
|
||||
}
|
||||
}()
|
||||
|
||||
batch.Cancel()
|
||||
|
||||
var count int
|
||||
|
||||
for range batch.Results() {
|
||||
count++
|
||||
}
|
||||
|
||||
NotEqual(t, count, 40)
|
||||
}
|
||||
|
||||
func TestLimitedBatchCancelItemsCancelledAfterward(t *testing.T) {
|
||||
|
||||
newFunc := func(i int) func(WorkUnit) (interface{}, error) {
|
||||
return func(WorkUnit) (interface{}, error) {
|
||||
time.Sleep(time.Second * 1)
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
|
||||
pool := NewLimited(4)
|
||||
defer pool.Close()
|
||||
|
||||
batch := pool.Batch()
|
||||
|
||||
go func() {
|
||||
for i := 0; i < 40; i++ {
|
||||
batch.Queue(newFunc(i))
|
||||
}
|
||||
}()
|
||||
|
||||
time.Sleep(time.Second * 2)
|
||||
batch.Cancel()
|
||||
|
||||
var count int
|
||||
|
||||
for range batch.Results() {
|
||||
count++
|
||||
}
|
||||
|
||||
Equal(t, count, 40)
|
||||
}
|
||||
|
||||
func TestLimitedBatchWaitAll(t *testing.T) {
|
||||
|
||||
var count int
|
||||
var m sync.Mutex
|
||||
|
||||
newFunc := func(i int) func(WorkUnit) (interface{}, error) {
|
||||
return func(WorkUnit) (interface{}, error) {
|
||||
time.Sleep(time.Second * 1)
|
||||
m.Lock()
|
||||
count++
|
||||
m.Unlock()
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
|
||||
pool := NewLimited(4)
|
||||
defer pool.Close()
|
||||
|
||||
batch := pool.Batch()
|
||||
|
||||
go func() {
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
batch.Queue(newFunc(i))
|
||||
}
|
||||
|
||||
batch.QueueComplete()
|
||||
}()
|
||||
|
||||
batch.WaitAll()
|
||||
|
||||
Equal(t, count, 10)
|
||||
}
|
172
vendor/gopkg.in/go-playground/pool.v3/batch_unlimited_test.go
generated
vendored
172
vendor/gopkg.in/go-playground/pool.v3/batch_unlimited_test.go
generated
vendored
|
@ -1,172 +0,0 @@
|
|||
package pool
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
. "gopkg.in/go-playground/assert.v1"
|
||||
)
|
||||
|
||||
// NOTES:
|
||||
// - Run "go test" to run tests
|
||||
// - Run "gocov test | gocov report" to report on test converage by file
|
||||
// - Run "gocov test | gocov annotate -" to report on all code and functions, those ,marked with "MISS" were never called
|
||||
//
|
||||
// or
|
||||
//
|
||||
// -- may be a good idea to change to output path to somewherelike /tmp
|
||||
// go test -coverprofile cover.out && go tool cover -html=cover.out -o cover.html
|
||||
//
|
||||
|
||||
func TestUnlimitedBatch(t *testing.T) {
|
||||
|
||||
newFunc := func(i int) func(WorkUnit) (interface{}, error) {
|
||||
return func(WorkUnit) (interface{}, error) {
|
||||
time.Sleep(time.Second * 1)
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
|
||||
pool := New()
|
||||
defer pool.Close()
|
||||
|
||||
batch := pool.Batch()
|
||||
|
||||
for i := 0; i < 4; i++ {
|
||||
batch.Queue(newFunc(i))
|
||||
}
|
||||
|
||||
batch.QueueComplete()
|
||||
|
||||
var count int
|
||||
|
||||
for range batch.Results() {
|
||||
count++
|
||||
}
|
||||
|
||||
Equal(t, count, 4)
|
||||
}
|
||||
|
||||
func TestUnlimitedBatchGlobalPool(t *testing.T) {
|
||||
|
||||
newFunc := func(i int) func(WorkUnit) (interface{}, error) {
|
||||
return func(WorkUnit) (interface{}, error) {
|
||||
time.Sleep(time.Second * 1)
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
|
||||
batch := unlimitedGpool.Batch()
|
||||
|
||||
for i := 0; i < 4; i++ {
|
||||
batch.Queue(newFunc(i))
|
||||
}
|
||||
|
||||
batch.QueueComplete()
|
||||
|
||||
var count int
|
||||
|
||||
for range batch.Results() {
|
||||
count++
|
||||
}
|
||||
|
||||
Equal(t, count, 4)
|
||||
}
|
||||
|
||||
func TestUnlimitedBatchCancelItemsThrownAway(t *testing.T) {
|
||||
|
||||
newFunc := func(i int) func(WorkUnit) (interface{}, error) {
|
||||
return func(WorkUnit) (interface{}, error) {
|
||||
time.Sleep(time.Second * 1)
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
|
||||
pool := New()
|
||||
defer pool.Close()
|
||||
|
||||
batch := pool.Batch()
|
||||
|
||||
go func() {
|
||||
for i := 0; i < 40; i++ {
|
||||
batch.Queue(newFunc(i))
|
||||
}
|
||||
}()
|
||||
|
||||
batch.Cancel()
|
||||
|
||||
var count int
|
||||
|
||||
for range batch.Results() {
|
||||
count++
|
||||
}
|
||||
|
||||
NotEqual(t, count, 40)
|
||||
}
|
||||
|
||||
func TestUnlimitedBatchCancelItemsCancelledAfterward(t *testing.T) {
|
||||
|
||||
newFunc := func(i int) func(WorkUnit) (interface{}, error) {
|
||||
return func(WorkUnit) (interface{}, error) {
|
||||
time.Sleep(time.Second * 1)
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
|
||||
pool := New()
|
||||
defer pool.Close()
|
||||
|
||||
batch := pool.Batch()
|
||||
|
||||
go func() {
|
||||
for i := 0; i < 40; i++ {
|
||||
batch.Queue(newFunc(i))
|
||||
}
|
||||
}()
|
||||
|
||||
time.Sleep(time.Second * 2)
|
||||
batch.Cancel()
|
||||
|
||||
var count int
|
||||
|
||||
for range batch.Results() {
|
||||
count++
|
||||
}
|
||||
|
||||
Equal(t, count, 40)
|
||||
}
|
||||
|
||||
func TestUnlimitedBatchWaitAll(t *testing.T) {
|
||||
|
||||
var count int
|
||||
var m sync.Mutex
|
||||
|
||||
newFunc := func(i int) func(WorkUnit) (interface{}, error) {
|
||||
return func(WorkUnit) (interface{}, error) {
|
||||
time.Sleep(time.Second * 1)
|
||||
m.Lock()
|
||||
count++
|
||||
m.Unlock()
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
|
||||
pool := New()
|
||||
defer pool.Close()
|
||||
|
||||
batch := pool.Batch()
|
||||
|
||||
go func() {
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
batch.Queue(newFunc(i))
|
||||
}
|
||||
|
||||
batch.QueueComplete()
|
||||
}()
|
||||
|
||||
batch.WaitAll()
|
||||
|
||||
Equal(t, count, 10)
|
||||
}
|
261
vendor/gopkg.in/go-playground/pool.v3/doc.go
generated
vendored
261
vendor/gopkg.in/go-playground/pool.v3/doc.go
generated
vendored
|
@ -1,261 +0,0 @@
|
|||
/*
|
||||
Package pool implements a limited consumer goroutine or unlimited goroutine pool for easier goroutine handling and cancellation.
|
||||
|
||||
|
||||
Features:
|
||||
|
||||
- Dead simple to use and makes no assumptions about how you will use it.
|
||||
- Automatic recovery from consumer goroutines which returns an error to
|
||||
the results
|
||||
|
||||
Pool v2 advantages over Pool v1:
|
||||
|
||||
- Up to 300% faster due to lower contention,
|
||||
BenchmarkSmallRun used to take 3 seconds, now 1 second
|
||||
- Cancels are much faster
|
||||
- Easier to use, no longer need to know the # of Work Units to be processed.
|
||||
- Pool can now be used as a long running/globally defined pool if desired,
|
||||
v1 Pool was only good for one run
|
||||
- Supports single units of work as well as batching
|
||||
- Pool can easily be reset after a Close() or Cancel() for reuse.
|
||||
- Multiple Batches can be run and even cancelled on the same Pool.
|
||||
- Supports individual Work Unit cancellation.
|
||||
|
||||
Pool v3 advantages over Pool v2:
|
||||
|
||||
- Objects are not interfaces allowing for less breaking changes going forward.
|
||||
- Now there are 2 Pool types, both completely interchangeable, a limited worker pool
|
||||
and unlimited pool.
|
||||
- Simpler usage of Work Units, instead of `<-work.Done` now can do `work.Wait()`
|
||||
|
||||
Important Information READ THIS!
|
||||
|
||||
important usage information
|
||||
|
||||
- It is recommended that you cancel a pool or batch from the calling
|
||||
function and not inside of the Unit of Work, it will work fine, however
|
||||
because of the goroutine scheduler and context switching it may not
|
||||
cancel as soon as if called from outside.
|
||||
|
||||
- When Batching DO NOT FORGET TO CALL batch.QueueComplete(),
|
||||
if you do the Batch WILL deadlock
|
||||
|
||||
- It is your responsibility to call WorkUnit.IsCancelled() to check if it's cancelled
|
||||
after a blocking operation like waiting for a connection from a pool. (optional)
|
||||
|
||||
|
||||
Usage and documentation
|
||||
|
||||
both Limited Pool and Unlimited Pool have the same signatures and are completely interchangeable.
|
||||
|
||||
Per Unit Work
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"gopkg.in/go-playground/pool.v3"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
||||
p := pool.NewLimited(10)
|
||||
defer p.Close()
|
||||
|
||||
user := p.Queue(getUser(13))
|
||||
other := p.Queue(getOtherInfo(13))
|
||||
|
||||
user.Wait()
|
||||
if err := user.Error(); err != nil {
|
||||
// handle error
|
||||
}
|
||||
|
||||
// do stuff with user
|
||||
username := user.Value().(string)
|
||||
fmt.Println(username)
|
||||
|
||||
other.Wait()
|
||||
if err := other.Error(); err != nil {
|
||||
// handle error
|
||||
}
|
||||
|
||||
// do stuff with other
|
||||
otherInfo := other.Value().(string)
|
||||
fmt.Println(otherInfo)
|
||||
}
|
||||
|
||||
func getUser(id int) pool.WorkFunc {
|
||||
|
||||
return func(wu pool.WorkUnit) (interface{}, error) {
|
||||
|
||||
// simulate waiting for something, like TCP connection to be established
|
||||
// or connection from pool grabbed
|
||||
time.Sleep(time.Second * 1)
|
||||
|
||||
if wu.IsCancelled() {
|
||||
// return values not used
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// ready for processing...
|
||||
|
||||
return "Joeybloggs", nil
|
||||
}
|
||||
}
|
||||
|
||||
func getOtherInfo(id int) pool.WorkFunc {
|
||||
|
||||
return func(wu pool.WorkUnit) (interface{}, error) {
|
||||
|
||||
// simulate waiting for something, like TCP connection to be established
|
||||
// or connection from pool grabbed
|
||||
time.Sleep(time.Second * 1)
|
||||
|
||||
if wu.IsCancelled() {
|
||||
// return values not used
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// ready for processing...
|
||||
|
||||
return "Other Info", nil
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Batch Work
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"gopkg.in/go-playground/pool.v3"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
||||
p := pool.NewLimited(10)
|
||||
defer p.Close()
|
||||
|
||||
batch := p.Batch()
|
||||
|
||||
// for max speed Queue in another goroutine
|
||||
// but it is not required, just can't start reading results
|
||||
// until all items are Queued.
|
||||
|
||||
go func() {
|
||||
for i := 0; i < 10; i++ {
|
||||
batch.Queue(sendEmail("email content"))
|
||||
}
|
||||
|
||||
// DO NOT FORGET THIS OR GOROUTINES WILL DEADLOCK
|
||||
// if calling Cancel() it calles QueueComplete() internally
|
||||
batch.QueueComplete()
|
||||
}()
|
||||
|
||||
for email := range batch.Results() {
|
||||
|
||||
if err := email.Error(); err != nil {
|
||||
// handle error
|
||||
// maybe call batch.Cancel()
|
||||
}
|
||||
|
||||
// use return value
|
||||
fmt.Println(email.Value().(bool))
|
||||
}
|
||||
}
|
||||
|
||||
func sendEmail(email string) pool.WorkFunc {
|
||||
return func(wu pool.WorkUnit) (interface{}, error) {
|
||||
|
||||
// simulate waiting for something, like TCP connection to be established
|
||||
// or connection from pool grabbed
|
||||
time.Sleep(time.Second * 1)
|
||||
|
||||
if wu.IsCancelled() {
|
||||
// return values not used
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// ready for processing...
|
||||
|
||||
return true, nil // everything ok, send nil, error if not
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Benchmarks
|
||||
|
||||
Run on MacBook Pro (Retina, 15-inch, Late 2013) 2.6 GHz Intel Core i7 16 GB 1600 MHz DDR3 using Go 1.6.2
|
||||
|
||||
run with 1, 2, 4,8 and 16 cpu to show it scales well...16 is double the # of logical cores on this machine.
|
||||
|
||||
NOTE: Cancellation times CAN vary depending how busy your system is and how the goroutine scheduler is but
|
||||
worse case I've seen is 1 second to cancel instead of 0ns
|
||||
|
||||
go test -cpu=1,2,4,8,16 -bench=. -benchmem=true
|
||||
PASS
|
||||
BenchmarkLimitedSmallRun 1 1002492008 ns/op 3552 B/op 55 allocs/op
|
||||
BenchmarkLimitedSmallRun-2 1 1002347196 ns/op 3568 B/op 55 allocs/op
|
||||
BenchmarkLimitedSmallRun-4 1 1010533571 ns/op 4720 B/op 73 allocs/op
|
||||
BenchmarkLimitedSmallRun-8 1 1008883324 ns/op 4080 B/op 63 allocs/op
|
||||
BenchmarkLimitedSmallRun-16 1 1002317677 ns/op 3632 B/op 56 allocs/op
|
||||
BenchmarkLimitedSmallCancel 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkLimitedSmallCancel-2 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkLimitedSmallCancel-4 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkLimitedSmallCancel-8 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkLimitedSmallCancel-16 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkLimitedLargeCancel 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkLimitedLargeCancel-2 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkLimitedLargeCancel-4 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkLimitedLargeCancel-8 1000000 1006 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkLimitedLargeCancel-16 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkLimitedOverconsumeLargeRun 1 4027153081 ns/op 36176 B/op 572 allocs/op
|
||||
BenchmarkLimitedOverconsumeLargeRun-2 1 4003489261 ns/op 32336 B/op 512 allocs/op
|
||||
BenchmarkLimitedOverconsumeLargeRun-4 1 4005579847 ns/op 34128 B/op 540 allocs/op
|
||||
BenchmarkLimitedOverconsumeLargeRun-8 1 4004639857 ns/op 34992 B/op 553 allocs/op
|
||||
BenchmarkLimitedOverconsumeLargeRun-16 1 4022695297 ns/op 36864 B/op 532 allocs/op
|
||||
BenchmarkLimitedBatchSmallRun 1 1000785511 ns/op 6336 B/op 94 allocs/op
|
||||
BenchmarkLimitedBatchSmallRun-2 1 1001459945 ns/op 4480 B/op 65 allocs/op
|
||||
BenchmarkLimitedBatchSmallRun-4 1 1002475371 ns/op 6672 B/op 99 allocs/op
|
||||
BenchmarkLimitedBatchSmallRun-8 1 1002498902 ns/op 4624 B/op 67 allocs/op
|
||||
BenchmarkLimitedBatchSmallRun-16 1 1002202273 ns/op 5344 B/op 78 allocs/op
|
||||
BenchmarkUnlimitedSmallRun 1 1002361538 ns/op 3696 B/op 59 allocs/op
|
||||
BenchmarkUnlimitedSmallRun-2 1 1002230293 ns/op 3776 B/op 60 allocs/op
|
||||
BenchmarkUnlimitedSmallRun-4 1 1002148953 ns/op 3776 B/op 60 allocs/op
|
||||
BenchmarkUnlimitedSmallRun-8 1 1002120679 ns/op 3584 B/op 57 allocs/op
|
||||
BenchmarkUnlimitedSmallRun-16 1 1001698519 ns/op 3968 B/op 63 allocs/op
|
||||
BenchmarkUnlimitedSmallCancel 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkUnlimitedSmallCancel-2 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkUnlimitedSmallCancel-4 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkUnlimitedSmallCancel-8 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkUnlimitedSmallCancel-16 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkUnlimitedLargeCancel 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkUnlimitedLargeCancel-2 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkUnlimitedLargeCancel-4 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkUnlimitedLargeCancel-8 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkUnlimitedLargeCancel-16 2000000000 0.00 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkUnlimitedLargeRun 1 1001631711 ns/op 40352 B/op 603 allocs/op
|
||||
BenchmarkUnlimitedLargeRun-2 1 1002603908 ns/op 38304 B/op 586 allocs/op
|
||||
BenchmarkUnlimitedLargeRun-4 1 1001452975 ns/op 38192 B/op 584 allocs/op
|
||||
BenchmarkUnlimitedLargeRun-8 1 1005382882 ns/op 35200 B/op 537 allocs/op
|
||||
BenchmarkUnlimitedLargeRun-16 1 1001818482 ns/op 37056 B/op 566 allocs/op
|
||||
BenchmarkUnlimitedBatchSmallRun 1 1002391247 ns/op 4240 B/op 63 allocs/op
|
||||
BenchmarkUnlimitedBatchSmallRun-2 1 1010313222 ns/op 4688 B/op 70 allocs/op
|
||||
BenchmarkUnlimitedBatchSmallRun-4 1 1008364651 ns/op 4304 B/op 64 allocs/op
|
||||
BenchmarkUnlimitedBatchSmallRun-8 1 1001858192 ns/op 4448 B/op 66 allocs/op
|
||||
BenchmarkUnlimitedBatchSmallRun-16 1 1001228000 ns/op 4320 B/op 64 allocs/op
|
||||
|
||||
To put some of these benchmarks in perspective:
|
||||
|
||||
- BenchmarkLimitedSmallRun did 10 seconds worth of processing in 1.002492008s
|
||||
- BenchmarkLimitedSmallCancel ran 20 jobs, cancelled on job 6 and and ran in 0s
|
||||
- BenchmarkLimitedLargeCancel ran 1000 jobs, cancelled on job 6 and and ran in 0s
|
||||
- BenchmarkLimitedOverconsumeLargeRun ran 100 jobs using 25 workers in 4.027153081s
|
||||
|
||||
*/
|
||||
package pool
|
37
vendor/gopkg.in/go-playground/pool.v3/errors.go
generated
vendored
37
vendor/gopkg.in/go-playground/pool.v3/errors.go
generated
vendored
|
@ -1,37 +0,0 @@
|
|||
package pool
|
||||
|
||||
const (
|
||||
errCancelled = "ERROR: Work Unit Cancelled"
|
||||
errRecovery = "ERROR: Work Unit failed due to a recoverable error: '%v'\n, Stack Trace:\n %s"
|
||||
errClosed = "ERROR: Work Unit added/run after the pool had been closed or cancelled"
|
||||
)
|
||||
|
||||
// ErrRecovery contains the error when a consumer goroutine needed to be recovers
|
||||
type ErrRecovery struct {
|
||||
s string
|
||||
}
|
||||
|
||||
// Error prints recovery error
|
||||
func (e *ErrRecovery) Error() string {
|
||||
return e.s
|
||||
}
|
||||
|
||||
// ErrPoolClosed is the error returned to all work units that may have been in or added to the pool after it's closing.
|
||||
type ErrPoolClosed struct {
|
||||
s string
|
||||
}
|
||||
|
||||
// Error prints Work Unit Close error
|
||||
func (e *ErrPoolClosed) Error() string {
|
||||
return e.s
|
||||
}
|
||||
|
||||
// ErrCancelled is the error returned to a Work Unit when it has been cancelled.
|
||||
type ErrCancelled struct {
|
||||
s string
|
||||
}
|
||||
|
||||
// Error prints Work Unit Cancellation error
|
||||
func (e *ErrCancelled) Error() string {
|
||||
return e.s
|
||||
}
|
200
vendor/gopkg.in/go-playground/pool.v3/limited_pool.go
generated
vendored
200
vendor/gopkg.in/go-playground/pool.v3/limited_pool.go
generated
vendored
|
@ -1,200 +0,0 @@
|
|||
package pool
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"runtime"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var _ Pool = new(limitedPool)
|
||||
|
||||
// limitedPool contains all information for a limited pool instance.
|
||||
type limitedPool struct {
|
||||
workers uint
|
||||
work chan *workUnit
|
||||
cancel chan struct{}
|
||||
closed bool
|
||||
m sync.RWMutex
|
||||
}
|
||||
|
||||
// NewLimited returns a new limited pool instance
|
||||
func NewLimited(workers uint) Pool {
|
||||
|
||||
if workers == 0 {
|
||||
panic("invalid workers '0'")
|
||||
}
|
||||
|
||||
p := &limitedPool{
|
||||
workers: workers,
|
||||
}
|
||||
|
||||
p.initialize()
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *limitedPool) initialize() {
|
||||
|
||||
p.work = make(chan *workUnit, p.workers*2)
|
||||
p.cancel = make(chan struct{})
|
||||
p.closed = false
|
||||
|
||||
// fire up workers here
|
||||
for i := 0; i < int(p.workers); i++ {
|
||||
p.newWorker(p.work, p.cancel)
|
||||
}
|
||||
}
|
||||
|
||||
// passing work and cancel channels to newWorker() to avoid any potential race condition
|
||||
// betweeen p.work read & write
|
||||
func (p *limitedPool) newWorker(work chan *workUnit, cancel chan struct{}) {
|
||||
go func(p *limitedPool) {
|
||||
|
||||
var wu *workUnit
|
||||
|
||||
defer func(p *limitedPool) {
|
||||
if err := recover(); err != nil {
|
||||
|
||||
trace := make([]byte, 1<<16)
|
||||
n := runtime.Stack(trace, true)
|
||||
|
||||
s := fmt.Sprintf(errRecovery, err, string(trace[:int(math.Min(float64(n), float64(7000)))]))
|
||||
|
||||
iwu := wu
|
||||
iwu.err = &ErrRecovery{s: s}
|
||||
close(iwu.done)
|
||||
|
||||
// need to fire up new worker to replace this one as this one is exiting
|
||||
p.newWorker(p.work, p.cancel)
|
||||
}
|
||||
}(p)
|
||||
|
||||
var value interface{}
|
||||
var err error
|
||||
|
||||
for {
|
||||
select {
|
||||
case wu = <-work:
|
||||
|
||||
// possible for one more nilled out value to make it
|
||||
// through when channel closed, don't quite understad the why
|
||||
if wu == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// support for individual WorkUnit cancellation
|
||||
// and batch job cancellation
|
||||
if wu.cancelled.Load() == nil {
|
||||
value, err = wu.fn(wu)
|
||||
|
||||
wu.writing.Store(struct{}{})
|
||||
|
||||
// need to check again in case the WorkFunc cancelled this unit of work
|
||||
// otherwise we'll have a race condition
|
||||
if wu.cancelled.Load() == nil && wu.cancelling.Load() == nil {
|
||||
wu.value, wu.err = value, err
|
||||
|
||||
// who knows where the Done channel is being listened to on the other end
|
||||
// don't want this to block just because caller is waiting on another unit
|
||||
// of work to be done first so we use close
|
||||
close(wu.done)
|
||||
}
|
||||
}
|
||||
|
||||
case <-cancel:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
}(p)
|
||||
}
|
||||
|
||||
// Queue queues the work to be run, and starts processing immediately
|
||||
func (p *limitedPool) Queue(fn WorkFunc) WorkUnit {
|
||||
|
||||
w := &workUnit{
|
||||
done: make(chan struct{}),
|
||||
fn: fn,
|
||||
}
|
||||
|
||||
go func() {
|
||||
p.m.RLock()
|
||||
if p.closed {
|
||||
w.err = &ErrPoolClosed{s: errClosed}
|
||||
if w.cancelled.Load() == nil {
|
||||
close(w.done)
|
||||
}
|
||||
p.m.RUnlock()
|
||||
return
|
||||
}
|
||||
|
||||
p.work <- w
|
||||
|
||||
p.m.RUnlock()
|
||||
}()
|
||||
|
||||
return w
|
||||
}
|
||||
|
||||
// Reset reinitializes a pool that has been closed/cancelled back to a working state.
|
||||
// if the pool has not been closed/cancelled, nothing happens as the pool is still in
|
||||
// a valid running state
|
||||
func (p *limitedPool) Reset() {
|
||||
|
||||
p.m.Lock()
|
||||
|
||||
if !p.closed {
|
||||
p.m.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// cancelled the pool, not closed it, pool will be usable after calling initialize().
|
||||
p.initialize()
|
||||
p.m.Unlock()
|
||||
}
|
||||
|
||||
func (p *limitedPool) closeWithError(err error) {
|
||||
|
||||
p.m.Lock()
|
||||
|
||||
if !p.closed {
|
||||
close(p.cancel)
|
||||
close(p.work)
|
||||
p.closed = true
|
||||
}
|
||||
|
||||
for wu := range p.work {
|
||||
wu.cancelWithError(err)
|
||||
}
|
||||
|
||||
p.m.Unlock()
|
||||
}
|
||||
|
||||
// Cancel cleans up the pool workers and channels and cancels and pending
|
||||
// work still yet to be processed.
|
||||
// call Reset() to reinitialize the pool for use.
|
||||
func (p *limitedPool) Cancel() {
|
||||
|
||||
err := &ErrCancelled{s: errCancelled}
|
||||
p.closeWithError(err)
|
||||
}
|
||||
|
||||
// Close cleans up the pool workers and channels and cancels any pending
|
||||
// work still yet to be processed.
|
||||
// call Reset() to reinitialize the pool for use.
|
||||
func (p *limitedPool) Close() {
|
||||
|
||||
err := &ErrPoolClosed{s: errClosed}
|
||||
p.closeWithError(err)
|
||||
}
|
||||
|
||||
// Batch creates a new Batch object for queueing Work Units separate from any others
|
||||
// that may be running on the pool. Grouping these Work Units together allows for individual
|
||||
// Cancellation of the Batch Work Units without affecting anything else running on the pool
|
||||
// as well as outputting the results on a channel as they complete.
|
||||
// NOTE: Batch is not reusable, once QueueComplete() has been called it's lifetime has been sealed
|
||||
// to completing the Queued items.
|
||||
func (p *limitedPool) Batch() Batch {
|
||||
return newBatch(p)
|
||||
}
|
185
vendor/gopkg.in/go-playground/pool.v3/limited_pool_benchmarks_test.go
generated
vendored
185
vendor/gopkg.in/go-playground/pool.v3/limited_pool_benchmarks_test.go
generated
vendored
|
@ -1,185 +0,0 @@
|
|||
package pool
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func BenchmarkLimitedSmallRun(b *testing.B) {
|
||||
|
||||
res := make([]WorkUnit, 10)
|
||||
|
||||
b.ReportAllocs()
|
||||
|
||||
pool := NewLimited(10)
|
||||
defer pool.Close()
|
||||
|
||||
fn := func(wu WorkUnit) (interface{}, error) {
|
||||
time.Sleep(time.Millisecond * 500)
|
||||
if wu.IsCancelled() {
|
||||
return nil, nil
|
||||
}
|
||||
time.Sleep(time.Millisecond * 500)
|
||||
return 1, nil
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
res[i] = pool.Queue(fn)
|
||||
}
|
||||
|
||||
var count int
|
||||
|
||||
for _, cw := range res {
|
||||
|
||||
cw.Wait()
|
||||
|
||||
if cw.Error() == nil {
|
||||
count += cw.Value().(int)
|
||||
}
|
||||
}
|
||||
|
||||
if count != 10 {
|
||||
b.Fatal("Count Incorrect")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLimitedSmallCancel(b *testing.B) {
|
||||
|
||||
res := make([]WorkUnit, 0, 20)
|
||||
|
||||
b.ReportAllocs()
|
||||
|
||||
pool := NewLimited(4)
|
||||
defer pool.Close()
|
||||
|
||||
newFunc := func(i int) WorkFunc {
|
||||
return func(wu WorkUnit) (interface{}, error) {
|
||||
time.Sleep(time.Millisecond * 500)
|
||||
if wu.IsCancelled() {
|
||||
return nil, nil
|
||||
}
|
||||
time.Sleep(time.Millisecond * 500)
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < 20; i++ {
|
||||
if i == 6 {
|
||||
pool.Cancel()
|
||||
}
|
||||
res = append(res, pool.Queue(newFunc(i)))
|
||||
}
|
||||
|
||||
for _, wrk := range res {
|
||||
if wrk == nil {
|
||||
continue
|
||||
}
|
||||
wrk.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLimitedLargeCancel(b *testing.B) {
|
||||
|
||||
res := make([]WorkUnit, 0, 1000)
|
||||
|
||||
b.ReportAllocs()
|
||||
|
||||
pool := NewLimited(4)
|
||||
defer pool.Close()
|
||||
|
||||
newFunc := func(i int) WorkFunc {
|
||||
return func(wu WorkUnit) (interface{}, error) {
|
||||
time.Sleep(time.Millisecond * 500)
|
||||
if wu.IsCancelled() {
|
||||
return nil, nil
|
||||
}
|
||||
time.Sleep(time.Millisecond * 500)
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < 1000; i++ {
|
||||
if i == 6 {
|
||||
pool.Cancel()
|
||||
}
|
||||
res = append(res, pool.Queue(newFunc(i)))
|
||||
}
|
||||
|
||||
for _, wrk := range res {
|
||||
if wrk == nil {
|
||||
continue
|
||||
}
|
||||
wrk.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLimitedOverconsumeLargeRun(b *testing.B) {
|
||||
|
||||
res := make([]WorkUnit, 100)
|
||||
|
||||
b.ReportAllocs()
|
||||
|
||||
pool := NewLimited(25)
|
||||
defer pool.Close()
|
||||
|
||||
newFunc := func(i int) WorkFunc {
|
||||
return func(wu WorkUnit) (interface{}, error) {
|
||||
time.Sleep(time.Millisecond * 500)
|
||||
if wu.IsCancelled() {
|
||||
return nil, nil
|
||||
}
|
||||
time.Sleep(time.Millisecond * 500)
|
||||
return 1, nil
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
res[i] = pool.Queue(newFunc(i))
|
||||
}
|
||||
|
||||
var count int
|
||||
|
||||
for _, cw := range res {
|
||||
|
||||
cw.Wait()
|
||||
|
||||
count += cw.Value().(int)
|
||||
}
|
||||
|
||||
if count != 100 {
|
||||
b.Fatalf("Count Incorrect, Expected '100' Got '%d'", count)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLimitedBatchSmallRun(b *testing.B) {
|
||||
|
||||
fn := func(wu WorkUnit) (interface{}, error) {
|
||||
time.Sleep(time.Millisecond * 500)
|
||||
if wu.IsCancelled() {
|
||||
return nil, nil
|
||||
}
|
||||
time.Sleep(time.Millisecond * 500)
|
||||
return 1, nil
|
||||
}
|
||||
|
||||
pool := NewLimited(10)
|
||||
defer pool.Close()
|
||||
|
||||
batch := pool.Batch()
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
batch.Queue(fn)
|
||||
}
|
||||
|
||||
batch.QueueComplete()
|
||||
|
||||
var count int
|
||||
|
||||
for cw := range batch.Results() {
|
||||
count += cw.Value().(int)
|
||||
}
|
||||
|
||||
if count != 10 {
|
||||
b.Fatal("Count Incorrect")
|
||||
}
|
||||
}
|
177
vendor/gopkg.in/go-playground/pool.v3/limited_pool_test.go
generated
vendored
177
vendor/gopkg.in/go-playground/pool.v3/limited_pool_test.go
generated
vendored
|
@ -1,177 +0,0 @@
|
|||
package pool
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
. "gopkg.in/go-playground/assert.v1"
|
||||
)
|
||||
|
||||
// NOTES:
|
||||
// - Run "go test" to run tests
|
||||
// - Run "gocov test | gocov report" to report on test converage by file
|
||||
// - Run "gocov test | gocov annotate -" to report on all code and functions, those ,marked with "MISS" were never called
|
||||
//
|
||||
// or
|
||||
//
|
||||
// -- may be a good idea to change to output path to somewherelike /tmp
|
||||
// go test -coverprofile cover.out && go tool cover -html=cover.out -o cover.html
|
||||
//
|
||||
|
||||
func TestPool(t *testing.T) {
|
||||
|
||||
var res []WorkUnit
|
||||
|
||||
pool := NewLimited(4)
|
||||
defer pool.Close()
|
||||
|
||||
newFunc := func(d time.Duration) WorkFunc {
|
||||
return func(WorkUnit) (interface{}, error) {
|
||||
time.Sleep(d)
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < 4; i++ {
|
||||
wu := pool.Queue(newFunc(time.Second * 1))
|
||||
res = append(res, wu)
|
||||
}
|
||||
|
||||
var count int
|
||||
|
||||
for _, wu := range res {
|
||||
wu.Wait()
|
||||
Equal(t, wu.Error(), nil)
|
||||
Equal(t, wu.Value(), nil)
|
||||
count++
|
||||
}
|
||||
|
||||
Equal(t, count, 4)
|
||||
|
||||
pool.Close() // testing no error occurs as Close will be called twice once defer pool.Close() fires
|
||||
}
|
||||
|
||||
func TestCancel(t *testing.T) {
|
||||
|
||||
m := new(sync.RWMutex)
|
||||
var closed bool
|
||||
c := make(chan WorkUnit, 100)
|
||||
|
||||
pool := limitedGpool
|
||||
defer pool.Close()
|
||||
|
||||
newFunc := func(d time.Duration) WorkFunc {
|
||||
return func(WorkUnit) (interface{}, error) {
|
||||
time.Sleep(d)
|
||||
return 1, nil
|
||||
}
|
||||
}
|
||||
|
||||
go func(ch chan WorkUnit) {
|
||||
for i := 0; i < 40; i++ {
|
||||
|
||||
go func(ch chan WorkUnit) {
|
||||
m.RLock()
|
||||
if closed {
|
||||
m.RUnlock()
|
||||
return
|
||||
}
|
||||
|
||||
ch <- pool.Queue(newFunc(time.Second * 1))
|
||||
m.RUnlock()
|
||||
}(ch)
|
||||
}
|
||||
}(c)
|
||||
|
||||
time.Sleep(time.Second * 1)
|
||||
pool.Cancel()
|
||||
m.Lock()
|
||||
closed = true
|
||||
close(c)
|
||||
m.Unlock()
|
||||
|
||||
var count int
|
||||
|
||||
for wu := range c {
|
||||
wu.Wait()
|
||||
|
||||
if wu.Error() != nil {
|
||||
_, ok := wu.Error().(*ErrCancelled)
|
||||
if !ok {
|
||||
_, ok = wu.Error().(*ErrPoolClosed)
|
||||
if ok {
|
||||
Equal(t, wu.Error().Error(), "ERROR: Work Unit added/run after the pool had been closed or cancelled")
|
||||
}
|
||||
} else {
|
||||
Equal(t, wu.Error().Error(), "ERROR: Work Unit Cancelled")
|
||||
}
|
||||
|
||||
Equal(t, ok, true)
|
||||
continue
|
||||
}
|
||||
|
||||
count += wu.Value().(int)
|
||||
}
|
||||
|
||||
NotEqual(t, count, 40)
|
||||
|
||||
// reset and test again
|
||||
pool.Reset()
|
||||
|
||||
wrk := pool.Queue(newFunc(time.Millisecond * 300))
|
||||
wrk.Wait()
|
||||
|
||||
_, ok := wrk.Value().(int)
|
||||
Equal(t, ok, true)
|
||||
|
||||
wrk = pool.Queue(newFunc(time.Millisecond * 300))
|
||||
time.Sleep(time.Second * 1)
|
||||
wrk.Cancel()
|
||||
wrk.Wait() // proving we don't get stuck here after cancel
|
||||
Equal(t, wrk.Error(), nil)
|
||||
|
||||
pool.Reset() // testing that we can do this and nothing bad will happen as it checks if pool closed
|
||||
|
||||
pool.Close()
|
||||
|
||||
wu := pool.Queue(newFunc(time.Second * 1))
|
||||
wu.Wait()
|
||||
NotEqual(t, wu.Error(), nil)
|
||||
Equal(t, wu.Error().Error(), "ERROR: Work Unit added/run after the pool had been closed or cancelled")
|
||||
}
|
||||
|
||||
func TestPanicRecovery(t *testing.T) {
|
||||
|
||||
pool := NewLimited(2)
|
||||
defer pool.Close()
|
||||
|
||||
newFunc := func(d time.Duration, i int) WorkFunc {
|
||||
return func(WorkUnit) (interface{}, error) {
|
||||
if i == 1 {
|
||||
panic("OMG OMG OMG! something bad happened!")
|
||||
}
|
||||
time.Sleep(d)
|
||||
return 1, nil
|
||||
}
|
||||
}
|
||||
|
||||
var wrk WorkUnit
|
||||
for i := 0; i < 4; i++ {
|
||||
time.Sleep(time.Second * 1)
|
||||
if i == 1 {
|
||||
wrk = pool.Queue(newFunc(time.Second*1, i))
|
||||
continue
|
||||
}
|
||||
pool.Queue(newFunc(time.Second*1, i))
|
||||
}
|
||||
wrk.Wait()
|
||||
|
||||
NotEqual(t, wrk.Error(), nil)
|
||||
Equal(t, wrk.Error().Error()[0:90], "ERROR: Work Unit failed due to a recoverable error: 'OMG OMG OMG! something bad happened!'")
|
||||
|
||||
}
|
||||
|
||||
func TestBadWorkerCount(t *testing.T) {
|
||||
PanicMatches(t, func() { NewLimited(0) }, "invalid workers '0'")
|
||||
}
|
32
vendor/gopkg.in/go-playground/pool.v3/pool.go
generated
vendored
32
vendor/gopkg.in/go-playground/pool.v3/pool.go
generated
vendored
|
@ -1,32 +0,0 @@
|
|||
package pool
|
||||
|
||||
// Pool contains all information for a pool instance.
|
||||
type Pool interface {
|
||||
|
||||
// Queue queues the work to be run, and starts processing immediately
|
||||
Queue(fn WorkFunc) WorkUnit
|
||||
|
||||
// Reset reinitializes a pool that has been closed/cancelled back to a working
|
||||
// state. if the pool has not been closed/cancelled, nothing happens as the pool
|
||||
// is still in a valid running state
|
||||
Reset()
|
||||
|
||||
// Cancel cancels any pending work still not committed to processing.
|
||||
// Call Reset() to reinitialize the pool for use.
|
||||
Cancel()
|
||||
|
||||
// Close cleans up pool data and cancels any pending work still not committed
|
||||
// to processing. Call Reset() to reinitialize the pool for use.
|
||||
Close()
|
||||
|
||||
// Batch creates a new Batch object for queueing Work Units separate from any
|
||||
// others that may be running on the pool. Grouping these Work Units together
|
||||
// allows for individual Cancellation of the Batch Work Units without affecting
|
||||
// anything else running on the pool as well as outputting the results on a
|
||||
// channel as they complete. NOTE: Batch is not reusable, once QueueComplete()
|
||||
// has been called it's lifetime has been sealed to completing the Queued items.
|
||||
Batch() Batch
|
||||
}
|
||||
|
||||
// WorkFunc is the function type needed by the pool for execution
|
||||
type WorkFunc func(wu WorkUnit) (interface{}, error)
|
36
vendor/gopkg.in/go-playground/pool.v3/pool_test.go
generated
vendored
36
vendor/gopkg.in/go-playground/pool.v3/pool_test.go
generated
vendored
|
@ -1,36 +0,0 @@
|
|||
package pool
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// NOTES:
|
||||
// - Run "go test" to run tests
|
||||
// - Run "gocov test | gocov report" to report on test converage by file
|
||||
// - Run "gocov test | gocov annotate -" to report on all code and functions, those ,marked with "MISS" were never called
|
||||
//
|
||||
// or
|
||||
//
|
||||
// -- may be a good idea to change to output path to somewherelike /tmp
|
||||
// go test -coverprofile cover.out && go tool cover -html=cover.out -o cover.html
|
||||
//
|
||||
|
||||
// global pool for testing long running pool
|
||||
var limitedGpool Pool
|
||||
|
||||
var unlimitedGpool Pool
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
|
||||
// setup
|
||||
limitedGpool = NewLimited(4)
|
||||
defer limitedGpool.Close()
|
||||
|
||||
unlimitedGpool = New()
|
||||
defer unlimitedGpool.Close()
|
||||
|
||||
os.Exit(m.Run())
|
||||
|
||||
// teardown
|
||||
}
|
164
vendor/gopkg.in/go-playground/pool.v3/unlimited_pool.go
generated
vendored
164
vendor/gopkg.in/go-playground/pool.v3/unlimited_pool.go
generated
vendored
|
@ -1,164 +0,0 @@
|
|||
package pool
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"runtime"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var _ Pool = new(unlimitedPool)
|
||||
|
||||
// unlimitedPool contains all information for an unlimited pool instance.
|
||||
type unlimitedPool struct {
|
||||
units []*workUnit
|
||||
cancel chan struct{}
|
||||
closed bool
|
||||
m sync.Mutex
|
||||
}
|
||||
|
||||
// New returns a new unlimited pool instance
|
||||
func New() Pool {
|
||||
|
||||
p := &unlimitedPool{
|
||||
units: make([]*workUnit, 0, 4), // init capacity to 4, assuming if using pool, then probably a few have at least that many and will reduce array resizes
|
||||
}
|
||||
p.initialize()
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *unlimitedPool) initialize() {
|
||||
|
||||
p.cancel = make(chan struct{})
|
||||
p.closed = false
|
||||
}
|
||||
|
||||
// Queue queues the work to be run, and starts processing immediately
|
||||
func (p *unlimitedPool) Queue(fn WorkFunc) WorkUnit {
|
||||
|
||||
w := &workUnit{
|
||||
done: make(chan struct{}),
|
||||
fn: fn,
|
||||
}
|
||||
|
||||
p.m.Lock()
|
||||
|
||||
if p.closed {
|
||||
w.err = &ErrPoolClosed{s: errClosed}
|
||||
// if w.cancelled.Load() == nil {
|
||||
close(w.done)
|
||||
// }
|
||||
p.m.Unlock()
|
||||
return w
|
||||
}
|
||||
|
||||
p.units = append(p.units, w)
|
||||
go func(w *workUnit) {
|
||||
|
||||
defer func(w *workUnit) {
|
||||
if err := recover(); err != nil {
|
||||
|
||||
trace := make([]byte, 1<<16)
|
||||
n := runtime.Stack(trace, true)
|
||||
|
||||
s := fmt.Sprintf(errRecovery, err, string(trace[:int(math.Min(float64(n), float64(7000)))]))
|
||||
|
||||
w.cancelled.Store(struct{}{})
|
||||
w.err = &ErrRecovery{s: s}
|
||||
close(w.done)
|
||||
}
|
||||
}(w)
|
||||
|
||||
// support for individual WorkUnit cancellation
|
||||
// and batch job cancellation
|
||||
if w.cancelled.Load() == nil {
|
||||
val, err := w.fn(w)
|
||||
|
||||
w.writing.Store(struct{}{})
|
||||
|
||||
// need to check again in case the WorkFunc cancelled this unit of work
|
||||
// otherwise we'll have a race condition
|
||||
if w.cancelled.Load() == nil && w.cancelling.Load() == nil {
|
||||
|
||||
w.value, w.err = val, err
|
||||
|
||||
// who knows where the Done channel is being listened to on the other end
|
||||
// don't want this to block just because caller is waiting on another unit
|
||||
// of work to be done first so we use close
|
||||
close(w.done)
|
||||
}
|
||||
}
|
||||
}(w)
|
||||
|
||||
p.m.Unlock()
|
||||
|
||||
return w
|
||||
}
|
||||
|
||||
// Reset reinitializes a pool that has been closed/cancelled back to a working state.
|
||||
// if the pool has not been closed/cancelled, nothing happens as the pool is still in
|
||||
// a valid running state
|
||||
func (p *unlimitedPool) Reset() {
|
||||
|
||||
p.m.Lock()
|
||||
|
||||
if !p.closed {
|
||||
p.m.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// cancelled the pool, not closed it, pool will be usable after calling initialize().
|
||||
p.initialize()
|
||||
p.m.Unlock()
|
||||
}
|
||||
|
||||
func (p *unlimitedPool) closeWithError(err error) {
|
||||
|
||||
p.m.Lock()
|
||||
|
||||
if !p.closed {
|
||||
close(p.cancel)
|
||||
p.closed = true
|
||||
|
||||
// clear out array values for garbage collection, but reuse array just in case going to reuse
|
||||
// go in reverse order to try and cancel as many as possbile
|
||||
// one at end are less likely to have run than those at the beginning
|
||||
for i := len(p.units) - 1; i >= 0; i-- {
|
||||
p.units[i].cancelWithError(err)
|
||||
p.units[i] = nil
|
||||
}
|
||||
|
||||
p.units = p.units[0:0]
|
||||
}
|
||||
|
||||
p.m.Unlock()
|
||||
}
|
||||
|
||||
// Cancel cleans up the pool workers and channels and cancels and pending
|
||||
// work still yet to be processed.
|
||||
// call Reset() to reinitialize the pool for use.
|
||||
func (p *unlimitedPool) Cancel() {
|
||||
|
||||
err := &ErrCancelled{s: errCancelled}
|
||||
p.closeWithError(err)
|
||||
}
|
||||
|
||||
// Close cleans up the pool workers and channels and cancels any pending
|
||||
// work still yet to be processed.
|
||||
// call Reset() to reinitialize the pool for use.
|
||||
func (p *unlimitedPool) Close() {
|
||||
|
||||
err := &ErrPoolClosed{s: errClosed}
|
||||
p.closeWithError(err)
|
||||
}
|
||||
|
||||
// Batch creates a new Batch object for queueing Work Units separate from any others
|
||||
// that may be running on the pool. Grouping these Work Units together allows for individual
|
||||
// Cancellation of the Batch Work Units without affecting anything else running on the pool
|
||||
// as well as outputting the results on a channel as they complete.
|
||||
// NOTE: Batch is not reusable, once QueueComplete() has been called it's lifetime has been sealed
|
||||
// to completing the Queued items.
|
||||
func (p *unlimitedPool) Batch() Batch {
|
||||
return newBatch(p)
|
||||
}
|
185
vendor/gopkg.in/go-playground/pool.v3/unlimited_pool_benchmarks_test.go
generated
vendored
185
vendor/gopkg.in/go-playground/pool.v3/unlimited_pool_benchmarks_test.go
generated
vendored
|
@ -1,185 +0,0 @@
|
|||
package pool
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func BenchmarkUnlimitedSmallRun(b *testing.B) {
|
||||
|
||||
res := make([]WorkUnit, 10)
|
||||
|
||||
b.ReportAllocs()
|
||||
|
||||
pool := New()
|
||||
defer pool.Close()
|
||||
|
||||
fn := func(wu WorkUnit) (interface{}, error) {
|
||||
time.Sleep(time.Millisecond * 500)
|
||||
if wu.IsCancelled() {
|
||||
return nil, nil
|
||||
}
|
||||
time.Sleep(time.Millisecond * 500)
|
||||
return 1, nil
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
res[i] = pool.Queue(fn)
|
||||
}
|
||||
|
||||
var count int
|
||||
|
||||
for _, cw := range res {
|
||||
|
||||
cw.Wait()
|
||||
|
||||
if cw.Error() == nil {
|
||||
count += cw.Value().(int)
|
||||
}
|
||||
}
|
||||
|
||||
if count != 10 {
|
||||
b.Fatal("Count Incorrect")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnlimitedSmallCancel(b *testing.B) {
|
||||
|
||||
res := make([]WorkUnit, 0, 20)
|
||||
|
||||
b.ReportAllocs()
|
||||
|
||||
pool := New()
|
||||
defer pool.Close()
|
||||
|
||||
newFunc := func(i int) WorkFunc {
|
||||
return func(wu WorkUnit) (interface{}, error) {
|
||||
time.Sleep(time.Millisecond * 500)
|
||||
if wu.IsCancelled() {
|
||||
return nil, nil
|
||||
}
|
||||
time.Sleep(time.Millisecond * 500)
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < 20; i++ {
|
||||
if i == 6 {
|
||||
pool.Cancel()
|
||||
}
|
||||
res = append(res, pool.Queue(newFunc(i)))
|
||||
}
|
||||
|
||||
for _, wrk := range res {
|
||||
if wrk == nil {
|
||||
continue
|
||||
}
|
||||
wrk.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnlimitedLargeCancel(b *testing.B) {
|
||||
|
||||
res := make([]WorkUnit, 0, 1000)
|
||||
|
||||
b.ReportAllocs()
|
||||
|
||||
pool := New()
|
||||
defer pool.Close()
|
||||
|
||||
newFunc := func(i int) WorkFunc {
|
||||
return func(wu WorkUnit) (interface{}, error) {
|
||||
time.Sleep(time.Millisecond * 500)
|
||||
if wu.IsCancelled() {
|
||||
return nil, nil
|
||||
}
|
||||
time.Sleep(time.Millisecond * 500)
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < 1000; i++ {
|
||||
if i == 6 {
|
||||
pool.Cancel()
|
||||
}
|
||||
res = append(res, pool.Queue(newFunc(i)))
|
||||
}
|
||||
|
||||
for _, wrk := range res {
|
||||
if wrk == nil {
|
||||
continue
|
||||
}
|
||||
wrk.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnlimitedLargeRun(b *testing.B) {
|
||||
|
||||
res := make([]WorkUnit, 100)
|
||||
|
||||
b.ReportAllocs()
|
||||
|
||||
pool := New()
|
||||
defer pool.Close()
|
||||
|
||||
newFunc := func(i int) WorkFunc {
|
||||
return func(wu WorkUnit) (interface{}, error) {
|
||||
time.Sleep(time.Millisecond * 500)
|
||||
if wu.IsCancelled() {
|
||||
return nil, nil
|
||||
}
|
||||
time.Sleep(time.Millisecond * 500)
|
||||
return 1, nil
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
res[i] = pool.Queue(newFunc(i))
|
||||
}
|
||||
|
||||
var count int
|
||||
|
||||
for _, cw := range res {
|
||||
|
||||
cw.Wait()
|
||||
|
||||
count += cw.Value().(int)
|
||||
}
|
||||
|
||||
if count != 100 {
|
||||
b.Fatalf("Count Incorrect, Expected '100' Got '%d'", count)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnlimitedBatchSmallRun(b *testing.B) {
|
||||
|
||||
fn := func(wu WorkUnit) (interface{}, error) {
|
||||
time.Sleep(time.Millisecond * 500)
|
||||
if wu.IsCancelled() {
|
||||
return nil, nil
|
||||
}
|
||||
time.Sleep(time.Millisecond * 500)
|
||||
return 1, nil
|
||||
}
|
||||
|
||||
pool := New()
|
||||
defer pool.Close()
|
||||
|
||||
batch := pool.Batch()
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
batch.Queue(fn)
|
||||
}
|
||||
|
||||
batch.QueueComplete()
|
||||
|
||||
var count int
|
||||
|
||||
for cw := range batch.Results() {
|
||||
count += cw.Value().(int)
|
||||
}
|
||||
|
||||
if count != 10 {
|
||||
b.Fatal("Count Incorrect")
|
||||
}
|
||||
}
|
194
vendor/gopkg.in/go-playground/pool.v3/unlimited_pool_test.go
generated
vendored
194
vendor/gopkg.in/go-playground/pool.v3/unlimited_pool_test.go
generated
vendored
|
@ -1,194 +0,0 @@
|
|||
package pool
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
. "gopkg.in/go-playground/assert.v1"
|
||||
)
|
||||
|
||||
// NOTES:
|
||||
// - Run "go test" to run tests
|
||||
// - Run "gocov test | gocov report" to report on test coverage by file
|
||||
// - Run "gocov test | gocov annotate -" to report on all code and functions; those marked with "MISS" were never called
|
||||
//
|
||||
// or
|
||||
//
|
||||
// -- may be a good idea to change the output path to somewhere like /tmp
|
||||
// go test -coverprofile cover.out && go tool cover -html=cover.out -o cover.html
|
||||
//
|
||||
|
||||
func TestUnlimitedPool(t *testing.T) {
|
||||
|
||||
var res []WorkUnit
|
||||
|
||||
pool := New()
|
||||
defer pool.Close()
|
||||
|
||||
newFunc := func(d time.Duration) WorkFunc {
|
||||
return func(WorkUnit) (interface{}, error) {
|
||||
time.Sleep(d)
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < 4; i++ {
|
||||
wu := pool.Queue(newFunc(time.Second * 1))
|
||||
res = append(res, wu)
|
||||
}
|
||||
|
||||
var count int
|
||||
|
||||
for _, wu := range res {
|
||||
wu.Wait()
|
||||
Equal(t, wu.Error(), nil)
|
||||
Equal(t, wu.Value(), nil)
|
||||
count++
|
||||
}
|
||||
|
||||
Equal(t, count, 4)
|
||||
|
||||
pool.Close() // testing no error occurs as Close will be called twice once defer pool.Close() fires
|
||||
}
|
||||
|
||||
// TestUnlimitedCancel exercises cancelling a shared unlimited pool while
// work is still being queued from many goroutines, then resets and reuses
// the pool, and finally verifies that queueing after Close reports the
// pool-closed error.
func TestUnlimitedCancel(t *testing.T) {

	// m guards `closed` and the result channel `c` so the producer
	// goroutines stop sending once the main goroutine has closed c.
	m := new(sync.RWMutex)
	var closed bool
	c := make(chan WorkUnit, 100)

	// unlimitedGpool is a package-level shared pool declared elsewhere in
	// this package; the test deliberately reuses it rather than New().
	pool := unlimitedGpool
	defer pool.Close()

	newFunc := func(d time.Duration) WorkFunc {
		return func(WorkUnit) (interface{}, error) {
			time.Sleep(d)
			return 1, nil
		}
	}

	// Fan out 40 producers, each queueing one unit unless the result
	// channel has already been closed.
	go func(ch chan WorkUnit) {
		for i := 0; i < 40; i++ {

			go func(ch chan WorkUnit) {
				m.RLock()
				if !closed {
					ch <- pool.Queue(newFunc(time.Second * 1))
				}
				m.RUnlock()
			}(ch)
		}
	}(c)

	// Let some units start, then cancel the pool and stop the producers.
	time.Sleep(time.Second * 1)
	pool.Cancel()
	m.Lock()
	closed = true
	close(c)
	m.Unlock()

	var count int

	for wu := range c {
		wu.Wait()

		if wu.Error() != nil {
			// A unit may fail either because it was cancelled in
			// flight (*ErrCancelled) or because it was queued after
			// cancellation (*ErrPoolClosed); both are acceptable.
			_, ok := wu.Error().(*ErrCancelled)
			if !ok {
				_, ok = wu.Error().(*ErrPoolClosed)
				if ok {
					Equal(t, wu.Error().Error(), "ERROR: Work Unit added/run after the pool had been closed or cancelled")
				}
			} else {
				Equal(t, wu.Error().Error(), "ERROR: Work Unit Cancelled")
			}

			Equal(t, ok, true)
			continue
		}

		count += wu.Value().(int)
	}

	// Cancellation happened mid-stream, so not all 40 units can have
	// completed successfully.
	NotEqual(t, count, 40)

	// reset and test again
	pool.Reset()

	wrk := pool.Queue(newFunc(time.Millisecond * 300))
	wrk.Wait()

	// After Reset the pool must process work normally again.
	_, ok := wrk.Value().(int)
	Equal(t, ok, true)

	// Cancelling a unit that has already finished must be a harmless no-op.
	wrk = pool.Queue(newFunc(time.Millisecond * 300))
	time.Sleep(time.Second * 1)
	wrk.Cancel()
	wrk.Wait() // proving we don't get stuck here after cancel
	Equal(t, wrk.Error(), nil)

	pool.Reset() // testing that we can do this and nothing bad will happen as it checks if pool closed

	pool.Close()

	// Queueing after Close must surface the pool-closed error.
	wu := pool.Queue(newFunc(time.Second * 1))
	wu.Wait()
	NotEqual(t, wu.Error(), nil)
	Equal(t, wu.Error().Error(), "ERROR: Work Unit added/run after the pool had been closed or cancelled")
}
|
||||
|
||||
// TestCancelFromWithin verifies that cancelling the pool while a work unit
// is mid-flight surfaces the cancellation error on that unit.
func TestCancelFromWithin(t *testing.T) {
	pool := New()
	defer pool.Close()

	newFunc := func(d time.Duration) WorkFunc {
		return func(wu WorkUnit) (interface{}, error) {
			time.Sleep(d)
			// Cooperative cancellation check inside the work body.
			if wu.IsCancelled() {
				return nil, nil
			}

			return 1, nil
		}
	}

	q := pool.Queue(newFunc(time.Second * 5))

	// Cancel while the unit is still sleeping (2s into a 5s sleep).
	time.Sleep(time.Second * 2)
	pool.Cancel()

	// NOTE(review): the unit's result is read without calling q.Wait();
	// this relies on Cancel having settled the unit before returning —
	// TODO confirm against the pool implementation.
	Equal(t, q.Value() == nil, true)
	NotEqual(t, q.Error(), nil)
	Equal(t, q.Error().Error(), "ERROR: Work Unit Cancelled")
}
|
||||
|
||||
func TestUnlimitedPanicRecovery(t *testing.T) {
|
||||
|
||||
pool := New()
|
||||
defer pool.Close()
|
||||
|
||||
newFunc := func(d time.Duration, i int) WorkFunc {
|
||||
return func(WorkUnit) (interface{}, error) {
|
||||
if i == 1 {
|
||||
panic("OMG OMG OMG! something bad happened!")
|
||||
}
|
||||
time.Sleep(d)
|
||||
return 1, nil
|
||||
}
|
||||
}
|
||||
|
||||
var wrk WorkUnit
|
||||
for i := 0; i < 4; i++ {
|
||||
time.Sleep(time.Second * 1)
|
||||
if i == 1 {
|
||||
wrk = pool.Queue(newFunc(time.Second*1, i))
|
||||
continue
|
||||
}
|
||||
pool.Queue(newFunc(time.Second*1, i))
|
||||
}
|
||||
wrk.Wait()
|
||||
|
||||
NotEqual(t, wrk.Error(), nil)
|
||||
Equal(t, wrk.Error().Error()[0:90], "ERROR: Work Unit failed due to a recoverable error: 'OMG OMG OMG! something bad happened!'")
|
||||
}
|
77
vendor/gopkg.in/go-playground/pool.v3/work_unit.go
generated
vendored
77
vendor/gopkg.in/go-playground/pool.v3/work_unit.go
generated
vendored
|
@ -1,77 +0,0 @@
|
|||
package pool
|
||||
|
||||
import "sync/atomic"
|
||||
|
||||
// WorkUnit represents a single unit of work submitted to a pool and gives
// the caller access to its result once processed.
type WorkUnit interface {

	// Wait blocks until WorkUnit has been processed or cancelled
	Wait()

	// Value returns the work unit's return value
	Value() interface{}

	// Error returns the Work Unit's error
	Error() error

	// Cancel cancels this specific unit of work, if not already committed
	// to processing.
	Cancel()

	// IsCancelled returns if the Work Unit has been cancelled.
	// NOTE: After Checking IsCancelled(), if it returns false the
	// Work Unit can no longer be cancelled and will use your returned values.
	IsCancelled() bool
}
|
||||
|
||||
// Compile-time assertion that *workUnit satisfies the WorkUnit interface.
var _ WorkUnit = new(workUnit)

// workUnit is the concrete WorkUnit implementation used by the pools.
type workUnit struct {
	value      interface{}   // result recorded for the unit
	err        error         // error recorded for the unit (set on cancellation in cancelWithError)
	done       chan struct{} // closed when the unit settles (see cancelWithError); releases Wait
	fn         WorkFunc      // the function to execute for this unit
	cancelled  atomic.Value  // non-nil once the unit has been cancelled
	cancelling atomic.Value  // non-nil once a cancellation attempt has begun
	writing    atomic.Value  // non-nil once IsCancelled commits the unit to using its results
}
|
||||
|
||||
// Cancel cancels this specific unit of work, if not already committed to processing.
func (wu *workUnit) Cancel() {
	// Delegate to cancelWithError with the standard cancellation error.
	wu.cancelWithError(&ErrCancelled{s: errCancelled})
}
|
||||
|
||||
// cancelWithError attempts to cancel the work unit, recording err as the
// unit's error and closing done. The attempt is a no-op if the unit has
// already committed to using its results (wu.writing set via IsCancelled)
// or has already been cancelled.
func (wu *workUnit) cancelWithError(err error) {

	// Flag that a cancellation attempt is in progress before checking state.
	wu.cancelling.Store(struct{}{})

	// NOTE(review): the load/store pair below is not atomic as a whole;
	// presumably the writing/cancelling handshake with IsCancelled makes
	// this safe — confirm against the worker implementation.
	if wu.writing.Load() == nil && wu.cancelled.Load() == nil {
		wu.cancelled.Store(struct{}{})
		wu.err = err
		close(wu.done)
	}
}
|
||||
|
||||
// Wait blocks until WorkUnit has been processed or cancelled
func (wu *workUnit) Wait() {
	// done is closed exactly once when the unit settles, releasing all waiters.
	<-wu.done
}
|
||||
|
||||
// Value returns the work units return value
|
||||
func (wu *workUnit) Value() interface{} {
|
||||
return wu.value
|
||||
}
|
||||
|
||||
// Error returns the Work Unit's error
|
||||
func (wu *workUnit) Error() error {
|
||||
return wu.err
|
||||
}
|
||||
|
||||
// IsCancelled returns if the Work Unit has been cancelled.
// NOTE: After Checking IsCancelled(), if it returns false the
// Work Unit can no longer be cancelled and will use your returned values.
func (wu *workUnit) IsCancelled() bool {
	wu.writing.Store(struct{}{}) // ensure that after this check we are committed as cannot be cancelled if not already
	return wu.cancelled.Load() != nil
}
|
Loading…
Reference in a new issue