
Merge pull request #44 from vmarkovtsev/master

Vendor github.com/Jeffail/tunny
Vadim Markovtsev 7 years ago
parent
commit
9413d4be24

+ 19 - 0
vendor/github.com/jeffail/tunny/LICENSE

@@ -0,0 +1,19 @@
+Copyright (c) 2014 Ashley Jeffs
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

+ 229 - 0
vendor/github.com/jeffail/tunny/README.md

@@ -0,0 +1,229 @@
+![Tunny](tunny_logo.png "Tunny")
+
+Tunny is a Golang library for spawning and managing a goroutine pool.
+
+The API is synchronous and simple to use. Jobs are allocated to a worker when one becomes available.
+
+https://godoc.org/github.com/Jeffail/tunny
+
+## How to install:
+
+```bash
+go get github.com/jeffail/tunny
+```
+
+## How to use:
+
+The most obvious use for a goroutine pool would be limiting heavy jobs to the number of CPUs available. In the example below we funnel work from an arbitrary number of HTTP request goroutines through our pool.
+
+```go
+package main
+
+import (
+	"io/ioutil"
+	"net/http"
+	"runtime"
+
+	"github.com/jeffail/tunny"
+)
+
+func main() {
+	numCPUs := runtime.NumCPU()
+	runtime.GOMAXPROCS(numCPUs+1) // numCPUs hot threads + one for async tasks.
+
+	pool, _ := tunny.CreatePool(numCPUs, func(object interface{}) interface{} {
+		input, _ := object.([]byte)
+
+		// Do something that takes a lot of work
+		output := input
+
+		return output
+	}).Open()
+
+	defer pool.Close()
+
+	http.HandleFunc("/work", func(w http.ResponseWriter, r *http.Request) {
+		input, err := ioutil.ReadAll(r.Body)
+		if err != nil {
+			http.Error(w, "Internal error", http.StatusInternalServerError)
+			return
+		}
+
+		// Send work to our pool
+		result, _ := pool.SendWork(input)
+
+		w.Write(result.([]byte))
+	})
+
+	http.ListenAndServe(":8080", nil)
+}
+```
+
+Tunny supports timeouts. You can replace the `SendWork` call above with the following:
+
+```go
+		// Or, alternatively, send it with a timeout (in this case 5 seconds).
+		result, err := pool.SendWorkTimed(5000, input)
+		if err != nil {
+			http.Error(w, "Request timed out", http.StatusRequestTimeout)
+			return
+		}
+```
+
+## Can I send a closure instead of data?
+
+Yes, the arguments passed to the worker are boxed as `interface{}`, so they can actually be a func. You can implement this yourself, or, if you aren't bothered about return values, you can use:
+
+```go
+exampleChannel := make(chan int)
+
+pool, _ := tunny.CreatePoolGeneric(numCPUs).Open()
+
+err := pool.SendWork(func() {
+	/* Do your hard work here, usual rules of closures apply here,
+	 * so you can return values like so:
+	 */
+	exampleChannel <- 10
+})
+
+if err != nil {
+	// You done goofed
+}
+```
+
+## How do I give my workers state?
+
+Tunny workers implement the `TunnyWorker` interface. Simply implement this interface to have your own objects (and state) act as your workers.
+
+```go
+/*
+TunnyWorker - The basic interface of a tunny worker.
+*/
+type TunnyWorker interface {
+
+	// Called for each job, expects the result to be returned synchronously
+	TunnyJob(interface{}) interface{}
+
+	// Called after each job, this indicates whether the worker is ready for the next job.
+	// The default implementation is to return true always. If false is returned then the
+	// method is called every five milliseconds until either true is returned or the pool
+	// is closed.
+	TunnyReady() bool
+}
+```
+
+Here is a short example:
+
+```go
+type customWorker struct {
+	// TODO: Put some state here
+}
+
+// Use this call to block further jobs if necessary
+func (worker *customWorker) TunnyReady() bool {
+	return true
+}
+
+// This is where the work actually happens
+func (worker *customWorker) TunnyJob(data interface{}) interface{} {
+	/* TODO: Use and modify state
+	 * there's no need for thread safety paradigms here unless the
+	 * data is being accessed from another goroutine outside of
+	 * the pool.
+	 */
+	if outputStr, ok := data.(string); ok {
+		return ("custom job done: " + outputStr)
+	}
+	return nil
+}
+
+func TestCustomWorkers(t *testing.T) {
+	outChan := make(chan int, 10)
+
+	wg := new(sync.WaitGroup)
+	wg.Add(10)
+
+	workers := make([]tunny.TunnyWorker, 4)
+	for i := range workers {
+		workers[i] = &(customWorker{})
+	}
+
+	pool, _ := tunny.CreateCustomPool(workers).Open()
+
+	defer pool.Close()
+
+	for i := 0; i < 10; i++ {
+		go func() {
+			value, _ := pool.SendWork("hello world")
+			fmt.Println(value.(string))
+
+			wg.Done()
+		}()
+	}
+
+	wg.Wait()
+}
+```
+
+The TunnyReady method allows you to use your state to determine whether or not a worker should take on another job. For example, your worker could hold a counter of how many jobs it has done, and perhaps after a certain amount it should perform another act before taking on more work. It's important to use TunnyReady for these occasions, since blocking the TunnyJob call would hold up the waiting client.
+
+It is recommended that you do not block TunnyReady() whilst you wait for some condition to change, since this can prevent the pool from closing the worker goroutines. Currently, TunnyReady is called at 5 millisecond intervals until you answer true or the pool is closed.
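+
+Here is a minimal sketch of that pattern (the `restingWorker` type and its threshold are illustrative assumptions, not part of Tunny):
+
+```go
+// Hypothetical worker that pauses briefly after every 100 jobs.
+type restingWorker struct {
+	jobCount int
+}
+
+func (w *restingWorker) TunnyJob(data interface{}) interface{} {
+	w.jobCount++
+	return data
+}
+
+// Polled by the pool; returning false delays the next job.
+func (w *restingWorker) TunnyReady() bool {
+	if w.jobCount >= 100 {
+		w.jobCount = 0 // e.g. flush buffers or rotate a log file here
+		return false   // skip one poll cycle (~5ms) before resuming
+	}
+	return true
+}
+```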
+
+## I need more control
+
+You crazy fool, let's take this up to the next level. You can optionally implement `TunnyExtendedWorker` for more control.
+
+```go
+/*
+TunnyExtendedWorker - An optional interface that can be implemented if the worker needs
+more control over its state.
+*/
+type TunnyExtendedWorker interface {
+
+	// Called when the pool is opened, this will be called before any jobs are sent.
+	TunnyInitialize()
+
+	// Called when the pool is closed, this will be called after all jobs are completed.
+	TunnyTerminate()
+}
+```
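+
+As a hedged sketch, a worker might use these hooks to hold a resource for the lifetime of the pool (the `connWorker` type, the `net` import and the address are illustrative assumptions, not part of Tunny):
+
+```go
+// Hypothetical worker that owns a connection for its whole lifetime.
+type connWorker struct {
+	conn net.Conn // any per-worker resource works the same way
+}
+
+// Called once when the pool opens, before any jobs are sent.
+func (w *connWorker) TunnyInitialize() {
+	w.conn, _ = net.Dial("tcp", "localhost:6379") // error handling elided
+}
+
+// Called once when the pool closes, after all jobs are completed.
+func (w *connWorker) TunnyTerminate() {
+	if w.conn != nil {
+		w.conn.Close()
+	}
+}
+
+// Only this worker touches w.conn, so no locking is needed here.
+func (w *connWorker) TunnyJob(data interface{}) interface{} {
+	return data
+}
+
+func (w *connWorker) TunnyReady() bool { return true }
+```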
+
+## Can a worker detect when a timeout occurs?
+
+Yes, you can also implement the `TunnyInterruptable` interface.
+
+```go
+/*
+TunnyInterruptable - An optional interface that can be implemented in order to allow the
+worker to drop jobs when they are abandoned.
+*/
+type TunnyInterruptable interface {
+
+	// Called when the current job has been abandoned by the client.
+	TunnyInterrupt()
+}
+```
+
+This method will be called in the event that a timeout occurs whilst waiting for the result. `TunnyInterrupt` is called from a newly spawned goroutine, so you'll need to create your own mechanism for stopping your worker mid-way through a job.
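+
+One possible mechanism is a buffered interrupt channel that the job polls between steps. This is a sketch under assumptions: the `cancellableWorker` type is hypothetical, the `time` import is assumed, and a token left over from a late interrupt could cancel the following job.
+
+```go
+// Hypothetical worker whose job can be abandoned mid-way.
+// Construct it with: &cancellableWorker{interrupt: make(chan struct{}, 1)}
+type cancellableWorker struct {
+	interrupt chan struct{}
+}
+
+func (w *cancellableWorker) TunnyJob(data interface{}) interface{} {
+	for i := 0; i < 1000; i++ { // long job broken into small steps
+		select {
+		case <-w.interrupt:
+			return nil // the pool discards this result anyway
+		default:
+			time.Sleep(time.Millisecond)
+		}
+	}
+	return data
+}
+
+func (w *cancellableWorker) TunnyReady() bool { return true }
+
+// Called from a separate goroutine when the client times out.
+func (w *cancellableWorker) TunnyInterrupt() {
+	w.interrupt <- struct{}{} // never blocks thanks to the buffer of 1
+}
+```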
+
+## Can SendWork be called asynchronously?
+
+There are helper functions, SendWorkAsync and SendWorkTimedAsync, which behave like their respective synchronous calls but take an optional second argument of type func(interface{}, error). This closure is called when a result is returned, and it can be nil if there is no need for it.
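+
+For example, a minimal sketch using the pool and `input` from the earlier examples (the `log` import is an assumption):
+
+```go
+// Receive the result in a callback once the job completes.
+pool.SendWorkAsync(input, func(result interface{}, err error) {
+	if err != nil {
+		log.Printf("job failed: %v", err)
+		return
+	}
+	log.Printf("job done: %v", result)
+})
+
+// Fire-and-forget: pass nil when the result does not matter.
+pool.SendWorkAsync(input, nil)
+```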
+
+However, if you find yourself in a situation where the sync return is not necessary then chances are you don't actually need Tunny at all. Golang is all about making concurrent programming simple by nature, and using Tunny for implementing simple async worker calls defeats the great work of the language spec and adds overhead that isn't necessary.
+
+## Behaviours and caveats:
+
+### - Workers request jobs on an ad-hoc basis
+
+When there is a backlog of jobs waiting to be serviced, and all workers are occupied, a job will not be assigned to a worker until that worker is ready for its next job. This means workers do not develop their own individual queues. Instead, the backlog is shared by the entire pool.
+
+This means an individual worker is able to halt, or spend exceptional lengths of time on a single request without hindering the flow of any other requests, provided there are other active workers in the pool.
+
+### - A job can be dropped before work is begun
+
+Tunny has support for specified timeouts at the work request level. If this timeout is triggered whilst waiting for a worker to become available, then the request is dropped entirely and no effort is wasted on the abandoned request.
+
+### - Backlogged jobs are FIFO, for now
+
+When a job arrives and all workers are occupied, the waiting goroutine will block at a select statement whilst waiting to be assigned a worker. In practice this seems to create a FIFO queue, implying that this is how the Golang implementation deals with select blocks, channels and multiple reading goroutines.
+
+However, I haven't found a guarantee of this behaviour in the Golang documentation, so I cannot guarantee that this will always be the case.

+ 379 - 0
vendor/github.com/jeffail/tunny/tunny.go

@@ -0,0 +1,379 @@
+/*
+Copyright (c) 2014 Ashley Jeffs
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/
+
+// Package tunny implements a simple pool for maintaining independent worker goroutines.
+package tunny
+
+import (
+	"errors"
+	"expvar"
+	"reflect"
+	"strconv"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// Errors that are used throughout the Tunny API.
+var (
+	ErrPoolAlreadyRunning = errors.New("the pool is already running")
+	ErrPoolNotRunning     = errors.New("the pool is not running")
+	ErrJobNotFunc         = errors.New("generic worker not given a func()")
+	ErrWorkerClosed       = errors.New("worker was closed")
+	ErrJobTimedOut        = errors.New("job request timed out")
+)
+
+/*
+TunnyWorker - The basic interface of a tunny worker.
+*/
+type TunnyWorker interface {
+
+	// Called for each job, expects the result to be returned synchronously
+	TunnyJob(interface{}) interface{}
+
+	// Called after each job, this indicates whether the worker is ready for the next job.
+	// The default implementation is to return true always. If false is returned then the
+	// method is called every five milliseconds until either true is returned or the pool
+	// is closed. For efficiency you should have this call block until your worker is ready,
+	// otherwise you introduce a 5ms latency between jobs.
+	TunnyReady() bool
+}
+
+/*
+TunnyExtendedWorker - An optional interface that can be implemented if the worker needs
+more control over its state.
+*/
+type TunnyExtendedWorker interface {
+
+	// Called when the pool is opened, this will be called before any jobs are sent.
+	TunnyInitialize()
+
+	// Called when the pool is closed, this will be called after all jobs are completed.
+	TunnyTerminate()
+}
+
+/*
+TunnyInterruptable - An optional interface that can be implemented in order to allow the
+worker to drop jobs when they are abandoned.
+*/
+type TunnyInterruptable interface {
+
+	// Called when the current job has been abandoned by the client.
+	TunnyInterrupt()
+}
+
+/*
+Default and very basic implementation of a tunny worker. This worker holds a closure which
+is assigned at construction, and this closure is called on each job.
+*/
+type tunnyDefaultWorker struct {
+	job *func(interface{}) interface{}
+}
+
+func (worker *tunnyDefaultWorker) TunnyJob(data interface{}) interface{} {
+	return (*worker.job)(data)
+}
+
+func (worker *tunnyDefaultWorker) TunnyReady() bool {
+	return true
+}
+
+/*
+WorkPool contains the structures and methods required to communicate with your pool, it must
+be opened before sending work and closed when all jobs are completed.
+
+You may open and close a pool as many times as you wish, calling close is a blocking call that
+guarantees all goroutines are stopped.
+*/
+type WorkPool struct {
+	workers          []*workerWrapper
+	selects          []reflect.SelectCase
+	statusMutex      sync.RWMutex
+	running          uint32
+	pendingAsyncJobs int32
+}
+
+func (pool *WorkPool) isRunning() bool {
+	return (atomic.LoadUint32(&pool.running) == 1)
+}
+
+func (pool *WorkPool) setRunning(running bool) {
+	if running {
+		atomic.SwapUint32(&pool.running, 1)
+	} else {
+		atomic.SwapUint32(&pool.running, 0)
+	}
+}
+
+/*
+Open all channels and launch the background goroutines managed by the pool.
+*/
+func (pool *WorkPool) Open() (*WorkPool, error) {
+	pool.statusMutex.Lock()
+	defer pool.statusMutex.Unlock()
+
+	if !pool.isRunning() {
+
+		pool.selects = make([]reflect.SelectCase, len(pool.workers))
+
+		for i, workerWrapper := range pool.workers {
+			workerWrapper.Open()
+
+			pool.selects[i] = reflect.SelectCase{
+				Dir:  reflect.SelectRecv,
+				Chan: reflect.ValueOf(workerWrapper.readyChan),
+			}
+		}
+
+		pool.setRunning(true)
+		return pool, nil
+
+	}
+	return nil, ErrPoolAlreadyRunning
+}
+
+/*
+Close all channels and goroutines managed by the pool.
+*/
+func (pool *WorkPool) Close() error {
+	pool.statusMutex.Lock()
+	defer pool.statusMutex.Unlock()
+
+	if pool.isRunning() {
+		for _, workerWrapper := range pool.workers {
+			workerWrapper.Close()
+		}
+		for _, workerWrapper := range pool.workers {
+			workerWrapper.Join()
+		}
+		pool.setRunning(false)
+		return nil
+	}
+	return ErrPoolNotRunning
+}
+
+/*
+CreatePool - Creates a pool of workers, and takes a closure argument which is the action
+to perform for each job.
+*/
+func CreatePool(numWorkers int, job func(interface{}) interface{}) *WorkPool {
+	pool := WorkPool{running: 0}
+
+	pool.workers = make([]*workerWrapper, numWorkers)
+	for i := range pool.workers {
+		newWorker := workerWrapper{
+			worker: &(tunnyDefaultWorker{&job}),
+		}
+		pool.workers[i] = &newWorker
+	}
+
+	return &pool
+}
+
+/*
+CreatePoolGeneric - Creates a pool of generic workers. When sending work to a pool of
+generic workers you send a closure (func()) which is the job to perform.
+*/
+func CreatePoolGeneric(numWorkers int) *WorkPool {
+
+	return CreatePool(numWorkers, func(jobCall interface{}) interface{} {
+		if method, ok := jobCall.(func()); ok {
+			method()
+			return nil
+		}
+		return ErrJobNotFunc
+	})
+
+}
+
+/*
+CreateCustomPool - Creates a pool for an array of custom workers. The custom workers
+must implement TunnyWorker, and may also optionally implement TunnyExtendedWorker and
+TunnyInterruptable.
+*/
+func CreateCustomPool(customWorkers []TunnyWorker) *WorkPool {
+	pool := WorkPool{running: 0}
+
+	pool.workers = make([]*workerWrapper, len(customWorkers))
+	for i := range pool.workers {
+		newWorker := workerWrapper{
+			worker: customWorkers[i],
+		}
+		pool.workers[i] = &newWorker
+	}
+
+	return &pool
+}
+
+/*
+SendWorkTimed - Send a job to a worker and return the result, this is a synchronous
+call with a timeout.
+*/
+func (pool *WorkPool) SendWorkTimed(milliTimeout time.Duration, jobData interface{}) (interface{}, error) {
+	pool.statusMutex.RLock()
+	defer pool.statusMutex.RUnlock()
+
+	if pool.isRunning() {
+		before := time.Now()
+
+		// Create a new time out timer
+		timeout := time.NewTimer(milliTimeout * time.Millisecond)
+		defer timeout.Stop()
+
+		// Create new selectcase[] and add time out case
+		selectCases := append(pool.selects[:], reflect.SelectCase{
+			Dir:  reflect.SelectRecv,
+			Chan: reflect.ValueOf(timeout.C),
+		})
+
+		// Wait for workers, or time out
+		if chosen, _, ok := reflect.Select(selectCases); ok {
+
+			// Check if the selected index is a worker, otherwise we timed out
+			if chosen < (len(selectCases) - 1) {
+				pool.workers[chosen].jobChan <- jobData
+
+				timeoutRemain := time.NewTimer((milliTimeout * time.Millisecond) - time.Since(before))
+				defer timeoutRemain.Stop()
+
+				// Wait for response, or time out
+				select {
+				case data, open := <-pool.workers[chosen].outputChan:
+					if !open {
+						return nil, ErrWorkerClosed
+					}
+					return data, nil
+				case <-timeoutRemain.C:
+					/* If we time out here we also need to ensure that the output is still
+					 * collected and that the worker can move on. Therefore, we fork the
+					 * waiting process into a new goroutine.
+					 */
+					go func() {
+						pool.workers[chosen].Interrupt()
+						<-pool.workers[chosen].outputChan
+					}()
+					return nil, ErrJobTimedOut
+				}
+			} else {
+				return nil, ErrJobTimedOut
+			}
+		} else {
+			// This means the chosen channel was closed
+			return nil, ErrWorkerClosed
+		}
+	} else {
+		return nil, ErrPoolNotRunning
+	}
+}
+
+/*
+SendWorkTimedAsync - Send a timed job to a worker without blocking, and optionally
+send the result to a receiving closure. You may set the closure to nil if no
+further actions are required.
+*/
+func (pool *WorkPool) SendWorkTimedAsync(
+	milliTimeout time.Duration,
+	jobData interface{},
+	after func(interface{}, error),
+) {
+	atomic.AddInt32(&pool.pendingAsyncJobs, 1)
+	go func() {
+		defer atomic.AddInt32(&pool.pendingAsyncJobs, -1)
+		result, err := pool.SendWorkTimed(milliTimeout, jobData)
+		if after != nil {
+			after(result, err)
+		}
+	}()
+}
+
+/*
+SendWork - Send a job to a worker and return the result, this is a synchronous call.
+*/
+func (pool *WorkPool) SendWork(jobData interface{}) (interface{}, error) {
+	pool.statusMutex.RLock()
+	defer pool.statusMutex.RUnlock()
+
+	if pool.isRunning() {
+		if chosen, _, ok := reflect.Select(pool.selects); ok && chosen >= 0 {
+			pool.workers[chosen].jobChan <- jobData
+			result, open := <-pool.workers[chosen].outputChan
+
+			if !open {
+				return nil, ErrWorkerClosed
+			}
+			return result, nil
+		}
+		return nil, ErrWorkerClosed
+	}
+	return nil, ErrPoolNotRunning
+}
+
+/*
+SendWorkAsync - Send a job to a worker without blocking, and optionally send the
+result to a receiving closure. You may set the closure to nil if no further actions
+are required.
+*/
+func (pool *WorkPool) SendWorkAsync(jobData interface{}, after func(interface{}, error)) {
+	atomic.AddInt32(&pool.pendingAsyncJobs, 1)
+	go func() {
+		defer atomic.AddInt32(&pool.pendingAsyncJobs, -1)
+		result, err := pool.SendWork(jobData)
+		if after != nil {
+			after(result, err)
+		}
+	}()
+}
+
+/*
+NumPendingAsyncJobs - Get the current count of async jobs either in flight, or waiting for a worker
+*/
+func (pool *WorkPool) NumPendingAsyncJobs() int32 {
+	return atomic.LoadInt32(&pool.pendingAsyncJobs)
+}
+
+/*
+NumWorkers - Number of workers in the pool
+*/
+func (pool *WorkPool) NumWorkers() int {
+	return len(pool.workers)
+}
+
+type liveVarAccessor func() string
+
+func (a liveVarAccessor) String() string {
+	return a()
+}
+
+/*
+PublishExpvarMetrics - Publishes the NumWorkers and NumPendingAsyncJobs to expvars
+*/
+func (pool *WorkPool) PublishExpvarMetrics(poolName string) {
+	ret := expvar.NewMap(poolName)
+	asyncJobsFn := func() string {
+		return strconv.FormatInt(int64(pool.NumPendingAsyncJobs()), 10)
+	}
+	numWorkersFn := func() string {
+		return strconv.FormatInt(int64(pool.NumWorkers()), 10)
+	}
+	ret.Set("pendingAsyncJobs", liveVarAccessor(asyncJobsFn))
+	ret.Set("numWorkers", liveVarAccessor(numWorkersFn))
+}

BIN
vendor/github.com/jeffail/tunny/tunny_logo.png


+ 286 - 0
vendor/github.com/jeffail/tunny/tunny_test.go

@@ -0,0 +1,286 @@
+/*
+Copyright (c) 2014 Ashley Jeffs
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/
+
+package tunny
+
+import (
+	"sync"
+	"testing"
+	"time"
+)
+
+/*--------------------------------------------------------------------------------------------------
+ */
+
+func TestBasicJob(t *testing.T) {
+	pool, err := CreatePool(1, func(in interface{}) interface{} {
+		intVal := in.(int)
+		return intVal * 2
+	}).Open()
+	if err != nil {
+		t.Errorf("Failed to create pool: %v", err)
+		return
+	}
+	defer pool.Close()
+
+	for i := 0; i < 1; i++ {
+		ret, err := pool.SendWork(10)
+		if err != nil {
+			t.Errorf("Failed to send work: %v", err)
+			return
+		}
+		retInt := ret.(int)
+		if ret != 20 {
+			t.Errorf("Wrong return value: %v != %v", 20, retInt)
+		}
+	}
+}
+
+func TestParallelJobs(t *testing.T) {
+	nWorkers := 10
+
+	jobGroup := sync.WaitGroup{}
+	testGroup := sync.WaitGroup{}
+
+	pool, err := CreatePool(nWorkers, func(in interface{}) interface{} {
+		jobGroup.Done()
+		jobGroup.Wait()
+
+		intVal := in.(int)
+		return intVal * 2
+	}).Open()
+	if err != nil {
+		t.Errorf("Failed to create pool: %v", err)
+		return
+	}
+	defer pool.Close()
+
+	for j := 0; j < 1; j++ {
+		jobGroup.Add(nWorkers)
+		testGroup.Add(nWorkers)
+
+		for i := 0; i < nWorkers; i++ {
+			go func() {
+				ret, err := pool.SendWork(10)
+				if err != nil {
+					t.Errorf("Failed to send work: %v", err)
+					return
+				}
+				retInt := ret.(int)
+				if ret != 20 {
+					t.Errorf("Wrong return value: %v != %v", 20, retInt)
+				}
+
+				testGroup.Done()
+			}()
+		}
+
+		testGroup.Wait()
+	}
+}
+
+/*--------------------------------------------------------------------------------------------------
+ */
+
+// Basic worker implementation
+type dummyWorker struct {
+	ready bool
+	t     *testing.T
+}
+
+func (d *dummyWorker) TunnyJob(in interface{}) interface{} {
+	if !d.ready {
+		d.t.Errorf("TunnyJob called without polling TunnyReady")
+	}
+	d.ready = false
+	return in
+}
+
+func (d *dummyWorker) TunnyReady() bool {
+	d.ready = true
+	return d.ready
+}
+
+// Test the pool with a basic worker implementation
+func TestDummyWorker(t *testing.T) {
+	pool, err := CreateCustomPool([]TunnyWorker{&dummyWorker{t: t}}).Open()
+	if err != nil {
+		t.Errorf("Failed to create pool: %v", err)
+		return
+	}
+	defer pool.Close()
+
+	for i := 0; i < 100; i++ {
+		if result, err := pool.SendWork(12); err != nil {
+			t.Errorf("Failed to send work: %v", err)
+		} else if resInt, ok := result.(int); !ok || resInt != 12 {
+			t.Errorf("Unexpected result from job: %v != %v", 12, result)
+		}
+	}
+}
+
+// Extended worker implementation
+type dummyExtWorker struct {
+	dummyWorker
+
+	initialized bool
+}
+
+func (d *dummyExtWorker) TunnyJob(in interface{}) interface{} {
+	if !d.initialized {
+		d.t.Errorf("TunnyJob called without calling TunnyInitialize")
+	}
+	return d.dummyWorker.TunnyJob(in)
+}
+
+func (d *dummyExtWorker) TunnyInitialize() {
+	d.initialized = true
+}
+
+func (d *dummyExtWorker) TunnyTerminate() {
+	if !d.initialized {
+		d.t.Errorf("TunnyTerminate called without calling TunnyInitialize")
+	}
+	d.initialized = false
+}
+
+// Test the pool with an extended worker implementation
+func TestDummyExtWorker(t *testing.T) {
+	pool, err := CreateCustomPool(
+		[]TunnyWorker{
+			&dummyExtWorker{
+				dummyWorker: dummyWorker{t: t},
+			},
+		}).Open()
+	if err != nil {
+		t.Errorf("Failed to create pool: %v", err)
+		return
+	}
+	defer pool.Close()
+
+	for i := 0; i < 100; i++ {
+		if result, err := pool.SendWork(12); err != nil {
+			t.Errorf("Failed to send work: %v", err)
+		} else if resInt, ok := result.(int); !ok || resInt != 12 {
+			t.Errorf("Unexpected result from job: %v != %v", 12, result)
+		}
+	}
+}
+
+// Extended and interruptible worker implementation
+type dummyExtIntWorker struct {
+	dummyExtWorker
+
+	jobLock *sync.Mutex
+}
+
+func (d *dummyExtIntWorker) TunnyJob(in interface{}) interface{} {
+	d.jobLock.Lock()
+	d.jobLock.Unlock()
+
+	return d.dummyExtWorker.TunnyJob(in)
+}
+
+func (d *dummyExtIntWorker) TunnyReady() bool {
+	d.jobLock.Lock()
+
+	return d.dummyExtWorker.TunnyReady()
+}
+
+func (d *dummyExtIntWorker) TunnyInterrupt() {
+	d.jobLock.Unlock()
+}
+
+// Test the pool with an extended and interruptible worker implementation
+func TestDummyExtIntWorker(t *testing.T) {
+	pool, err := CreateCustomPool(
+		[]TunnyWorker{
+			&dummyExtIntWorker{
+				dummyExtWorker: dummyExtWorker{
+					dummyWorker: dummyWorker{t: t},
+				},
+				jobLock: &sync.Mutex{},
+			},
+		}).Open()
+	if err != nil {
+		t.Errorf("Failed to create pool: %v", err)
+		return
+	}
+	defer pool.Close()
+
+	for i := 0; i < 100; i++ {
+		if _, err := pool.SendWorkTimed(1, nil); err == nil {
+			t.Errorf("Expected timeout from dummyExtIntWorker.")
+		}
+	}
+}
+
+func TestNumWorkers(t *testing.T) {
+	numWorkers := 10
+	pool, err := CreatePoolGeneric(numWorkers).Open()
+	if err != nil {
+		t.Errorf("Failed to create pool: %v", err)
+		return
+	}
+	defer pool.Close()
+	actual := pool.NumWorkers()
+	if actual != numWorkers {
+		t.Errorf("Expected to get %d workers, but got %d", numWorkers, actual)
+	}
+}
+
+var waitHalfSecond = func() {
+	time.Sleep(500 * time.Millisecond)
+}
+
+func TestNumPendingReportsAllWorkersWithNoWork(t *testing.T) {
+	numWorkers := 10
+	pool, err := CreatePoolGeneric(numWorkers).Open()
+	if err != nil {
+		t.Errorf("Failed to create pool: %v", err)
+		return
+	}
+	defer pool.Close()
+	actual := pool.NumPendingAsyncJobs()
+	if actual != 0 {
+		t.Errorf("Expected to get 0 pending jobs when pool is quiet, but got %d", actual)
+	}
+}
+
+func TestNumPendingReportsNotAllWorkersWhenSomeBusy(t *testing.T) {
+	numWorkers := 10
+	pool, err := CreatePoolGeneric(numWorkers).Open()
+	if err != nil {
+		t.Errorf("Failed to create pool: %v", err)
+		return
+	}
+	defer pool.Close()
+	pool.SendWorkAsync(waitHalfSecond, nil)
+	actual := pool.NumPendingAsyncJobs()
+	expected := int32(1)
+	if actual != expected {
+		t.Errorf("Expected to get %d pending jobs when pool has work, but got %d", expected, actual)
+	}
+}
+
+/*--------------------------------------------------------------------------------------------------
+ */

+ 110 - 0
vendor/github.com/jeffail/tunny/worker.go

@@ -0,0 +1,110 @@
+/*
+Copyright (c) 2014 Ashley Jeffs
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/
+
+package tunny
+
+import (
+	"sync/atomic"
+	"time"
+)
+
+type workerWrapper struct {
+	readyChan  chan int
+	jobChan    chan interface{}
+	outputChan chan interface{}
+	poolOpen   uint32
+	worker     TunnyWorker
+}
+
+func (wrapper *workerWrapper) Loop() {
+
+	// TODO: Configure?
+	tout := time.Duration(5)
+
+	for !wrapper.worker.TunnyReady() {
+		// It's sad that we can't simply check if jobChan is closed here.
+		if atomic.LoadUint32(&wrapper.poolOpen) == 0 {
+			break
+		}
+		time.Sleep(tout * time.Millisecond)
+	}
+
+	wrapper.readyChan <- 1
+
+	for data := range wrapper.jobChan {
+		wrapper.outputChan <- wrapper.worker.TunnyJob(data)
+		for !wrapper.worker.TunnyReady() {
+			if atomic.LoadUint32(&wrapper.poolOpen) == 0 {
+				break
+			}
+			time.Sleep(tout * time.Millisecond)
+		}
+		wrapper.readyChan <- 1
+	}
+
+	close(wrapper.readyChan)
+	close(wrapper.outputChan)
+
+}
+
+func (wrapper *workerWrapper) Open() {
+	if extWorker, ok := wrapper.worker.(TunnyExtendedWorker); ok {
+		extWorker.TunnyInitialize()
+	}
+
+	wrapper.readyChan = make(chan int)
+	wrapper.jobChan = make(chan interface{})
+	wrapper.outputChan = make(chan interface{})
+
+	atomic.SwapUint32(&wrapper.poolOpen, uint32(1))
+
+	go wrapper.Loop()
+}
+
+// Follow this with Join(), otherwise terminate isn't called on the worker
+func (wrapper *workerWrapper) Close() {
+	close(wrapper.jobChan)
+
+	// Breaks the worker out of a Ready() -> false loop
+	atomic.SwapUint32(&wrapper.poolOpen, uint32(0))
+}
+
+func (wrapper *workerWrapper) Join() {
+	// Ensure that both the ready and output channels are closed
+	for {
+		_, readyOpen := <-wrapper.readyChan
+		_, outputOpen := <-wrapper.outputChan
+		if !readyOpen && !outputOpen {
+			break
+		}
+	}
+
+	if extWorker, ok := wrapper.worker.(TunnyExtendedWorker); ok {
+		extWorker.TunnyTerminate()
+	}
+}
+
+func (wrapper *workerWrapper) Interrupt() {
+	if extWorker, ok := wrapper.worker.(TunnyInterruptable); ok {
+		extWorker.TunnyInterrupt()
+	}
+}