
Fix golint warnings in files not on the top level

Vadim Markovtsev, 7 years ago
commit 67e185333d

cmd/hercules/embed.go (+3 -1)

@@ -19,7 +19,9 @@ func main() {
 		panic(err)
 	}
 	defer file.Close()
-	file.WriteString("package main\n\nconst PLUGIN_TEMPLATE_SOURCE = `")
+	file.WriteString("package main\n\n" +
+		"// PluginTemplateSource is the source code template of a Hercules plugin.\n" +
+		"const PluginTemplateSource = `")
 	file.Write(contents)
 	file.WriteString("`\n")
 }
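
For reference, the file emitted by this generator after the rename has roughly the following shape; the template body between the backticks is elided, only the structure is shown:

package main

// PluginTemplateSource is the source code template of a Hercules plugin.
const PluginTemplateSource = `... body of the plugin template, embedded verbatim ...`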

cmd/hercules/generate_plugin.go (+4 -3)

@@ -17,7 +17,8 @@ import (
 
 //go:generate go run embed.go
 
-var SHLIB_EXT = map[string]string{
+// ShlibExts is the mapping between platform names and shared library file name extensions.
+var ShlibExts = map[string]string{
 	"window":  "dll",
 	"linux":   "so",
 	"darwin":  "dylib",
@@ -44,7 +45,7 @@ var generatePluginCmd = &cobra.Command{
 			panic(err)
 		}
 		outputPath := path.Join(outputDir, strings.ToLower(strings.Join(splitted, "_"))+".go")
-		gen := template.Must(template.New("plugin").Parse(PLUGIN_TEMPLATE_SOURCE))
+		gen := template.Must(template.New("plugin").Parse(PluginTemplateSource))
 		outFile, err := os.Create(outputPath)
 		if err != nil {
 			panic(err)
@@ -57,7 +58,7 @@ var generatePluginCmd = &cobra.Command{
 			flag = strings.ToLower(strings.Join(splitted, "-"))
 		}
 		outputBase := path.Base(outputPath)
-		shlib := outputBase[:len(outputBase)-2] + SHLIB_EXT[runtime.GOOS]
+		shlib := outputBase[:len(outputBase)-2] + ShlibExts[runtime.GOOS]
 		protoBuf := outputPath[:len(outputPath)-3] + ".proto"
 		pbGo := outputPath[:len(outputPath)-3] + ".pb.go"
 		dict := map[string]string{
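
To illustrate the name derivation above, here is a minimal standalone sketch; the plugin name, output directory, and GOOS value are illustrative, and a local shlibExts map stands in for the package-level ShlibExts:

package main

import (
	"fmt"
	"path"
	"strings"
)

var shlibExts = map[string]string{"linux": "so", "darwin": "dylib"}

func main() {
	splitted := []string{"My", "Plugin"}
	outputDir := "."
	outputPath := path.Join(outputDir, strings.ToLower(strings.Join(splitted, "_"))+".go")
	outputBase := path.Base(outputPath)
	shlib := outputBase[:len(outputBase)-2] + shlibExts["linux"] // strips the trailing "go"
	protoBuf := outputPath[:len(outputPath)-3] + ".proto"        // strips the trailing ".go"
	pbGo := outputPath[:len(outputPath)-3] + ".pb.go"
	fmt.Println(outputPath, shlib, protoBuf, pbGo)
	// my_plugin.go my_plugin.so my_plugin.proto my_plugin.pb.go
}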

cmd/hercules/root.go (+5 -3)

@@ -30,11 +30,13 @@ import (
 	"gopkg.in/src-d/hercules.v3/pb"
 )
 
-type OneLineWriter struct {
+// oneLineWriter splits the output data by lines and outputs one on top of another using '\r'.
+// It also does some dark magic to handle Git statuses.
+type oneLineWriter struct {
 	Writer io.Writer
 }
 
-func (writer OneLineWriter) Write(p []byte) (n int, err error) {
+func (writer oneLineWriter) Write(p []byte) (n int, err error) {
 	if p[len(p)-1] == '\n' {
 		p = p[:len(p)-1]
 		if len(p) > 5 && bytes.Compare(p[len(p)-5:], []byte("done.")) == 0 {
@@ -68,7 +70,7 @@ func loadRepository(uri string, cachePath string, disableStatus bool) *git.Repos
 		cloneOptions := &git.CloneOptions{URL: uri}
 		if !disableStatus {
 			fmt.Fprint(os.Stderr, "connecting...\r")
-			cloneOptions.Progress = OneLineWriter{Writer: os.Stderr}
+			cloneOptions.Progress = oneLineWriter{Writer: os.Stderr}
 		}
 		repository, err = git.Clone(backend, nil, cloneOptions)
 		if !disableStatus {
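
A rough sketch of what the renamed type does: the trailing newline is replaced by a carriage return, so each progress message overwrites the previous one on the same console line. The version below is simplified and omits the special handling of Git's "done." and status lines; the sample messages are illustrative:

package main

import (
	"bytes"
	"io"
	"os"
)

type oneLineWriter struct {
	Writer io.Writer
}

// Write strips the trailing newline and emits '\r' instead, so the next
// message overwrites this one on the same console line.
func (w oneLineWriter) Write(p []byte) (n int, err error) {
	n = len(p)
	p = bytes.TrimRight(p, "\n")
	if _, err = w.Writer.Write(p); err != nil {
		return
	}
	_, err = w.Writer.Write([]byte("\r"))
	return
}

func main() {
	w := oneLineWriter{Writer: os.Stderr}
	io.WriteString(w, "Counting objects: 10\n")
	io.WriteString(w, "Counting objects: 20\n")
}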

pb/utils.go (+8 -1)

@@ -2,6 +2,8 @@ package pb
 
 import "sort"
 
+// ToBurndownSparseMatrix converts a rectangular integer matrix to the corresponding Protobuf object.
+// It is specific to hercules.BurndownAnalysis.
 func ToBurndownSparseMatrix(matrix [][]int64, name string) *BurndownSparseMatrix {
 	if len(matrix) == 0 {
 		panic("matrix may not be nil or empty")
@@ -37,6 +39,8 @@ func ToBurndownSparseMatrix(matrix [][]int64, name string) *BurndownSparseMatrix
 	return &r
 }
 
+// DenseToCompressedSparseRowMatrix takes an integer matrix and converts it to a Protobuf CSR.
+// CSR format: https://en.wikipedia.org/wiki/Sparse_matrix#Compressed_sparse_row_.28CSR.2C_CRS_or_Yale_format.29
 func DenseToCompressedSparseRowMatrix(matrix [][]int64) *CompressedSparseRowMatrix {
 	r := CompressedSparseRowMatrix{
 		NumberOfRows:    int32(len(matrix)),
@@ -52,7 +56,7 @@ func DenseToCompressedSparseRowMatrix(matrix [][]int64) *CompressedSparseRowMatr
 			if col != 0 {
 				r.Data = append(r.Data, col)
 				r.Indices = append(r.Indices, int32(x))
-				nnz += 1
+				nnz++
 			}
 		}
 		r.Indptr = append(r.Indptr, r.Indptr[len(r.Indptr)-1]+int64(nnz))
@@ -60,6 +64,9 @@ func DenseToCompressedSparseRowMatrix(matrix [][]int64) *CompressedSparseRowMatr
 	return &r
 }
 
+// MapToCompressedSparseRowMatrix takes an integer matrix and converts it to a Protobuf CSR.
+// In contrast to DenseToCompressedSparseRowMatrix, a matrix here is already in DOK format.
+// CSR format: https://en.wikipedia.org/wiki/Sparse_matrix#Compressed_sparse_row_.28CSR.2C_CRS_or_Yale_format.29
 func MapToCompressedSparseRowMatrix(matrix []map[int]int64) *CompressedSparseRowMatrix {
 	r := CompressedSparseRowMatrix{
 		NumberOfRows:    int32(len(matrix)),
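
A tiny standalone sketch of the dense-to-CSR conversion described in the new comments, using plain slices instead of the generated Protobuf struct; the matrix values are illustrative:

package main

import "fmt"

func main() {
	dense := [][]int64{
		{5, 0, 0},
		{0, 0, 7},
	}
	var data []int64     // non-zero values, row by row
	var indices []int32  // column index of each value
	indptr := []int64{0} // row i spans data[indptr[i]:indptr[i+1]]
	for _, row := range dense {
		nnz := 0
		for x, value := range row {
			if value != 0 {
				data = append(data, value)
				indices = append(indices, int32(x))
				nnz++
			}
		}
		indptr = append(indptr, indptr[len(indptr)-1]+int64(nnz))
	}
	fmt.Println(data, indices, indptr) // [5 7] [0 2] [0 1 2]
}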

rbtree/rbtree.go (+7 -10)

@@ -103,7 +103,7 @@ func (root *RBTree) Insert(item Item) (bool, Iterator) {
 	if n == nil {
 		return false, Iterator{}
 	}
-	ins_n := n
+	insN := n
 
 	n.color = red
 
@@ -159,7 +159,7 @@ func (root *RBTree) Insert(item Item) (bool, Iterator) {
 		}
 		break
 	}
-	return true, Iterator{root, ins_n}
+	return true, Iterator{root, insN}
 }
 
 // Delete an item with the given Key. Return true iff the item was
@@ -192,8 +192,8 @@ type Iterator struct {
 	node *node
 }
 
-func (iter Iterator) Equal(iter_ Iterator) bool {
-	return iter.node == iter_.node
+func (iter Iterator) Equal(other Iterator) bool {
+	return iter.node == other.node
 }
 
 // Check if the iterator points beyond the max element in the tree
@@ -414,9 +414,8 @@ func (root *RBTree) doInsert(item Item) *node {
 				root.count++
 				root.maybeSetMinNode(n)
 				return n
-			} else {
-				parent = parent.left
 			}
+			parent = parent.left
 		} else {
 			if parent.right == nil {
 				n := &node{item: item, parent: parent}
@@ -424,9 +423,8 @@ func (root *RBTree) doInsert(item Item) *node {
 				root.count++
 				root.maybeSetMaxNode(n)
 				return n
-			} else {
-				parent = parent.right
 			}
+			parent = parent.right
 		}
 	}
 	panic("should not reach here")
@@ -457,9 +455,8 @@ func (root *RBTree) findGE(key int) (*node, bool) {
 				succ := n.doNext()
 				if succ == nil {
 					return nil, false
-				} else {
-					return succ, (key == succ.item.Key)
 				}
+				return succ, key == succ.item.Key
 			}
 		}
 	}

toposort/toposort.go (+1 -0)

@@ -9,6 +9,7 @@ import (
 
 // Reworked from https://github.com/philopon/go-toposort
 
+// Graph represents a directed acyclic graph.
 type Graph struct {
 	// Outgoing connections for every node.
 	outputs map[string]map[string]int

uast.go (+7 -7)

@@ -78,18 +78,18 @@ type uastTask struct {
 }
 
 type worker struct {
-	Client *bblfsh.Client
-	Job    func(interface{}) interface{}
+	Client   *bblfsh.Client
+	Callback func(interface{}) interface{}
 }
 
-func (w worker) TunnyReady() bool {
+func (w worker) Ready() bool {
 	return true
 }
 
-func (w worker) TunnyJob(data interface{}) interface{} {
+func (w worker) Job(data interface{}) interface{} {
 	task := data.(uastTask)
 	task.Client = w.Client
-	return w.Job(task)
+	return w.Callback(task)
 }
 
 func (exr *UASTExtractor) Name() string {
@@ -188,9 +188,9 @@ func (exr *UASTExtractor) Initialize(repository *git.Repository) {
 	if exr.pool != nil {
 		exr.pool.Close()
 	}
-	workers := make([]tunny.TunnyWorker, poolSize)
+	workers := make([]tunny.Worker, poolSize)
 	for i := 0; i < poolSize; i++ {
-		workers[i] = worker{Client: exr.clients[i], Job: exr.extractTask}
+		workers[i] = worker{Client: exr.clients[i], Callback: exr.extractTask}
 	}
 	exr.pool, err = tunny.CreateCustomPool(workers).Open()
 	if err != nil {

vendor/github.com/jeffail/tunny/tunny.go (+17 -17)

@@ -43,39 +43,39 @@ var (
 )
 
 /*
-TunnyWorker - The basic interface of a tunny worker.
+Worker - The basic interface of a tunny worker.
 */
-type TunnyWorker interface {
+type Worker interface {
 
 	// Called for each job, expects the result to be returned synchronously
-	TunnyJob(interface{}) interface{}
+	Job(interface{}) interface{}
 
 	// Called after each job, this indicates whether the worker is ready for the next job.
 	// The default implementation is to return true always. If false is returned then the
 	// method is called every five milliseconds until either true is returned or the pool
 	// is closed. For efficiency you should have this call block until your worker is ready,
 	// otherwise you introduce a 5ms latency between jobs.
-	TunnyReady() bool
+	Ready() bool
 }
 
 /*
-TunnyExtendedWorker - An optional interface that can be implemented if the worker needs
+ExtendedWorker - An optional interface that can be implemented if the worker needs
 more control over its state.
 */
-type TunnyExtendedWorker interface {
+type ExtendedWorker interface {
 
 	// Called when the pool is opened, this will be called before any jobs are sent.
-	TunnyInitialize()
+	Initialize()
 
 	// Called when the pool is closed, this will be called after all jobs are completed.
-	TunnyTerminate()
+	Terminate()
 }
 
 /*
-TunnyInterruptable - An optional interface that can be implemented in order to allow the
+Interruptable - An optional interface that can be implemented in order to allow the
 worker to drop jobs when they are abandoned.
 */
-type TunnyInterruptable interface {
+type Interruptable interface {
 
 	// Called when the current job has been abandoned by the client.
 	TunnyInterrupt()
@@ -85,15 +85,15 @@ type TunnyInterruptable interface {
 Default and very basic implementation of a tunny worker. This worker holds a closure which
 is assigned at construction, and this closure is called on each job.
 */
-type tunnyDefaultWorker struct {
+type defaultWorker struct {
 	job *func(interface{}) interface{}
 }
 
-func (worker *tunnyDefaultWorker) TunnyJob(data interface{}) interface{} {
+func (worker *defaultWorker) Job(data interface{}) interface{} {
 	return (*worker.job)(data)
 }
 
-func (worker *tunnyDefaultWorker) TunnyReady() bool {
+func (worker *defaultWorker) Ready() bool {
 	return true
 }
 
@@ -181,7 +181,7 @@ func CreatePool(numWorkers int, job func(interface{}) interface{}) *WorkPool {
 	pool.workers = make([]*workerWrapper, numWorkers)
 	for i := range pool.workers {
 		newWorker := workerWrapper{
-			worker: &(tunnyDefaultWorker{&job}),
+			worker: &(defaultWorker{&job}),
 		}
 		pool.workers[i] = &newWorker
 	}
@@ -207,10 +207,10 @@ func CreatePoolGeneric(numWorkers int) *WorkPool {
 
 /*
 CreateCustomPool - Creates a pool for an array of custom workers. The custom workers
-must implement TunnyWorker, and may also optionally implement TunnyExtendedWorker and
-TunnyInterruptable.
+must implement Worker, and may also optionally implement ExtendedWorker and
+Interruptable.
 */
-func CreateCustomPool(customWorkers []TunnyWorker) *WorkPool {
+func CreateCustomPool(customWorkers []Worker) *WorkPool {
 	pool := WorkPool{running: 0}
 
 	pool.workers = make([]*workerWrapper, len(customWorkers))
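
Under the renamed interface, a custom worker looks like the sketch below. The import path is assumed from the vendor directory, and SendWork/Open/Close are the pre-existing WorkPool methods that this commit does not touch, so treat the snippet as an illustration rather than the project's actual usage:

package main

import (
	"fmt"

	"github.com/jeffail/tunny"
)

// squareWorker is a toy custom worker.
type squareWorker struct{}

// Job is called once per submitted job and returns the result synchronously.
func (squareWorker) Job(data interface{}) interface{} {
	n := data.(int)
	return n * n
}

// Ready reports whether the worker can take the next job; returning true
// unconditionally matches the default behaviour.
func (squareWorker) Ready() bool {
	return true
}

func main() {
	pool, err := tunny.CreateCustomPool([]tunny.Worker{squareWorker{}}).Open()
	if err != nil {
		panic(err)
	}
	defer pool.Close()
	result, err := pool.SendWork(3)
	if err != nil {
		panic(err)
	}
	fmt.Println(result) // 9
}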

vendor/github.com/jeffail/tunny/tunny_test.go (+6 -6)

@@ -109,7 +109,7 @@ type dummyWorker struct {
 
 func (d *dummyWorker) TunnyJob(in interface{}) interface{} {
 	if !d.ready {
-		d.t.Errorf("TunnyJob called without polling TunnyReady")
+		d.t.Errorf("Job called without polling Ready")
 	}
 	d.ready = false
 	return in
@@ -122,7 +122,7 @@ func (d *dummyWorker) TunnyReady() bool {
 
 // Test the pool with a basic worker implementation
 func TestDummyWorker(t *testing.T) {
-	pool, err := CreateCustomPool([]TunnyWorker{&dummyWorker{t: t}}).Open()
+	pool, err := CreateCustomPool([]Worker{&dummyWorker{t: t}}).Open()
 	if err != nil {
 		t.Errorf("Failed to create pool: %v", err)
 		return
@@ -147,7 +147,7 @@ type dummyExtWorker struct {
 
 func (d *dummyExtWorker) TunnyJob(in interface{}) interface{} {
 	if !d.initialized {
-		d.t.Errorf("TunnyJob called without calling TunnyInitialize")
+		d.t.Errorf("Job called without calling Initialize")
 	}
 	return d.dummyWorker.TunnyJob(in)
 }
@@ -158,7 +158,7 @@ func (d *dummyExtWorker) TunnyInitialize() {
 
 func (d *dummyExtWorker) TunnyTerminate() {
 	if !d.initialized {
-		d.t.Errorf("TunnyTerminate called without calling TunnyInitialize")
+		d.t.Errorf("Terminate called without calling Initialize")
 	}
 	d.initialized = false
 }
@@ -166,7 +166,7 @@ func (d *dummyExtWorker) TunnyTerminate() {
 // Test the pool with an extended worker implementation
 func TestDummyExtWorker(t *testing.T) {
 	pool, err := CreateCustomPool(
-		[]TunnyWorker{
+		[]Worker{
 			&dummyExtWorker{
 				dummyWorker: dummyWorker{t: t},
 			},
@@ -213,7 +213,7 @@ func (d *dummyExtIntWorker) TunnyInterrupt() {
 // Test the pool with an extended and interruptible worker implementation
 func TestDummyExtIntWorker(t *testing.T) {
 	pool, err := CreateCustomPool(
-		[]TunnyWorker{
+		[]Worker{
 			&dummyExtIntWorker{
 				dummyExtWorker: dummyExtWorker{
 					dummyWorker: dummyWorker{t: t},

vendor/github.com/jeffail/tunny/worker.go (+9 -9)

@@ -32,7 +32,7 @@ type workerWrapper struct {
 	jobChan    chan interface{}
 	outputChan chan interface{}
 	poolOpen   uint32
-	worker     TunnyWorker
+	worker     Worker
 }
 
 func (wrapper *workerWrapper) Loop() {
@@ -40,7 +40,7 @@ func (wrapper *workerWrapper) Loop() {
 	// TODO: Configure?
 	tout := time.Duration(5)
 
-	for !wrapper.worker.TunnyReady() {
+	for !wrapper.worker.Ready() {
 		// It's sad that we can't simply check if jobChan is closed here.
 		if atomic.LoadUint32(&wrapper.poolOpen) == 0 {
 			break
@@ -51,8 +51,8 @@ func (wrapper *workerWrapper) Loop() {
 	wrapper.readyChan <- 1
 
 	for data := range wrapper.jobChan {
-		wrapper.outputChan <- wrapper.worker.TunnyJob(data)
-		for !wrapper.worker.TunnyReady() {
+		wrapper.outputChan <- wrapper.worker.Job(data)
+		for !wrapper.worker.Ready() {
 			if atomic.LoadUint32(&wrapper.poolOpen) == 0 {
 				break
 			}
@@ -67,8 +67,8 @@ func (wrapper *workerWrapper) Loop() {
 }
 
 func (wrapper *workerWrapper) Open() {
-	if extWorker, ok := wrapper.worker.(TunnyExtendedWorker); ok {
-		extWorker.TunnyInitialize()
+	if extWorker, ok := wrapper.worker.(ExtendedWorker); ok {
+		extWorker.Initialize()
 	}
 
 	wrapper.readyChan = make(chan int)
@@ -98,13 +98,13 @@ func (wrapper *workerWrapper) Join() {
 		}
 	}
 
-	if extWorker, ok := wrapper.worker.(TunnyExtendedWorker); ok {
-		extWorker.TunnyTerminate()
+	if extWorker, ok := wrapper.worker.(ExtendedWorker); ok {
+		extWorker.Terminate()
 	}
 }
 
 func (wrapper *workerWrapper) Interrupt() {
-	if extWorker, ok := wrapper.worker.(TunnyInterruptable); ok {
+	if extWorker, ok := wrapper.worker.(Interruptable); ok {
 		extWorker.TunnyInterrupt()
 	}
 }

yaml/utils.go (+6 -0)

@@ -7,12 +7,18 @@ import (
 	"strings"
 )
 
+// SafeString returns a string which is sufficiently quoted and escaped for YAML.
 func SafeString(str string) string {
 	str = strings.Replace(str, "\\", "\\\\", -1)
 	str = strings.Replace(str, "\"", "\\\"", -1)
 	return "\"" + str + "\""
 }
 
+// PrintMatrix outputs a rectangular integer matrix in YAML text format.
+//
+// `indent` is the current YAML indentation level - the number of spaces.
+// `name` is the name of the corresponding YAML block. If empty, no separate block is created.
+// `fixNegative` changes all negative values to 0.
 func PrintMatrix(writer io.Writer, matrix [][]int64, indent int, name string, fixNegative bool) {
 	// determine the maximum length of each value
 	var maxnum int64 = -(1 << 32)
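
A hedged usage sketch of the two documented helpers; the import path (gopkg.in/src-d/hercules.v3/yaml) is assumed from the other imports in this commit, and the exact YAML layout produced by PrintMatrix is not shown in the diff:

package main

import (
	"fmt"
	"os"

	"gopkg.in/src-d/hercules.v3/yaml"
)

func main() {
	// Backslashes and double quotes are escaped, then the value is quoted:
	// prints "C:\\repo \"mirror\"" (including the surrounding quotes).
	fmt.Println(yaml.SafeString(`C:\repo "mirror"`))

	// Writes the matrix as a YAML block named "matrix", indented by two
	// spaces, with negative values clamped to zero (fixNegative = true).
	yaml.PrintMatrix(os.Stdout, [][]int64{{1, 2}, {3, -4}}, 2, "matrix", true)
}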