
Merge pull request #46 from vmarkovtsev/master

gofmt -s
Vadim Markovtsev, 7 years ago
commit 99e0e47d5f
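
Note: `gofmt -s` is gofmt with its simplification rules enabled. A minimal, hypothetical sketch of the kind of rewrites `-s` adds on top of plain formatting (none of these lines come from this commit):

```go
package main

import "fmt"

type point struct{ x, y int }

func main() {
	// gofmt -s elides the redundant element type in composite literals:
	//   []point{point{1, 2}, point{3, 4}}  ->  []point{{1, 2}, {3, 4}}
	pts := []point{{1, 2}, {3, 4}}

	// It drops the unused variable of a range loop:
	//   for _ = range pts {  ->  for range pts {
	for range pts {
		fmt.Println("visited a point")
	}

	// And it trims redundant slice bounds:
	//   pts[0:len(pts)]  ->  pts[0:]
	fmt.Println(pts[0:])
}
```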

+ 2 - 2
blob_cache.go

@@ -30,7 +30,7 @@ const (
 	// BlobCache.Configure() to not check if the referenced submodules exist.
 	ConfigBlobCacheIgnoreMissingSubmodules = "BlobCache.IgnoreMissingSubmodules"
 	// DependencyBlobCache identifies the dependency provided by BlobCache.
-	DependencyBlobCache                    = "blob_cache"
+	DependencyBlobCache = "blob_cache"
 )
 
 func (blobCache *BlobCache) Name() string {
@@ -52,7 +52,7 @@ func (blobCache *BlobCache) ListConfigurationOptions() []ConfigurationOption {
 		Name: ConfigBlobCacheIgnoreMissingSubmodules,
 		Description: "Specifies whether to panic if some referenced submodules do not exist and thus" +
 			" the corresponding Git objects cannot be loaded. Override this if you know that the " +
-				"history is dirty and you want to get things done.",
+			"history is dirty and you want to get things done.",
 		Flag:    "ignore-missing-submodules",
 		Type:    BoolConfigurationOption,
 		Default: false}}
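
Why gofmt collapses the long space runs in blocks like this one: it aligns the `=` of adjacent, uninterrupted entries in a const block, and a full-line doc comment before each entry splits the block into single-entry groups, each of which gets a plain single space. A hypothetical const block illustrating both cases (my reading of gofmt's behavior, not code from this repository):

```go
package example

// Uninterrupted neighbours are column-aligned by gofmt:
const (
	Short    = 1
	Longer   = 2
	Longest3 = 3
)

// A doc comment before every entry breaks the alignment groups,
// so each `=` gets a single space, which is exactly the change in this commit:
const (
	// A is the first constant.
	A = 1
	// BCDEFG is the second constant.
	BCDEFG = 2
)
```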

+ 2 - 2
burndown.go

@@ -104,13 +104,13 @@ const (
 	// ConfigBurndownGranularity is the name of the option to set BurndownAnalysis.Granularity.
 	ConfigBurndownGranularity = "Burndown.Granularity"
 	// ConfigBurndownSampling is the name of the option to set BurndownAnalysis.Sampling.
-	ConfigBurndownSampling    = "Burndown.Sampling"
+	ConfigBurndownSampling = "Burndown.Sampling"
 	// ConfigBurndownTrackFiles enables burndown collection for files.
 	ConfigBurndownTrackFiles = "Burndown.TrackFiles"
 	// ConfigBurndownTrackPeople enables burndown collection for authors.
 	ConfigBurndownTrackPeople = "Burndown.TrackPeople"
 	// ConfigBurndownDebug enables some extra debug assertions.
-	ConfigBurndownDebug        = "Burndown.Debug"
+	ConfigBurndownDebug = "Burndown.Debug"
 	// DefaultBurndownGranularity is the default number of days for BurndownAnalysis.Granularity
 	// and BurndownAnalysis.Sampling.
 	DefaultBurndownGranularity = 30

+ 3 - 1
cmd/hercules/embed.go

@@ -19,7 +19,9 @@ func main() {
 		panic(err)
 	}
 	defer file.Close()
-	file.WriteString("package main\n\nconst PLUGIN_TEMPLATE_SOURCE = `")
+	file.WriteString("package main\n\n" +
+		"// PluginTemplateSource is the source code template of a Hercules plugin.\n" +
+		"const PluginTemplateSource = `")
 	file.Write(contents)
 	file.WriteString("`\n")
 }

+ 4 - 3
cmd/hercules/generate_plugin.go

@@ -17,7 +17,8 @@ import (
 
 //go:generate go run embed.go
 
-var SHLIB_EXT = map[string]string{
+// ShlibExts is the mapping between platform names and shared library file name extensions.
+var ShlibExts = map[string]string{
 	"window":  "dll",
 	"linux":   "so",
 	"darwin":  "dylib",
@@ -44,7 +45,7 @@ var generatePluginCmd = &cobra.Command{
 			panic(err)
 		}
 		outputPath := path.Join(outputDir, strings.ToLower(strings.Join(splitted, "_"))+".go")
-		gen := template.Must(template.New("plugin").Parse(PLUGIN_TEMPLATE_SOURCE))
+		gen := template.Must(template.New("plugin").Parse(PluginTemplateSource))
 		outFile, err := os.Create(outputPath)
 		if err != nil {
 			panic(err)
@@ -57,7 +58,7 @@ var generatePluginCmd = &cobra.Command{
 			flag = strings.ToLower(strings.Join(splitted, "-"))
 		}
 		outputBase := path.Base(outputPath)
-		shlib := outputBase[:len(outputBase)-2] + SHLIB_EXT[runtime.GOOS]
+		shlib := outputBase[:len(outputBase)-2] + ShlibExts[runtime.GOOS]
 		protoBuf := outputPath[:len(outputPath)-3] + ".proto"
 		pbGo := outputPath[:len(outputPath)-3] + ".pb.go"
 		dict := map[string]string{

+ 5 - 3
cmd/hercules/root.go

@@ -30,11 +30,13 @@ import (
 	"gopkg.in/src-d/hercules.v3/pb"
 	"gopkg.in/src-d/hercules.v3/pb"
 )
 )
 
 
-type OneLineWriter struct {
+// oneLineWriter splits the output data by lines and outputs one on top of another using '\r'.
+// It also does some dark magic to handle Git statuses.
+type oneLineWriter struct {
 	Writer io.Writer
 	Writer io.Writer
 }
 }
 
 
-func (writer OneLineWriter) Write(p []byte) (n int, err error) {
+func (writer oneLineWriter) Write(p []byte) (n int, err error) {
 	if p[len(p)-1] == '\n' {
 	if p[len(p)-1] == '\n' {
 		p = p[:len(p)-1]
 		p = p[:len(p)-1]
 		if len(p) > 5 && bytes.Compare(p[len(p)-5:], []byte("done.")) == 0 {
 		if len(p) > 5 && bytes.Compare(p[len(p)-5:], []byte("done.")) == 0 {
@@ -68,7 +70,7 @@ func loadRepository(uri string, cachePath string, disableStatus bool) *git.Repos
 		cloneOptions := &git.CloneOptions{URL: uri}
 		cloneOptions := &git.CloneOptions{URL: uri}
 		if !disableStatus {
 		if !disableStatus {
 			fmt.Fprint(os.Stderr, "connecting...\r")
 			fmt.Fprint(os.Stderr, "connecting...\r")
-			cloneOptions.Progress = OneLineWriter{Writer: os.Stderr}
+			cloneOptions.Progress = oneLineWriter{Writer: os.Stderr}
 		}
 		}
 		repository, err = git.Clone(backend, nil, cloneOptions)
 		repository, err = git.Clone(backend, nil, cloneOptions)
 		if !disableStatus {
 		if !disableStatus {
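
The new doc comment names the trick oneLineWriter relies on: a status line ending in '\r' returns the cursor to column 0, so the next write overdraws the previous one instead of scrolling. A self-contained illustration of that terminal behavior (hypothetical code, not part of root.go):

```go
package main

import (
	"fmt"
	"os"
	"time"
)

func main() {
	// Each status ends with '\r', so the next one overwrites it in place.
	for i := 1; i <= 3; i++ {
		fmt.Fprintf(os.Stderr, "step %d/3...\r", i)
		time.Sleep(200 * time.Millisecond)
	}
	// Trailing spaces erase leftovers from the longer previous line.
	fmt.Fprintln(os.Stderr, "done.      ")
}
```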

+ 21 - 20
contrib/_plugin_example/churn_analysis.go

@@ -7,14 +7,14 @@ import (
 	"strings"
 	"strings"
 	"unicode/utf8"
 	"unicode/utf8"
 
 
-  "gopkg.in/src-d/go-git.v4"
+	"github.com/gogo/protobuf/proto"
+	"github.com/sergi/go-diff/diffmatchpatch"
+	"gopkg.in/src-d/go-git.v4"
+	"gopkg.in/src-d/go-git.v4/plumbing"
 	"gopkg.in/src-d/go-git.v4/plumbing/object"
 	"gopkg.in/src-d/go-git.v4/plumbing/object"
 	"gopkg.in/src-d/go-git.v4/utils/merkletrie"
 	"gopkg.in/src-d/go-git.v4/utils/merkletrie"
-	"gopkg.in/src-d/go-git.v4/plumbing"
 	"gopkg.in/src-d/hercules.v3"
 	"gopkg.in/src-d/hercules.v3"
 	"gopkg.in/src-d/hercules.v3/yaml"
 	"gopkg.in/src-d/hercules.v3/yaml"
-	"github.com/gogo/protobuf/proto"
-	"github.com/sergi/go-diff/diffmatchpatch"
 )
 )
 
 
 // ChurnAnalysis contains the intermediate state which is mutated by Consume(). It should implement
 // ChurnAnalysis contains the intermediate state which is mutated by Consume(). It should implement
@@ -30,14 +30,14 @@ type ChurnAnalysis struct {
 }
 }
 
 
 type editInfo struct {
 type editInfo struct {
-	Day int
-	Added int
+	Day     int
+	Added   int
 	Removed int
 	Removed int
 }
 }
 
 
 // ChurnAnalysisResult is returned by Finalize() and represents the analysis result.
 // ChurnAnalysisResult is returned by Finalize() and represents the analysis result.
 type ChurnAnalysisResult struct {
 type ChurnAnalysisResult struct {
-  Global Edits
+	Global Edits
 	People map[string]Edits
 	People map[string]Edits
 }
 }
 
 
@@ -80,7 +80,7 @@ func (churn *ChurnAnalysis) Requires() []string {
 // ListConfigurationOptions tells the engine which parameters can be changed through the command
 // ListConfigurationOptions tells the engine which parameters can be changed through the command
 // line.
 // line.
 func (churn *ChurnAnalysis) ListConfigurationOptions() []hercules.ConfigurationOption {
 func (churn *ChurnAnalysis) ListConfigurationOptions() []hercules.ConfigurationOption {
-	opts := [...]hercules.ConfigurationOption {{
+	opts := [...]hercules.ConfigurationOption{{
 		Name:        ConfigChurnTrackPeople,
 		Name:        ConfigChurnTrackPeople,
 		Description: "Record detailed statistics per each developer.",
 		Description: "Record detailed statistics per each developer.",
 		Flag:        "churn-people",
 		Flag:        "churn-people",
@@ -122,7 +122,8 @@ func (churn *ChurnAnalysis) Consume(deps map[string]interface{}) (map[string]int
 		if err != nil {
 		if err != nil {
 			return nil, err
 			return nil, err
 		}
 		}
-		added := 0; removed := 0
+		added := 0
+		removed := 0
 		switch action {
 		switch action {
 		case merkletrie.Insert:
 		case merkletrie.Insert:
 			added, err = hercules.CountLines(cache[change.To.TreeEntry.Hash])
 			added, err = hercules.CountLines(cache[change.To.TreeEntry.Hash])
@@ -167,16 +168,16 @@ func (churn *ChurnAnalysis) Consume(deps map[string]interface{}) (map[string]int
 }
 }
 
 
 func (churn *ChurnAnalysis) Finalize() interface{} {
 func (churn *ChurnAnalysis) Finalize() interface{} {
-  result := ChurnAnalysisResult{
-	  Global: editInfosToEdits(churn.global),
-	  People: map[string]Edits{},
-  }
+	result := ChurnAnalysisResult{
+		Global: editInfosToEdits(churn.global),
+		People: map[string]Edits{},
+	}
 	if churn.TrackPeople {
 	if churn.TrackPeople {
 		for key, val := range churn.people {
 		for key, val := range churn.people {
 			result.People[churn.reversedPeopleDict[key]] = editInfosToEdits(val)
 			result.People[churn.reversedPeopleDict[key]] = editInfosToEdits(val)
 		}
 		}
 	}
 	}
-  return result
+	return result
 }
 }
 
 
 func (churn *ChurnAnalysis) Serialize(result interface{}, binary bool, writer io.Writer) error {
 func (churn *ChurnAnalysis) Serialize(result interface{}, binary bool, writer io.Writer) error {
@@ -189,7 +190,7 @@ func (churn *ChurnAnalysis) Serialize(result interface{}, binary bool, writer io
 }
 }
 
 
 func (churn *ChurnAnalysis) serializeText(result *ChurnAnalysisResult, writer io.Writer) {
 func (churn *ChurnAnalysis) serializeText(result *ChurnAnalysisResult, writer io.Writer) {
-  fmt.Fprintln(writer, "  global:")
+	fmt.Fprintln(writer, "  global:")
 	printEdits(result.Global, writer, 4)
 	printEdits(result.Global, writer, 4)
 	for key, val := range result.People {
 	for key, val := range result.People {
 		fmt.Fprintf(writer, "  %s:\n", yaml.SafeString(key))
 		fmt.Fprintf(writer, "  %s:\n", yaml.SafeString(key))
@@ -210,7 +211,7 @@ func (churn *ChurnAnalysis) serializeBinary(result *ChurnAnalysisResult, writer
 		return err
 		return err
 	}
 	}
 	writer.Write(serialized)
 	writer.Write(serialized)
-  return nil
+	return nil
 }
 }
 
 
 func editInfosToEdits(eis []editInfo) Edits {
 func editInfosToEdits(eis []editInfo) Edits {
@@ -245,9 +246,9 @@ func editInfosToEdits(eis []editInfo) Edits {
 func printEdits(edits Edits, writer io.Writer, indent int) {
 func printEdits(edits Edits, writer io.Writer, indent int) {
 	strIndent := strings.Repeat(" ", indent)
 	strIndent := strings.Repeat(" ", indent)
 	printArray := func(arr []int, name string) {
 	printArray := func(arr []int, name string) {
-	  fmt.Fprintf(writer, "%s%s: [", strIndent, name)
+		fmt.Fprintf(writer, "%s%s: [", strIndent, name)
 		for i, v := range arr {
 		for i, v := range arr {
-			if i < len(arr) - 1 {
+			if i < len(arr)-1 {
 				fmt.Fprintf(writer, "%d, ", v)
 				fmt.Fprintf(writer, "%d, ", v)
 			} else {
 			} else {
 				fmt.Fprintf(writer, "%d]\n", v)
 				fmt.Fprintf(writer, "%d]\n", v)
@@ -261,9 +262,9 @@ func printEdits(edits Edits, writer io.Writer, indent int) {
 
 
 func editsToEditsMessage(edits Edits) *EditsMessage {
 func editsToEditsMessage(edits Edits) *EditsMessage {
 	message := &EditsMessage{
 	message := &EditsMessage{
-		Days: make([]uint32, len(edits.Days)),
+		Days:      make([]uint32, len(edits.Days)),
 		Additions: make([]uint32, len(edits.Additions)),
 		Additions: make([]uint32, len(edits.Additions)),
-		Removals: make([]uint32, len(edits.Removals)),
+		Removals:  make([]uint32, len(edits.Removals)),
 	}
 	}
 	copyInts := func(arr []int, where []uint32) {
 	copyInts := func(arr []int, where []uint32) {
 		for i, v := range arr {
 		for i, v := range arr {
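
The import-block shuffle above is also gofmt's doing: import paths inside each contiguous group (blank lines delimit groups) are sorted lexicographically. A stdlib-only sketch reproducing the order seen in the diff:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	// Three of the reordered third-party paths from churn_analysis.go:
	paths := []string{
		"gopkg.in/src-d/go-git.v4/plumbing",
		"github.com/gogo/protobuf/proto",
		"gopkg.in/src-d/go-git.v4",
	}
	sort.Strings(paths) // lexicographic, the same order gofmt emitted
	fmt.Println(strings.Join(paths, "\n"))
}
```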

+ 4 - 4
identity.go

@@ -23,25 +23,25 @@ type IdentityDetector struct {
 const (
 	// AuthorMissing is the internal author index which denotes any unmatched identities
 	// (IdentityDetector.Consume()).
-	AuthorMissing   = (1 << 18) - 1
+	AuthorMissing = (1 << 18) - 1
 	// AuthorMissingName is the string name which corresponds to AuthorMissing.
 	AuthorMissingName = "<unmatched>"
 
 	// FactIdentityDetectorPeopleDict is the name of the fact which is inserted in
 	// IdentityDetector.Configure(). It corresponds to IdentityDetector.PeopleDict - the mapping
 	// from the signatures to the author indices.
-	FactIdentityDetectorPeopleDict         = "IdentityDetector.PeopleDict"
+	FactIdentityDetectorPeopleDict = "IdentityDetector.PeopleDict"
 	// FactIdentityDetectorReversedPeopleDict is the name of the fact which is inserted in
 	// IdentityDetector.Configure(). It corresponds to IdentityDetector.ReversedPeopleDict -
 	// the mapping from the author indices to the main signature.
 	FactIdentityDetectorReversedPeopleDict = "IdentityDetector.ReversedPeopleDict"
 	// ConfigIdentityDetectorPeopleDictPath is the name of the configuration option
 	// (IdentityDetector.Configure()) which allows to set the external PeopleDict mapping from a file.
-	ConfigIdentityDetectorPeopleDictPath   = "IdentityDetector.PeopleDictPath"
+	ConfigIdentityDetectorPeopleDictPath = "IdentityDetector.PeopleDictPath"
 	// FactIdentityDetectorPeopleCount is the name of the fact which is inserted in
 	// IdentityDetector.Configure(). It is equal to the overall number of unique authors
 	// (the length of ReversedPeopleDict).
-	FactIdentityDetectorPeopleCount        = "IdentityDetector.PeopleCount"
+	FactIdentityDetectorPeopleCount = "IdentityDetector.PeopleCount"
 
 	// DependencyAuthor is the name of the dependency provided by IdentityDetector.
 	DependencyAuthor = "author"

+ 8 - 1
pb/utils.go

@@ -2,6 +2,8 @@ package pb
 
 import "sort"
 
+// ToBurndownSparseMatrix converts a rectangular integer matrix to the corresponding Protobuf object.
+// It is specific to hercules.BurndownAnalysis.
 func ToBurndownSparseMatrix(matrix [][]int64, name string) *BurndownSparseMatrix {
 	if len(matrix) == 0 {
 		panic("matrix may not be nil or empty")
@@ -37,6 +39,8 @@ func ToBurndownSparseMatrix(matrix [][]int64, name string) *BurndownSparseMatrix
 	return &r
 }
 
+// DenseToCompressedSparseRowMatrix takes an integer matrix and converts it to a Protobuf CSR.
+// CSR format: https://en.wikipedia.org/wiki/Sparse_matrix#Compressed_sparse_row_.28CSR.2C_CRS_or_Yale_format.29
 func DenseToCompressedSparseRowMatrix(matrix [][]int64) *CompressedSparseRowMatrix {
 	r := CompressedSparseRowMatrix{
 		NumberOfRows:    int32(len(matrix)),
@@ -52,7 +56,7 @@ func DenseToCompressedSparseRowMatrix(matrix [][]int64) *CompressedSparseRowMatr
 			if col != 0 {
 				r.Data = append(r.Data, col)
 				r.Indices = append(r.Indices, int32(x))
-				nnz += 1
+				nnz++
 			}
 		}
 		r.Indptr = append(r.Indptr, r.Indptr[len(r.Indptr)-1]+int64(nnz))
@@ -60,6 +64,9 @@ func DenseToCompressedSparseRowMatrix(matrix [][]int64) *CompressedSparseRowMatr
 	return &r
 }
 
+// MapToCompressedSparseRowMatrix takes an integer matrix and converts it to a Protobuf CSR.
+// In contrast to DenseToCompressedSparseRowMatrix, a matrix here is already in DOK format.
+// CSR format: https://en.wikipedia.org/wiki/Sparse_matrix#Compressed_sparse_row_.28CSR.2C_CRS_or_Yale_format.29
 func MapToCompressedSparseRowMatrix(matrix []map[int]int64) *CompressedSparseRowMatrix {
 	r := CompressedSparseRowMatrix{
 		NumberOfRows:    int32(len(matrix)),
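
The added doc comments reference the CSR layout; here is a worked example of the conversion the loop above performs. `denseToCSR` is an illustrative standalone copy, not part of the pb package:

```go
package main

import "fmt"

// denseToCSR mirrors DenseToCompressedSparseRowMatrix: non-zero values go to
// data, their column indices to indices, and indptr[i+1] marks where row i
// ends inside data.
func denseToCSR(matrix [][]int64) (data []int64, indices []int32, indptr []int64) {
	indptr = append(indptr, 0)
	for _, row := range matrix {
		nnz := 0
		for x, col := range row {
			if col != 0 {
				data = append(data, col)
				indices = append(indices, int32(x))
				nnz++
			}
		}
		indptr = append(indptr, indptr[len(indptr)-1]+int64(nnz))
	}
	return
}

func main() {
	// | 1 0 2 |
	// | 0 0 3 |
	data, indices, indptr := denseToCSR([][]int64{{1, 0, 2}, {0, 0, 3}})
	fmt.Println(data, indices, indptr) // [1 2 3] [0 2 2] [0 2 3]
}
```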

+ 7 - 10
rbtree/rbtree.go

@@ -103,7 +103,7 @@ func (root *RBTree) Insert(item Item) (bool, Iterator) {
 	if n == nil {
 		return false, Iterator{}
 	}
-	ins_n := n
+	insN := n
 
 	n.color = red
 
@@ -159,7 +159,7 @@ func (root *RBTree) Insert(item Item) (bool, Iterator) {
 		}
 		break
 	}
-	return true, Iterator{root, ins_n}
+	return true, Iterator{root, insN}
 }
 
 // Delete an item with the given Key. Return true iff the item was
@@ -192,8 +192,8 @@ type Iterator struct {
 	node *node
 }
 
-func (iter Iterator) Equal(iter_ Iterator) bool {
-	return iter.node == iter_.node
+func (iter Iterator) Equal(other Iterator) bool {
+	return iter.node == other.node
 }
 
 // Check if the iterator points beyond the max element in the tree
@@ -414,9 +414,8 @@ func (root *RBTree) doInsert(item Item) *node {
 				root.count++
 				root.maybeSetMinNode(n)
 				return n
-			} else {
-				parent = parent.left
 			}
+			parent = parent.left
 		} else {
 			if parent.right == nil {
 				n := &node{item: item, parent: parent}
@@ -424,9 +423,8 @@ func (root *RBTree) doInsert(item Item) *node {
 				root.count++
 				root.maybeSetMaxNode(n)
 				return n
-			} else {
-				parent = parent.right
 			}
+			parent = parent.right
 		}
 	}
 	panic("should not reach here")
@@ -457,9 +455,8 @@ func (root *RBTree) findGE(key int) (*node, bool) {
 				succ := n.doNext()
 				if succ == nil {
 					return nil, false
-				} else {
-					return succ, (key == succ.item.Key)
 				}
+				return succ, key == succ.item.Key
 			}
 		}
 	}
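
The rbtree edits apply the usual Go lint rule: when an `if` branch ends in `return`, the `else` is redundant and its body is outdented, keeping the main path flush left. A generic, hypothetical illustration:

```go
package main

import "fmt"

// firstNonNegative returns the first value >= 0 in values.
func firstNonNegative(values []int) (int, bool) {
	for _, v := range values {
		if v >= 0 {
			return v, true
		}
		// control reaches here only when v < 0, so an else would be redundant
	}
	return 0, false
}

func main() {
	fmt.Println(firstNonNegative([]int{-3, -1, 4})) // 4 true
}
```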

+ 1 - 1
renames.go

@@ -165,7 +165,7 @@ func (ra *RenameAnalysis) Consume(deps map[string]interface{}) (map[string]inter
 				reducedChanges = append(
 					reducedChanges,
 					&object.Change{From: deletedBlobs[d].change.From,
-						To:                addedBlobs[a].change.To})
+						To: addedBlobs[a].change.To})
 				break
 			}
 		}

+ 2 - 2
shotness.go

@@ -33,14 +33,14 @@ const (
 	// ConfigShotnessXpathName is the name of the configuration option (ShotnessAnalysis.Configure())
 	// which sets the UAST XPath to find the name of the nodes chosen by ConfigShotnessXpathStruct.
 	// These XPath-s can be different for some languages.
-	ConfigShotnessXpathName   = "Shotness.XpathName"
+	ConfigShotnessXpathName = "Shotness.XpathName"
 
 	// DefaultShotnessXpathStruct is the default UAST XPath to choose the analysed nodes.
 	// It extracts functions.
 	DefaultShotnessXpathStruct = "//*[@roleFunction and @roleDeclaration]"
 	// DefaultShotnessXpathName is the default UAST XPath to choose the names of the analysed nodes.
 	// It looks at the current tree level and at the immediate children.
-	DefaultShotnessXpathName   = "/*[@roleFunction and @roleIdentifier and @roleName] | /*/*[@roleFunction and @roleIdentifier and @roleName]"
+	DefaultShotnessXpathName = "/*[@roleFunction and @roleIdentifier and @roleName] | /*/*[@roleFunction and @roleIdentifier and @roleName]"
 )
 
 type nodeShotness struct {

+ 1 - 0
toposort/toposort.go

@@ -9,6 +9,7 @@ import (
 
 // Reworked from https://github.com/philopon/go-toposort
 
+// Graph represents a directed acyclic graph.
 type Graph struct {
 	// Outgoing connections for every node.
 	outputs map[string]map[string]int

+ 12 - 12
uast.go

@@ -47,23 +47,23 @@ const (
 
 	// ConfigUASTEndpoint is the name of the configuration option (UASTExtractor.Configure())
 	// which sets the Babelfish server address.
-	ConfigUASTEndpoint     = "ConfigUASTEndpoint"
+	ConfigUASTEndpoint = "ConfigUASTEndpoint"
 	// ConfigUASTTimeout is the name of the configuration option (UASTExtractor.Configure())
 	// which sets the maximum amount of time to wait for a Babelfish server response.
-	ConfigUASTTimeout      = "ConfigUASTTimeout"
+	ConfigUASTTimeout = "ConfigUASTTimeout"
 	// ConfigUASTPoolSize is the name of the configuration option (UASTExtractor.Configure())
 	// which sets the number of goroutines to run for UAST parse queries.
-	ConfigUASTPoolSize     = "ConfigUASTPoolSize"
+	ConfigUASTPoolSize = "ConfigUASTPoolSize"
 	// ConfigUASTFailOnErrors is the name of the configuration option (UASTExtractor.Configure())
 	// which enables early exit in case of any Babelfish UAST parsing errors.
 	ConfigUASTFailOnErrors = "ConfigUASTFailOnErrors"
 	// ConfigUASTLanguages is the name of the configuration option (UASTExtractor.Configure())
 	// which sets the list of languages to parse. Language names are at
 	// https://doc.bblf.sh/languages.html Names are joined with a comma ",".
-	ConfigUASTLanguages    = "ConfigUASTLanguages"
+	ConfigUASTLanguages = "ConfigUASTLanguages"
 
 	// FeatureUast is the name of the Pipeline feature which activates all the items related to UAST.
-	FeatureUast     = "uast"
+	FeatureUast = "uast"
 	// DependencyUasts is the name of the dependency provided by UASTExtractor.
 	DependencyUasts = "uasts"
 )
@@ -78,18 +78,18 @@ type uastTask struct {
 }
 
 type worker struct {
-	Client *bblfsh.Client
-	Job    func(interface{}) interface{}
+	Client   *bblfsh.Client
+	Callback func(interface{}) interface{}
 }
 
-func (w worker) TunnyReady() bool {
+func (w worker) Ready() bool {
 	return true
 }
 
-func (w worker) TunnyJob(data interface{}) interface{} {
+func (w worker) Job(data interface{}) interface{} {
 	task := data.(uastTask)
 	task.Client = w.Client
-	return w.Job(task)
+	return w.Callback(task)
 }
 
 func (exr *UASTExtractor) Name() string {
@@ -188,9 +188,9 @@ func (exr *UASTExtractor) Initialize(repository *git.Repository) {
 	if exr.pool != nil {
 		exr.pool.Close()
 	}
-	workers := make([]tunny.TunnyWorker, poolSize)
+	workers := make([]tunny.Worker, poolSize)
 	for i := 0; i < poolSize; i++ {
-		workers[i] = worker{Client: exr.clients[i], Job: exr.extractTask}
+		workers[i] = worker{Client: exr.clients[i], Callback: exr.extractTask}
 	}
 	exr.pool, err = tunny.CreateCustomPool(workers).Open()
 	if err != nil {

+ 17 - 17
vendor/github.com/jeffail/tunny/tunny.go

@@ -43,39 +43,39 @@ var (
 )
 
 /*
-TunnyWorker - The basic interface of a tunny worker.
+Worker - The basic interface of a tunny worker.
 */
-type TunnyWorker interface {
+type Worker interface {
 
 	// Called for each job, expects the result to be returned synchronously
-	TunnyJob(interface{}) interface{}
+	Job(interface{}) interface{}
 
 	// Called after each job, this indicates whether the worker is ready for the next job.
 	// The default implementation is to return true always. If false is returned then the
 	// method is called every five milliseconds until either true is returned or the pool
 	// is closed. For efficiency you should have this call block until your worker is ready,
 	// otherwise you introduce a 5ms latency between jobs.
-	TunnyReady() bool
+	Ready() bool
 }
 
 /*
-TunnyExtendedWorker - An optional interface that can be implemented if the worker needs
+ExtendedWorker - An optional interface that can be implemented if the worker needs
 more control over its state.
 */
-type TunnyExtendedWorker interface {
+type ExtendedWorker interface {
 
 	// Called when the pool is opened, this will be called before any jobs are sent.
-	TunnyInitialize()
+	Initialize()
 
 	// Called when the pool is closed, this will be called after all jobs are completed.
-	TunnyTerminate()
+	Terminate()
 }
 
 /*
-TunnyInterruptable - An optional interface that can be implemented in order to allow the
+Interruptable - An optional interface that can be implemented in order to allow the
 worker to drop jobs when they are abandoned.
 */
-type TunnyInterruptable interface {
+type Interruptable interface {
 
 	// Called when the current job has been abandoned by the client.
 	TunnyInterrupt()
@@ -85,15 +85,15 @@ type TunnyInterruptable interface {
 Default and very basic implementation of a tunny worker. This worker holds a closure which
 is assigned at construction, and this closure is called on each job.
 */
-type tunnyDefaultWorker struct {
+type defaultWorker struct {
 	job *func(interface{}) interface{}
 }
 
-func (worker *tunnyDefaultWorker) TunnyJob(data interface{}) interface{} {
+func (worker *defaultWorker) Job(data interface{}) interface{} {
 	return (*worker.job)(data)
 }
 
-func (worker *tunnyDefaultWorker) TunnyReady() bool {
+func (worker *defaultWorker) Ready() bool {
 	return true
 }
 
@@ -181,7 +181,7 @@ func CreatePool(numWorkers int, job func(interface{}) interface{}) *WorkPool {
 	pool.workers = make([]*workerWrapper, numWorkers)
 	for i := range pool.workers {
 		newWorker := workerWrapper{
-			worker: &(tunnyDefaultWorker{&job}),
+			worker: &(defaultWorker{&job}),
 		}
 		pool.workers[i] = &newWorker
 	}
@@ -207,10 +207,10 @@ func CreatePoolGeneric(numWorkers int) *WorkPool {
 
 /*
 CreateCustomPool - Creates a pool for an array of custom workers. The custom workers
-must implement TunnyWorker, and may also optionally implement TunnyExtendedWorker and
-TunnyInterruptable.
+must implement Worker, and may also optionally implement ExtendedWorker and
+Interruptable.
 */
-func CreateCustomPool(customWorkers []TunnyWorker) *WorkPool {
+func CreateCustomPool(customWorkers []Worker) *WorkPool {
 	pool := WorkPool{running: 0}
 
 	pool.workers = make([]*workerWrapper, len(customWorkers))
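
After the rename, a custom worker only needs the two unprefixed methods. A minimal conforming implementation (hypothetical; the interface is restated from the diff so the sketch compiles on its own — real code would implement tunny.Worker directly):

```go
package main

import "fmt"

// Worker restates the renamed vendored interface.
type Worker interface {
	Job(interface{}) interface{}
	Ready() bool
}

// doubler is a toy worker: always ready, doubles its integer input.
type doubler struct{}

func (doubler) Job(data interface{}) interface{} { return data.(int) * 2 }
func (doubler) Ready() bool                      { return true }

func main() {
	var w Worker = doubler{}
	fmt.Println(w.Job(21), w.Ready()) // 42 true
}
```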

+ 6 - 6
vendor/github.com/jeffail/tunny/tunny_test.go

@@ -109,7 +109,7 @@ type dummyWorker struct {
 
 func (d *dummyWorker) TunnyJob(in interface{}) interface{} {
 	if !d.ready {
-		d.t.Errorf("TunnyJob called without polling TunnyReady")
+		d.t.Errorf("Job called without polling Ready")
 	}
 	d.ready = false
 	return in
@@ -122,7 +122,7 @@ func (d *dummyWorker) TunnyReady() bool {
 
 // Test the pool with a basic worker implementation
 func TestDummyWorker(t *testing.T) {
-	pool, err := CreateCustomPool([]TunnyWorker{&dummyWorker{t: t}}).Open()
+	pool, err := CreateCustomPool([]Worker{&dummyWorker{t: t}}).Open()
 	if err != nil {
 		t.Errorf("Failed to create pool: %v", err)
 		return
@@ -147,7 +147,7 @@ type dummyExtWorker struct {
 
 func (d *dummyExtWorker) TunnyJob(in interface{}) interface{} {
 	if !d.initialized {
-		d.t.Errorf("TunnyJob called without calling TunnyInitialize")
+		d.t.Errorf("Job called without calling Initialize")
 	}
 	return d.dummyWorker.TunnyJob(in)
 }
@@ -158,7 +158,7 @@ func (d *dummyExtWorker) TunnyInitialize() {
 
 func (d *dummyExtWorker) TunnyTerminate() {
 	if !d.initialized {
-		d.t.Errorf("TunnyTerminate called without calling TunnyInitialize")
+		d.t.Errorf("Terminate called without calling Initialize")
 	}
 	d.initialized = false
 }
@@ -166,7 +166,7 @@ func (d *dummyExtWorker) TunnyTerminate() {
 // Test the pool with an extended worker implementation
 func TestDummyExtWorker(t *testing.T) {
 	pool, err := CreateCustomPool(
-		[]TunnyWorker{
+		[]Worker{
 			&dummyExtWorker{
 				dummyWorker: dummyWorker{t: t},
 			},
@@ -213,7 +213,7 @@ func (d *dummyExtIntWorker) TunnyInterrupt() {
 // Test the pool with an extended and interruptible worker implementation
 func TestDummyExtIntWorker(t *testing.T) {
 	pool, err := CreateCustomPool(
-		[]TunnyWorker{
+		[]Worker{
 			&dummyExtIntWorker{
 				dummyExtWorker: dummyExtWorker{
 					dummyWorker: dummyWorker{t: t},

+ 9 - 9
vendor/github.com/jeffail/tunny/worker.go

@@ -32,7 +32,7 @@ type workerWrapper struct {
 	jobChan    chan interface{}
 	outputChan chan interface{}
 	poolOpen   uint32
-	worker     TunnyWorker
+	worker     Worker
 }
 
 func (wrapper *workerWrapper) Loop() {
@@ -40,7 +40,7 @@ func (wrapper *workerWrapper) Loop() {
 	// TODO: Configure?
 	tout := time.Duration(5)
 
-	for !wrapper.worker.TunnyReady() {
+	for !wrapper.worker.Ready() {
 		// It's sad that we can't simply check if jobChan is closed here.
 		if atomic.LoadUint32(&wrapper.poolOpen) == 0 {
 			break
@@ -51,8 +51,8 @@ func (wrapper *workerWrapper) Loop() {
 	wrapper.readyChan <- 1
 
 	for data := range wrapper.jobChan {
-		wrapper.outputChan <- wrapper.worker.TunnyJob(data)
-		for !wrapper.worker.TunnyReady() {
+		wrapper.outputChan <- wrapper.worker.Job(data)
+		for !wrapper.worker.Ready() {
			if atomic.LoadUint32(&wrapper.poolOpen) == 0 {
 				break
 			}
@@ -67,8 +67,8 @@ func (wrapper *workerWrapper) Loop() {
 }
 
 func (wrapper *workerWrapper) Open() {
-	if extWorker, ok := wrapper.worker.(TunnyExtendedWorker); ok {
-		extWorker.TunnyInitialize()
+	if extWorker, ok := wrapper.worker.(ExtendedWorker); ok {
+		extWorker.Initialize()
 	}
 
 	wrapper.readyChan = make(chan int)
@@ -98,13 +98,13 @@ func (wrapper *workerWrapper) Join() {
 		}
 	}
 
-	if extWorker, ok := wrapper.worker.(TunnyExtendedWorker); ok {
-		extWorker.TunnyTerminate()
+	if extWorker, ok := wrapper.worker.(ExtendedWorker); ok {
+		extWorker.Terminate()
 	}
 }
 
 func (wrapper *workerWrapper) Interrupt() {
-	if extWorker, ok := wrapper.worker.(TunnyInterruptable); ok {
+	if extWorker, ok := wrapper.worker.(Interruptable); ok {
 		extWorker.TunnyInterrupt()
 	}
 }
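
workerWrapper upgrades its Worker to the optional interfaces via type assertions, so ExtendedWorker and Interruptable stay strictly opt-in. A self-contained sketch of that idiom (hypothetical types mirroring the renamed ones above):

```go
package main

import "fmt"

type Worker interface{ Job(interface{}) interface{} }

type ExtendedWorker interface {
	Initialize()
	Terminate()
}

// plain implements only Worker, not ExtendedWorker.
type plain struct{}

func (plain) Job(d interface{}) interface{} { return d }

func open(w Worker) {
	// The assertion succeeds only for workers that opted in.
	if ext, ok := w.(ExtendedWorker); ok {
		ext.Initialize()
	}
	fmt.Println(w.Job("ready"))
}

func main() { open(plain{}) }
```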

+ 6 - 0
yaml/utils.go

@@ -7,12 +7,18 @@ import (
 	"strings"
 	"strings"
 )
 )
 
 
+// SafeString returns a string which is sufficiently quoted and escaped for YAML.
 func SafeString(str string) string {
 func SafeString(str string) string {
 	str = strings.Replace(str, "\\", "\\\\", -1)
 	str = strings.Replace(str, "\\", "\\\\", -1)
 	str = strings.Replace(str, "\"", "\\\"", -1)
 	str = strings.Replace(str, "\"", "\\\"", -1)
 	return "\"" + str + "\""
 	return "\"" + str + "\""
 }
 }
 
 
+// PrintMatrix outputs a rectangular integer matrix in YAML text format.
+//
+// `indent` is the current YAML indentation level - the number of spaces.
+// `name` is the name of the corresponding YAML block. If empty, no separate block is created.
+// `fixNegative` changes all negative values to 0.
 func PrintMatrix(writer io.Writer, matrix [][]int64, indent int, name string, fixNegative bool) {
 func PrintMatrix(writer io.Writer, matrix [][]int64, indent int, name string, fixNegative bool) {
 	// determine the maximum length of each value
 	// determine the maximum length of each value
 	var maxnum int64 = -(1 << 32)
 	var maxnum int64 = -(1 << 32)
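
SafeString's escaping is easy to verify by hand; `safeString` below is a standalone copy of the function shown above (renamed only to keep the sketch self-contained):

```go
package main

import (
	"fmt"
	"strings"
)

// safeString escapes backslashes and double quotes, then wraps the result
// in double quotes, matching yaml.SafeString above.
func safeString(str string) string {
	str = strings.Replace(str, "\\", "\\\\", -1)
	str = strings.Replace(str, "\"", "\\\"", -1)
	return "\"" + str + "\""
}

func main() {
	fmt.Println(safeString(`say "hi"\`)) // "say \"hi\"\\"
}
```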