
Fix golint warnings related to docs for exported types

Vadim Markovtsev, 7 years ago
Commit b86f61d059
16 files changed, 153 insertions(+), 43 deletions(-)
  1. blob_cache.go (+9 / -4)
  2. burndown.go (+16 / -7)
  3. couples.go (+9 / -4)
  4. day.go (+4 / -0)
  5. diff.go (+8 / -0)
  6. diff_refiner.go (+6 / -1)
  7. file.go (+4 / -2)
  8. file_test.go (+3 / -3)
  9. identity.go (+24 / -7)
  10. identity_test.go (+2 / -2)
  11. pipeline.go (+19 / -12)
  12. renames.go (+7 / -0)
  13. shotness.go (+12 / -1)
  14. tree_diff.go (+5 / -0)
  15. uast.go (+23 / -0)
  16. version.go (+2 / -0)

+ 9 - 4
blob_cache.go

@@ -11,7 +11,8 @@ import (
 	"gopkg.in/src-d/go-git.v4/utils/merkletrie"
 )
 
-// This PipelineItem loads the blobs which correspond to the changed files in a commit.
+// BlobCache loads the blobs which correspond to the changed files in a commit.
+// It is a PipelineItem.
 // It must provide the old and the new objects; "blobCache" rotates and allows to not load
 // the same blobs twice. Outdated objects are removed so "blobCache" never grows big.
 type BlobCache struct {
@@ -25,7 +26,10 @@ type BlobCache struct {
 }
 
 const (
+	// ConfigBlobCacheIgnoreMissingSubmodules is the name of the configuration option for
+	// BlobCache.Configure() to not check if the referenced submodules exist.
 	ConfigBlobCacheIgnoreMissingSubmodules = "BlobCache.IgnoreMissingSubmodules"
+	// DependencyBlobCache identifies the dependency provided by BlobCache.
 	DependencyBlobCache                    = "blob_cache"
 )
 
@@ -46,8 +50,9 @@ func (blobCache *BlobCache) Requires() []string {
 func (blobCache *BlobCache) ListConfigurationOptions() []ConfigurationOption {
 	options := [...]ConfigurationOption{{
 		Name: ConfigBlobCacheIgnoreMissingSubmodules,
-		Description: "Specifies whether to panic if some submodules do not exist and thus " +
-			"the corresponding Git objects cannot be loaded.",
+		Description: "Specifies whether to panic if some referenced submodules do not exist and thus" +
+			" the corresponding Git objects cannot be loaded. Override this if you know that the " +
+				"history is dirty and you want to get things done.",
 		Flag:    "ignore-missing-submodules",
 		Type:    BoolConfigurationOption,
 		Default: false}}
@@ -125,7 +130,7 @@ func (blobCache *BlobCache) Consume(deps map[string]interface{}) (map[string]int
 	return map[string]interface{}{DependencyBlobCache: cache}, nil
 }
 
-// The definition of a function which loads a git file by the specified path.
+// FileGetter defines a function which loads the Git file by the specified path.
 // The state can be arbitrary though here it always corresponds to the currently processed
 // commit.
 type FileGetter func(path string) (*object.File, error)
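
The new option name can be fed back through the facts map that Pipeline.Initialize() hands to every item's Configure() (see pipeline.go further down). A minimal sketch, assuming the root package is imported as gopkg.in/src-d/hercules.v3 and that BlobCache.Configure() reads this key, which this diff does not show; on the command line the same switch surfaces as the "ignore-missing-submodules" flag declared above.

func blobCacheFacts() map[string]interface{} {
    return map[string]interface{}{
        // tolerate repositories whose submodule history is dirty
        hercules.ConfigBlobCacheIgnoreMissingSubmodules: true,
    }
}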

+ 16 - 7
burndown.go

@@ -19,7 +19,8 @@ import (
 	"gopkg.in/src-d/hercules.v3/yaml"
 )
 
-// BurndownAnalyser allows to gather the line burndown statistics for a Git repository.
+// BurndownAnalysis allows to gather the line burndown statistics for a Git repository.
+// It is a LeafPipelineItem.
 // Reference: https://erikbern.com/2016/12/05/the-half-life-of-code.html
 type BurndownAnalysis struct {
 	// Granularity sets the size of each band - the number of days it spans.
@@ -68,7 +69,8 @@ type BurndownAnalysis struct {
 	reversedPeopleDict []string
 }
 
-// Carries the result of running BurndownAnalysis - it is returned by BurndownAnalysis.Finalize().
+// BurndownResult carries the result of running BurndownAnalysis - it is returned by
+// BurndownAnalysis.Finalize().
 type BurndownResult struct {
 	// [number of samples][number of bands]
 	// The number of samples depends on Sampling: the less Sampling, the bigger the number.
@@ -99,15 +101,22 @@ type BurndownResult struct {
 }
 
 const (
+	// ConfigBurndownGranularity is the name of the option to set BurndownAnalysis.Granularity.
 	ConfigBurndownGranularity = "Burndown.Granularity"
+	// ConfigBurndownSampling is the name of the option to set BurndownAnalysis.Sampling.
 	ConfigBurndownSampling    = "Burndown.Sampling"
-	// Measuring individual files is optional and false by default.
+	// ConfigBurndownTrackFiles enables burndown collection for files.
 	ConfigBurndownTrackFiles = "Burndown.TrackFiles"
-	// Measuring authors is optional and false by default.
+	// ConfigBurndownTrackPeople enables burndown collection for authors.
 	ConfigBurndownTrackPeople = "Burndown.TrackPeople"
-	// Enables some extra debug assertions.
+	// ConfigBurndownDebug enables some extra debug assertions.
 	ConfigBurndownDebug        = "Burndown.Debug"
+	// DefaultBurndownGranularity is the default number of days for BurndownAnalysis.Granularity
+	// and BurndownAnalysis.Sampling.
 	DefaultBurndownGranularity = 30
+	// authorSelf is the internal author index which is used in BurndownAnalysis.Finalize() to
+	// format the author overwrites matrix.
+	authorSelf = (1 << 18) - 2
 )
 
 func (analyser *BurndownAnalysis) Name() string {
@@ -265,7 +274,7 @@ func (analyser *BurndownAnalysis) Finalize() interface{} {
 		for key, val := range row {
 			if key == AuthorMissing {
 				key = -1
-			} else if key == AuthorSelf {
+			} else if key == authorSelf {
 				key = -2
 			}
 			mrow[key+2] = val
@@ -809,7 +818,7 @@ func (analyser *BurndownAnalysis) updateMatrix(
 		return
 	}
 	if newAuthor == oldAuthor && delta > 0 {
-		newAuthor = AuthorSelf
+		newAuthor = authorSelf
 	}
 	row := matrix[oldAuthor]
 	if row == nil {

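The Finalize() hunk above packs the people overwrite matrix into columns with a fixed offset. A small self-contained sketch of that remapping; the constants mirror AuthorMissing and the new unexported authorSelf, and the column meanings are inferred from this hunk only.

const (
    authorMissing = (1 << 18) - 1 // mirrors identity.go's AuthorMissing
    authorSelf    = (1 << 18) - 2 // mirrors the unexported authorSelf introduced above
)

// remapColumn reproduces the index shuffle from Finalize(): edits by the same
// author land in column 0, unmatched authors in column 1, and author i in column i+2.
func remapColumn(key int) int {
    if key == authorMissing {
        key = -1
    } else if key == authorSelf {
        key = -2
    }
    return key + 2
}
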
+ 9 - 4
couples.go

@@ -13,27 +13,32 @@ import (
 	"gopkg.in/src-d/hercules.v3/yaml"
 )
 
+// CouplesAnalysis calculates the number of common commits for files and authors.
+// The results are matrices, where cell at row X and column Y is the number of commits which
+// changed X and Y together. In case with people, the numbers are summed for every common file.
 type CouplesAnalysis struct {
-	// The number of developers for which to build the matrix. 0 disables this analysis.
+	// PeopleNumber is the number of developers for which to build the matrix. 0 disables this analysis.
 	PeopleNumber int
 
 	// people store how many times every developer committed to every file.
 	people []map[string]int
-	// peopleCommits is the number of commits each author made
+	// peopleCommits is the number of commits each author made.
 	peopleCommits []int
 	// files store every file occurred in the same commit with every other file.
 	files map[string]map[string]int
-	// references IdentityDetector.ReversedPeopleDict
+	// reversedPeopleDict references IdentityDetector.ReversedPeopleDict
 	reversedPeopleDict []string
 }
 
+// CouplesResult is returned by CouplesAnalysis.Finalize() and carries couples matrices from
+// authors and files.
 type CouplesResult struct {
 	PeopleMatrix []map[int]int64
 	PeopleFiles  [][]int
 	FilesMatrix  []map[int]int64
 	Files        []string
 
-	// references IdentityDetector.ReversedPeopleDict
+	// reversedPeopleDict references IdentityDetector.ReversedPeopleDict
 	reversedPeopleDict []string
 }
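
To make the matrix semantics concrete, a hedged sketch of reading a CouplesResult, assuming CouplesAnalysis.Finalize() (not part of this diff) returns this struct.

// commonCommits returns how many commits changed both result.Files[x] and
// result.Files[y]; a missing key simply means zero co-occurrences.
func commonCommits(result hercules.CouplesResult, x, y int) int64 {
    return result.FilesMatrix[x][y]
}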
 

+ 4 - 0
day.go

@@ -7,12 +7,16 @@ import (
 	"gopkg.in/src-d/go-git.v4/plumbing/object"
 )
 
+// DaysSinceStart provides the relative date information for every commit.
+// It is a PipelineItem.
 type DaysSinceStart struct {
 	day0        time.Time
 	previousDay int
 }
 
 const (
+	// DependencyDay is the name of the dependency which DaysSinceStart provides - the number
+	// of days since the first commit in the analysed sequence.
 	DependencyDay = "day"
 )
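
A hypothetical consumer sketch for the dependency named above; the diff only documents the name, so treating the value as an int day index is an assumption.

// readDay extracts the number of days since the first commit from the deps map
// handed to a downstream item's Consume().
func readDay(deps map[string]interface{}) int {
    return deps[hercules.DependencyDay].(int)
}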
 

+ 8 - 0
diff.go

@@ -14,16 +14,22 @@ import (
 )
 
 // FileDiff calculates the difference of files which were modified.
+// It is a PipelineItem.
 type FileDiff struct {
 	CleanupDisabled bool
 }
 
 const (
+	// ConfigFileDiffDisableCleanup is the name of the configuration option (FileDiff.Configure())
+	// to suppress diffmatchpatch.DiffCleanupSemanticLossless() which is supposed to improve
+	// the human interpretability of diffs.
 	ConfigFileDiffDisableCleanup = "FileDiff.NoCleanup"
 
+	// DependencyFileDiff is the name of the dependency provided by FileDiff.
 	DependencyFileDiff = "file_diff"
 )
 
+// FileDiffData is the type of the dependency provided by FileDiff.
 type FileDiffData struct {
 	OldLinesOfCode int
 	NewLinesOfCode int
@@ -104,6 +110,7 @@ func (diff *FileDiff) Consume(deps map[string]interface{}) (map[string]interface
 	return map[string]interface{}{DependencyFileDiff: result}, nil
 }
 
+// CountLines returns the number of lines in a *object.Blob.
 func CountLines(file *object.Blob) (int, error) {
 	if file == nil {
 		return -1, errors.New("blob is nil: probably not cached")
@@ -132,6 +139,7 @@ func CountLines(file *object.Blob) (int, error) {
 	return counter, nil
 }
 
+// BlobToString reads *object.Blob and returns its contents as a string.
 func BlobToString(file *object.Blob) (string, error) {
 	if file == nil {
 		return "", errors.New("blob is nil: probably not cached")

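The two helpers documented above can be combined as follows; a minimal sketch, with *object.Blob coming from gopkg.in/src-d/go-git.v4/plumbing/object as in this file's imports.

// describeBlob returns the line count and the full text of a cached blob,
// propagating the "blob is nil: probably not cached" errors shown above.
func describeBlob(blob *object.Blob) (int, string, error) {
    lines, err := hercules.CountLines(blob)
    if err != nil {
        return 0, "", err
    }
    text, err := hercules.BlobToString(blob)
    if err != nil {
        return 0, "", err
    }
    return lines, text, nil
}
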
+ 6 - 1
diff_refiner.go

@@ -8,6 +8,10 @@ import (
 	"gopkg.in/src-d/go-git.v4"
 )
 
+// FileDiffRefiner uses UASTs to improve the human interpretability of diffs.
+// It is a PipelineItem.
+// The idea behind this algorithm is simple: in case of multiple choices which are equally
+// optimal, choose the one which touches less AST nodes.
 type FileDiffRefiner struct {
 }
 
@@ -138,7 +142,8 @@ func (ref *FileDiffRefiner) Consume(deps map[string]interface{}) (map[string]int
 	return map[string]interface{}{DependencyFileDiff: result}, nil
 }
 
-// Depth first tree traversal.
+// VisitEachNode is a handy routine to execute a callback on every node in the subtree,
+// including the root itself. Depth first tree traversal.
 func VisitEachNode(root *uast.Node, payload func(*uast.Node)) {
 	queue := []*uast.Node{}
 	queue = append(queue, root)

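An illustrative use of VisitEachNode, not taken from the commit; *uast.Node is the Babelfish SDK node type this file already works with.

// countNodes walks a UAST subtree depth first, root included, and counts the nodes.
func countNodes(root *uast.Node) int {
    total := 0
    hercules.VisitEachNode(root, func(*uast.Node) { total++ })
    return total
}
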
+ 4 - 2
file.go

@@ -5,13 +5,13 @@ import (
 	"gopkg.in/src-d/hercules.v3/rbtree"
 )
 
-// A status is the something we would like to update during File.Update().
+// Status is the something we would like to keep track of in File.Update().
 type Status struct {
 	data   interface{}
 	update func(interface{}, int, int, int)
 }
 
-// A file encapsulates a balanced binary tree to store line intervals and
+// File encapsulates a balanced binary tree to store line intervals and
 // a cumulative mapping of values to the corresponding length counters. Users
 // are not supposed to create File-s directly; instead, they should call NewFile().
 // NewFileFromTree() is the special constructor which is useful in the tests.
@@ -27,6 +27,8 @@ type File struct {
 	statuses []Status
 }
 
+// NewStatus initializes a new instance of Status struct. It is needed to set the only two
+// private fields which are not supposed to be replaced during the whole lifetime.
 func NewStatus(data interface{}, update func(interface{}, int, int, int)) Status {
 	return Status{data: data, update: update}
 }
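
A sketch of NewStatus in use; interpreting the callback arguments as (data, current time, previous time, delta) is an assumption, since the diff only shows the func(interface{}, int, int, int) signature.

// lineCounterStatus builds a Status whose callback accumulates inserted lines;
// the result would be handed to NewFile() together with the other statuses.
func lineCounterStatus() hercules.Status {
    counter := map[int]int64{}
    return hercules.NewStatus(counter, func(data interface{}, _, _, delta int) {
        if delta > 0 {
            data.(map[int]int64)[0] += int64(delta)
        }
    })
}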

+ 3 - 3
file_test.go

@@ -44,9 +44,9 @@ func TestBullshitFile(t *testing.T) {
 	testPanicFile(t, func(file *File) { file.Update(1, 110, 10, 0) }, "insert")
 	testPanicFile(t, func(file *File) { file.Update(1, -10, 0, 10) }, "delete")
 	testPanicFile(t, func(file *File) { file.Update(1, 100, 0, 10) }, "delete")
-	testPanicFile(t, func(file *File) { file.Update(1, 0, -10, 0) }, "length")
-	testPanicFile(t, func(file *File) { file.Update(1, 0, 0, -10) }, "length")
-	testPanicFile(t, func(file *File) { file.Update(1, 0, -10, -10) }, "length")
+	testPanicFile(t, func(file *File) { file.Update(1, 0, -10, 0) }, "Length")
+	testPanicFile(t, func(file *File) { file.Update(1, 0, 0, -10) }, "Length")
+	testPanicFile(t, func(file *File) { file.Update(1, 0, -10, -10) }, "Length")
 	testPanicFile(t, func(file *File) { file.Update(-1, 0, 10, 10) }, "time")
 	file, status := fixtureFile()
 	file.Update(1, 10, 0, 0)

+ 24 - 7
identity.go

@@ -10,23 +10,40 @@ import (
 	"gopkg.in/src-d/go-git.v4/plumbing/object"
 )
 
+// IdentityDetector determines the author of a commit. Same person can commit under different
+// signatures, and we apply some heuristics to merge those together.
+// It is a PipelineItem.
 type IdentityDetector struct {
-	// Maps email || name  -> developer id.
+	// PeopleDict maps email || name  -> developer id.
 	PeopleDict map[string]int
-	// Maps developer id -> description
+	// ReversedPeopleDict maps developer id -> description
 	ReversedPeopleDict []string
 }
 
 const (
+	// AuthorMissing is the internal author index which denotes any unmatched identities
+	// (IdentityDetector.Consume()).
 	AuthorMissing   = (1 << 18) - 1
-	AuthorSelf      = (1 << 18) - 2
-	AuthorUnmatched = "<unmatched>"
+	// AuthorMissingName is the string name which corresponds to AuthorMissing.
+	AuthorMissingName = "<unmatched>"
 
+	// FactIdentityDetectorPeopleDict is the name of the fact which is inserted in
+	// IdentityDetector.Configure(). It corresponds to IdentityDetector.PeopleDict - the mapping
+	// from the signatures to the author indices.
 	FactIdentityDetectorPeopleDict         = "IdentityDetector.PeopleDict"
+	// FactIdentityDetectorReversedPeopleDict is the name of the fact which is inserted in
+	// IdentityDetector.Configure(). It corresponds to IdentityDetector.ReversedPeopleDict -
+	// the mapping from the author indices to the main signature.
 	FactIdentityDetectorReversedPeopleDict = "IdentityDetector.ReversedPeopleDict"
+	// ConfigIdentityDetectorPeopleDictPath is the name of the configuration option
+	// (IdentityDetector.Configure()) which allows to set the external PeopleDict mapping from a file.
 	ConfigIdentityDetectorPeopleDictPath   = "IdentityDetector.PeopleDictPath"
+	// FactIdentityDetectorPeopleCount is the name of the fact which is inserted in
+	// IdentityDetector.Configure(). It is equal to the overall number of unique authors
+	// (the length of ReversedPeopleDict).
 	FactIdentityDetectorPeopleCount        = "IdentityDetector.PeopleCount"
 
+	// DependencyAuthor is the name of the dependency provided by IdentityDetector.
 	DependencyAuthor = "author"
 )
 
@@ -67,10 +84,10 @@ func (id *IdentityDetector) Configure(facts map[string]interface{}) {
 			id.LoadPeopleDict(peopleDictPath)
 			facts[FactIdentityDetectorPeopleCount] = len(id.ReversedPeopleDict) - 1
 		} else {
-			if _, exists := facts[FactPipelineCommits]; !exists {
+			if _, exists := facts[ConfigPipelineCommits]; !exists {
 				panic("IdentityDetector needs a list of commits to initialize.")
 			}
-			id.GeneratePeopleDict(facts[FactPipelineCommits].([]*object.Commit))
+			id.GeneratePeopleDict(facts[ConfigPipelineCommits].([]*object.Commit))
 			facts[FactIdentityDetectorPeopleCount] = len(id.ReversedPeopleDict)
 		}
 	} else {
@@ -114,7 +131,7 @@ func (id *IdentityDetector) LoadPeopleDict(path string) error {
 		reverseDict = append(reverseDict, ids[0])
 		size++
 	}
-	reverseDict = append(reverseDict, AuthorUnmatched)
+	reverseDict = append(reverseDict, AuthorMissingName)
 	id.PeopleDict = dict
 	id.ReversedPeopleDict = reverseDict
 	return nil

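A hedged sketch of driving these facts from the caller side, assuming an IdentityDetector is registered in the pipeline; the dictionary path is hypothetical and its file format is not shown in this diff.

func configureIdentities(pipeline *hercules.Pipeline) int {
    facts := map[string]interface{}{
        hercules.ConfigIdentityDetectorPeopleDictPath: "people.dict", // hypothetical path
    }
    pipeline.Initialize(facts)
    // Configure() records the resulting number of authors as a fact.
    return facts[hercules.FactIdentityDetectorPeopleCount].(int)
}
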
+ 2 - 2
identity_test.go

@@ -160,7 +160,7 @@ func TestIdentityDetectorLoadPeopleDict(t *testing.T) {
 	assert.Equal(t, id.ReversedPeopleDict[0], "Linus Torvalds")
 	assert.Equal(t, id.ReversedPeopleDict[1], "Vadim Markovtsev")
 	assert.Equal(t, id.ReversedPeopleDict[2], "Máximo Cuadros")
-	assert.Equal(t, id.ReversedPeopleDict[3], AuthorUnmatched)
+	assert.Equal(t, id.ReversedPeopleDict[3], AuthorMissingName)
 }
 
 /*
@@ -229,7 +229,7 @@ func TestIdentityDetectorGeneratePeopleDict(t *testing.T) {
 	assert.Equal(t, id.ReversedPeopleDict[0], "vadim markovtsev|gmarkhor@gmail.com|vadim@sourced.tech")
 	assert.Equal(t, id.ReversedPeopleDict[1], "alexander bezzubov|bzz@apache.org")
 	assert.Equal(t, id.ReversedPeopleDict[2], "máximo cuadros|mcuadros@gmail.com")
-	assert.NotEqual(t, id.ReversedPeopleDict[len(id.ReversedPeopleDict)-1], AuthorUnmatched)
+	assert.NotEqual(t, id.ReversedPeopleDict[len(id.ReversedPeopleDict)-1], AuthorMissingName)
 }
 
 func TestIdentityDetectorLoadPeopleDictInvalidPath(t *testing.T) {

+ 19 - 12
pipeline.go

@@ -22,11 +22,11 @@ import (
 type ConfigurationOptionType int
 
 const (
-	// Boolean value type.
+	// BoolConfigurationOption reflects the boolean value type.
 	BoolConfigurationOption ConfigurationOptionType = iota
-	// Integer value type.
+	// IntConfigurationOption reflects the integer value type.
 	IntConfigurationOption
-	// String value type.
+	// StringConfigurationOption reflects the string value type.
 	StringConfigurationOption
 )
 
@@ -167,7 +167,7 @@ func (car *CommonAnalysisResult) FillMetadata(meta *pb.Metadata) *pb.Metadata {
 	return meta
 }
 
-// MetadataToCommonAnalysisResult() copies the data from a Protobuf message.
+// MetadataToCommonAnalysisResult copies the data from a Protobuf message.
 func MetadataToCommonAnalysisResult(meta *pb.Metadata) *CommonAnalysisResult {
 	return &CommonAnalysisResult{
 		BeginTime:     meta.BeginUnixTime,
@@ -177,8 +177,8 @@ func MetadataToCommonAnalysisResult(meta *pb.Metadata) *CommonAnalysisResult {
 	}
 }
 
-// The core Hercules entity which carries several PipelineItems and executes them.
-// See the extended example of how a Pipeline works in doc.go.
+// Pipeline is the core Hercules entity which carries several PipelineItems and executes them.
+// See the extended example of how a Pipeline works in doc.go
 type Pipeline struct {
 	// OnProgress is the callback which is invoked in Analyse() to output it's
 	// progress. The first argument is the number of processed commits and the
@@ -200,15 +200,20 @@ type Pipeline struct {
 }
 
 const (
-	// Makes Pipeline to save the DAG to the specified file.
+	// ConfigPipelineDumpPath is the name of the Pipeline configuration option (Pipeline.Initialize())
+	// which enables saving the items DAG to the specified file.
 	ConfigPipelineDumpPath = "Pipeline.DumpPath"
-	// Disables Configure() and Initialize() invokation on each PipelineItem during the initialization.
+	// ConfigPipelineDryRun is the name of the Pipeline configuration option (Pipeline.Initialize())
+	// which disables Configure() and Initialize() invocation on each PipelineItem during the
+	// Pipeline initialization.
 	// Subsequent Run() calls are going to fail. Useful with ConfigPipelineDumpPath=true.
 	ConfigPipelineDryRun = "Pipeline.DryRun"
-	// Allows to specify the custom commit chain. By default, Pipeline.Commits() is used.
-	FactPipelineCommits = "commits"
+	// ConfigPipelineCommits is the name of the Pipeline configuration option (Pipeline.Initialize())
+	// which allows to specify the custom commit sequence. By default, Pipeline.Commits() is used.
+	ConfigPipelineCommits = "commits"
 )
 
+// NewPipeline initializes a new instance of Pipeline struct.
 func NewPipeline(repository *git.Repository) *Pipeline {
 	return &Pipeline{
 		repository: repository,
@@ -481,8 +486,8 @@ func (pipeline *Pipeline) Initialize(facts map[string]interface{}) {
 	if facts == nil {
 		facts = map[string]interface{}{}
 	}
-	if _, exists := facts[FactPipelineCommits]; !exists {
-		facts[FactPipelineCommits] = pipeline.Commits()
+	if _, exists := facts[ConfigPipelineCommits]; !exists {
+		facts[ConfigPipelineCommits] = pipeline.Commits()
 	}
 	dumpPath, _ := facts[ConfigPipelineDumpPath].(string)
 	pipeline.resolve(dumpPath)
@@ -546,6 +551,8 @@ func (pipeline *Pipeline) Run(commits []*object.Commit) (map[LeafPipelineItem]in
 	return result, nil
 }
 
+// LoadCommitsFromFile reads the file by the specified FS path and generates the sequence of commits
+// by interpreting each line as a Git commit hash.
 func LoadCommitsFromFile(path string, repository *git.Repository) ([]*object.Commit, error) {
 	var file io.ReadCloser
 	if path != "-" {

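Putting the exported pieces of this file together: a minimal end-to-end sketch built only from identifiers visible in this diff, assuming the root package import path gopkg.in/src-d/hercules.v3. Registering analysis items is elided because that API is not shown here, and commits.txt is a hypothetical file with one commit hash per line.

package main

import (
    "gopkg.in/src-d/go-git.v4"
    "gopkg.in/src-d/hercules.v3"
)

func main() {
    repo, err := git.PlainOpen(".")
    if err != nil {
        panic(err)
    }
    pipeline := hercules.NewPipeline(repo)
    commits, err := hercules.LoadCommitsFromFile("commits.txt", repo)
    if err != nil {
        panic(err)
    }
    pipeline.Initialize(map[string]interface{}{
        hercules.ConfigPipelineCommits: commits, // otherwise Pipeline.Commits() is used
    })
    results, err := pipeline.Run(commits)
    if err != nil {
        panic(err)
    }
    _ = results // map[LeafPipelineItem]interface{}, one entry per leaf item
}
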
+ 7 - 0
renames.go

@@ -13,6 +13,9 @@ import (
 	"gopkg.in/src-d/go-git.v4/utils/merkletrie"
 )
 
+// RenameAnalysis improves TreeDiff's results by searching for changed blobs under different
+// paths which are likely to be the result of a rename with subsequent edits.
+// RenameAnalysis is a PipelineItem.
 type RenameAnalysis struct {
 	// SimilarityThreshold adjusts the heuristic to determine file renames.
 	// It has the same units as cgit's -X rename-threshold or -M. Better to
@@ -23,8 +26,12 @@ type RenameAnalysis struct {
 }
 
 const (
+	// RenameAnalysisDefaultThreshold specifies the default percentage of common lines in a pair
+	// of files to consider them linked. The exact code of the decision is sizesAreClose().
 	RenameAnalysisDefaultThreshold = 90
 
+	// ConfigRenameAnalysisSimilarityThreshold is the name of the configuration option
+	// (RenameAnalysis.Configure()) which sets the similarity threshold.
 	ConfigRenameAnalysisSimilarityThreshold = "RenameAnalysis.SimilarityThreshold"
 )
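
A short sketch of overriding the threshold through the facts map; 80 is an arbitrary example value, and whether RenameAnalysis.Configure() (not in this diff) reads exactly this key is assumed from the naming convention of the other items.

func renameFacts() map[string]interface{} {
    return map[string]interface{}{
        // percentage of common lines, same units as cgit's -M; the default is 90
        hercules.ConfigRenameAnalysisSimilarityThreshold: 80,
    }
}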
 

+ 12 - 1
shotness.go

@@ -27,10 +27,19 @@ type ShotnessAnalysis struct {
 }
 
 const (
+	// ConfigShotnessXpathStruct is the name of the configuration option (ShotnessAnalysis.Configure())
+	// which sets the UAST XPath to choose the analysed nodes.
 	ConfigShotnessXpathStruct = "Shotness.XpathStruct"
+	// ConfigShotnessXpathName is the name of the configuration option (ShotnessAnalysis.Configure())
+	// which sets the UAST XPath to find the name of the nodes chosen by ConfigShotnessXpathStruct.
+	// These XPath-s can be different for some languages.
 	ConfigShotnessXpathName   = "Shotness.XpathName"
 
+	// DefaultShotnessXpathStruct is the default UAST XPath to choose the analysed nodes.
+	// It extracts functions.
 	DefaultShotnessXpathStruct = "//*[@roleFunction and @roleDeclaration]"
+	// DefaultShotnessXpathName is the default UAST XPath to choose the names of the analysed nodes.
+	// It looks at the current tree level and at the immediate children.
 	DefaultShotnessXpathName   = "/*[@roleFunction and @roleIdentifier and @roleName] | /*/*[@roleFunction and @roleIdentifier and @roleName]"
 )
 
@@ -40,6 +49,8 @@ type nodeShotness struct {
 	Couples map[string]int
 }
 
+// NodeSummary carries the node attributes which annotate the "shotness" analysis' counters.
+// These attributes are supposed to uniquely identify each node.
 type NodeSummary struct {
 	InternalRole string
 	Roles        []uast.Role
@@ -47,7 +58,7 @@ type NodeSummary struct {
 	File         string
 }
 
-// ShotnessResult is returned by Finalize() and represents the analysis result.
+// ShotnessResult is returned by ShotnessAnalysis.Finalize() and represents the analysis result.
 type ShotnessResult struct {
 	Nodes    []NodeSummary
 	Counters []map[int]int

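A sketch of swapping the defaults for other node kinds; the XPath strings below are illustrative guesses and not from the commit, only the option names and the function-oriented defaults are.

func shotnessFacts() map[string]interface{} {
    return map[string]interface{}{
        // hypothetical XPaths targeting type declarations instead of functions
        hercules.ConfigShotnessXpathStruct: "//*[@roleType and @roleDeclaration]",
        hercules.ConfigShotnessXpathName:   "/*[@roleType and @roleIdentifier and @roleName]",
    }
}
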
+ 5 - 0
tree_diff.go

@@ -7,11 +7,16 @@ import (
 	"gopkg.in/src-d/go-git.v4/plumbing/object"
 )
 
+// TreeDiff generates the list of changes for a commit. A change can be either one or two blobs
+// under the same path: "before" and "after". If "before" is nil, the change is an addition.
+// If "after" is nil, the change is a removal. Otherwise, it is a modification.
+// TreeDiff is a PipelineItem.
 type TreeDiff struct {
 	previousTree *object.Tree
 }
 
 const (
+	// DependencyTreeChanges is the name of the dependency provided by TreeDiff.
 	DependencyTreeChanges = "changes"
 )
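
A hypothetical consumer of DependencyTreeChanges illustrating the addition/removal/modification rule above; treating the items as go-git *object.Change values (with merkletrie from gopkg.in/src-d/go-git.v4/utils/merkletrie) is an assumption, since the diff documents only the name.

// classify maps a tree change onto the three cases described above.
func classify(change *object.Change) string {
    action, err := change.Action()
    if err != nil {
        return "unknown"
    }
    switch action {
    case merkletrie.Insert:
        return "addition" // "before" is nil
    case merkletrie.Delete:
        return "removal" // "after" is nil
    default:
        return "modification"
    }
}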
 

+ 23 - 0
uast.go

@@ -28,6 +28,8 @@ import (
 	"gopkg.in/src-d/hercules.v3/pb"
 )
 
+// UASTExtractor retrieves UASTs from Babelfish server which correspond to changed files in a commit.
+// It is a PipelineItem.
 type UASTExtractor struct {
 	Endpoint       string
 	Context        func() (context.Context, context.CancelFunc)
@@ -43,13 +45,26 @@ type UASTExtractor struct {
 const (
 	uastExtractionSkipped = -(1 << 31)
 
+	// ConfigUASTEndpoint is the name of the configuration option (UASTExtractor.Configure())
+	// which sets the Babelfish server address.
 	ConfigUASTEndpoint     = "ConfigUASTEndpoint"
+	// ConfigUASTTimeout is the name of the configuration option (UASTExtractor.Configure())
+	// which sets the maximum amount of time to wait for a Babelfish server response.
 	ConfigUASTTimeout      = "ConfigUASTTimeout"
+	// ConfigUASTPoolSize is the name of the configuration option (UASTExtractor.Configure())
+	// which sets the number of goroutines to run for UAST parse queries.
 	ConfigUASTPoolSize     = "ConfigUASTPoolSize"
+	// ConfigUASTFailOnErrors is the name of the configuration option (UASTExtractor.Configure())
+	// which enables early exit in case of any Babelfish UAST parsing errors.
 	ConfigUASTFailOnErrors = "ConfigUASTFailOnErrors"
+	// ConfigUASTLanguages is the name of the configuration option (UASTExtractor.Configure())
+	// which sets the list of languages to parse. Language names are at
+	// https://doc.bblf.sh/languages.html Names are joined with a comma ",".
 	ConfigUASTLanguages    = "ConfigUASTLanguages"
 
+	// FeatureUast is the name of the Pipeline feature which activates all the items related to UAST.
 	FeatureUast     = "uast"
+	// DependencyUasts is the name of the dependency provided by UASTExtractor.
 	DependencyUasts = "uasts"
 )
 
@@ -299,6 +314,7 @@ func (exr *UASTExtractor) extractTask(data interface{}) interface{} {
 	return nil
 }
 
+// UASTChange is the type of the items in the list of changes which is provided by UASTChanges.
 type UASTChange struct {
 	Before *uast.Node
 	After  *uast.Node
@@ -306,9 +322,12 @@ type UASTChange struct {
 }
 
 const (
+	// DependencyUastChanges is the name of the dependency provided by UASTChanges.
 	DependencyUastChanges = "changed_uasts"
 )
 
+// UASTChanges is a structured analog of TreeDiff: it provides UASTs for every logical change
+// in a commit. It is a PipelineItem.
 type UASTChanges struct {
 	cache map[plumbing.Hash]*uast.Node
 }
@@ -373,6 +392,8 @@ func (uc *UASTChanges) Consume(deps map[string]interface{}) (map[string]interfac
 	return map[string]interface{}{DependencyUastChanges: commit}, nil
 }
 
+// UASTChangesSaver dumps changed files and corresponding UASTs for every commit.
+// It is a LeafPipelineItem.
 type UASTChangesSaver struct {
 	// OutputPath points to the target directory with UASTs
 	OutputPath string
@@ -382,6 +403,8 @@ type UASTChangesSaver struct {
 }
 
 const (
+	// ConfigUASTChangesSaverOutputPath is the name of the configuration option
+	// (UASTChangesSaver.Configure()) which sets the target directory where to save the files.
 	ConfigUASTChangesSaverOutputPath = "UASTChangesSaver.OutputPath"
 )
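
To show the new option names in context, a hedged configuration sketch; the endpoint, languages, and pool size are example values, and 0.0.0.0:9432 is just the conventional Babelfish port, not something this commit states.

func uastFacts() map[string]interface{} {
    return map[string]interface{}{
        hercules.ConfigUASTEndpoint:     "0.0.0.0:9432", // example Babelfish server address
        hercules.ConfigUASTLanguages:    "Go,Python",    // comma-joined, per the comment above
        hercules.ConfigUASTFailOnErrors: false,          // keep going on parse errors
        hercules.ConfigUASTPoolSize:     4,              // number of parse goroutines (example)
    }
}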
 

+ 2 - 0
version.go

@@ -6,8 +6,10 @@ import (
 	"strings"
 )
 
+// BinaryGitHash is the Git hash of the Hercules binary file which is executing.
 var BinaryGitHash = "<unknown>"
 
+// BinaryVersion is Hercules' API version. It matches the package name.
 var BinaryVersion = 0
 
 type versionProbe struct{}