Rename 'days' to 'ticks', add tick-size configuration

Signed-off-by: Robert Lin <robertlin1@gmail.com>
Robert Lin 6 years ago
parent
commit
351c12e346
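
In practice the rename swaps a hard-coded one-day bucket for a configurable interval: the tick index becomes int(committerTime.Sub(tick0) / tickSize), with tickSize defaulting to 24 hours and exposed on the command line as --tick-size. A minimal standalone sketch of the new semantics (the timestamps are illustrative, not taken from the test repository):

package main

import (
	"fmt"
	"time"
)

// tickIndex mirrors the computation in TicksSinceStart.Consume(): the number
// of whole ticks elapsed between the first commit (tick0) and this commit.
func tickIndex(tick0, when time.Time, tickSize time.Duration) int {
	return int(when.Sub(tick0) / tickSize)
}

func main() {
	tick0 := time.Date(2016, 12, 12, 18, 30, 29, 0, time.UTC) // illustrative
	commit := tick0.Add(26 * time.Hour)
	fmt.Println(tickIndex(tick0, commit, 24*time.Hour)) // default daily ticks -> 1
	fmt.Println(tickIndex(tick0, commit, time.Hour))    // --tick-size 1 -> 26
}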

+ 5 - 5
core.go

@@ -122,9 +122,9 @@ const (
 	DependencyAuthor = identity.DependencyAuthor
 	// DependencyBlobCache identifies the dependency provided by BlobCache.
 	DependencyBlobCache = plumbing.DependencyBlobCache
-	// DependencyDay is the name of the dependency which DaysSinceStart provides - the number
-	// of days since the first commit in the analysed sequence.
-	DependencyDay = plumbing.DependencyDay
+	// DependencyTick is the name of the dependency which TicksSinceStart provides - the number
+	// of ticks since the first commit in the analysed sequence.
+	DependencyTick = plumbing.DependencyTick
 	// DependencyFileDiff is the name of the dependency provided by FileDiff.
 	DependencyFileDiff = plumbing.DependencyFileDiff
 	// DependencyTreeChanges is the name of the dependency provided by TreeDiff.
@@ -133,8 +133,8 @@ const (
 	DependencyUastChanges = uast.DependencyUastChanges
 	// DependencyUasts is the name of the dependency provided by Extractor.
 	DependencyUasts = uast.DependencyUasts
-	// FactCommitsByDay contains the mapping between day indices and the corresponding commits.
-	FactCommitsByDay = plumbing.FactCommitsByDay
+	// FactCommitsByTick contains the mapping between tick indices and the corresponding commits.
+	FactCommitsByTick = plumbing.FactCommitsByTick
 	// FactIdentityDetectorPeopleCount is the name of the fact which is inserted in
 	// identity.Detector.Configure(). It is equal to the overall number of unique authors
 	// (the length of ReversedPeopleDict).

+ 1 - 1
internal/burndown/file.go

@@ -31,7 +31,7 @@ type File struct {
 // TreeEnd denotes the value of the last leaf in the tree.
 const TreeEnd = math.MaxUint32
 
-// TreeMaxBinPower is the binary power value which corresponds to the maximum day which
+// TreeMaxBinPower is the binary power value which corresponds to the maximum tick which
 // can be stored in the tree.
 const TreeMaxBinPower = 14
 

+ 0 - 135
internal/plumbing/day.go

@@ -1,135 +0,0 @@
-package plumbing
-
-import (
-	"log"
-	"time"
-
-	"gopkg.in/src-d/go-git.v4"
-	"gopkg.in/src-d/go-git.v4/plumbing"
-	"gopkg.in/src-d/go-git.v4/plumbing/object"
-	"gopkg.in/src-d/hercules.v9/internal/core"
-)
-
-// DaysSinceStart provides the relative date information for every commit.
-// It is a PipelineItem.
-type DaysSinceStart struct {
-	core.NoopMerger
-	remote      string
-	day0        *time.Time
-	previousDay int
-	commits     map[int][]plumbing.Hash
-}
-
-const (
-	// DependencyDay is the name of the dependency which DaysSinceStart provides - the number
-	// of days since the first commit in the analysed sequence.
-	DependencyDay = "day"
-
-	// FactCommitsByDay contains the mapping between day indices and the corresponding commits.
-	FactCommitsByDay = "DaysSinceStart.Commits"
-)
-
-// Name of this PipelineItem. Uniquely identifies the type, used for mapping keys, etc.
-func (days *DaysSinceStart) Name() string {
-	return "DaysSinceStart"
-}
-
-// Provides returns the list of names of entities which are produced by this PipelineItem.
-// Each produced entity will be inserted into `deps` of dependent Consume()-s according
-// to this list. Also used by core.Registry to build the global map of providers.
-func (days *DaysSinceStart) Provides() []string {
-	arr := [...]string{DependencyDay}
-	return arr[:]
-}
-
-// Requires returns the list of names of entities which are needed by this PipelineItem.
-// Each requested entity will be inserted into `deps` of Consume(). In turn, those
-// entities are Provides() upstream.
-func (days *DaysSinceStart) Requires() []string {
-	return []string{}
-}
-
-// ListConfigurationOptions returns the list of changeable public properties of this PipelineItem.
-func (days *DaysSinceStart) ListConfigurationOptions() []core.ConfigurationOption {
-	return []core.ConfigurationOption{}
-}
-
-// Configure sets the properties previously published by ListConfigurationOptions().
-func (days *DaysSinceStart) Configure(facts map[string]interface{}) error {
-	if days.commits == nil {
-		days.commits = map[int][]plumbing.Hash{}
-	}
-	facts[FactCommitsByDay] = days.commits
-	return nil
-}
-
-// Initialize resets the temporary caches and prepares this PipelineItem for a series of Consume()
-// calls. The repository which is going to be analysed is supplied as an argument.
-func (days *DaysSinceStart) Initialize(repository *git.Repository) error {
-	days.day0 = &time.Time{}
-	days.previousDay = 0
-	if len(days.commits) > 0 {
-		keys := make([]int, len(days.commits))
-		for key := range days.commits {
-			keys = append(keys, key)
-		}
-		for _, key := range keys {
-			delete(days.commits, key)
-		}
-	}
-	if r, err := repository.Remotes(); err == nil && len(r) > 0 {
-		days.remote = r[0].Config().URLs[0]
-	}
-	return nil
-}
-
-// Consume runs this PipelineItem on the next commit data.
-// `deps` contain all the results from upstream PipelineItem-s as requested by Requires().
-// Additionally, DependencyCommit is always present there and represents the analysed *object.Commit.
-// This function returns the mapping with analysis results. The keys must be the same as
-// in Provides(). If there was an error, nil is returned.
-func (days *DaysSinceStart) Consume(deps map[string]interface{}) (map[string]interface{}, error) {
-	commit := deps[core.DependencyCommit].(*object.Commit)
-	index := deps[core.DependencyIndex].(int)
-	if index == 0 {
-		// first iteration - initialize the file objects from the tree
-		// our precision is 1 day
-		*days.day0 = commit.Committer.When.Truncate(24 * time.Hour)
-		if days.day0.Unix() < 631152000 { // 01.01.1990, that was 30 years ago
-			log.Println()
-			log.Printf("Warning: suspicious committer timestamp in %s > %s",
-				days.remote, commit.Hash.String())
-		}
-	}
-	day := int(commit.Committer.When.Sub(*days.day0).Hours() / 24)
-	if day < days.previousDay {
-		// rebase works miracles, but we need the monotonous time
-		day = days.previousDay
-	}
-	days.previousDay = day
-	dayCommits := days.commits[day]
-	if dayCommits == nil {
-		dayCommits = []plumbing.Hash{}
-	}
-	exists := false
-	if commit.NumParents() > 0 {
-		for i := range dayCommits {
-			if dayCommits[len(dayCommits)-i-1] == commit.Hash {
-				exists = true
-			}
-		}
-	}
-	if !exists {
-		days.commits[day] = append(dayCommits, commit.Hash)
-	}
-	return map[string]interface{}{DependencyDay: day}, nil
-}
-
-// Fork clones this PipelineItem.
-func (days *DaysSinceStart) Fork(n int) []core.PipelineItem {
-	return core.ForkCopyPipelineItem(days, n)
-}
-
-func init() {
-	core.Registry.Register(&DaysSinceStart{})
-}

+ 0 - 156
internal/plumbing/day_test.go

@@ -1,156 +0,0 @@
-package plumbing
-
-import (
-	"bytes"
-	"log"
-	"os"
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/assert"
-	"gopkg.in/src-d/go-git.v4/plumbing"
-	"gopkg.in/src-d/hercules.v9/internal/core"
-	"gopkg.in/src-d/hercules.v9/internal/test"
-)
-
-func fixtureDaysSinceStart() *DaysSinceStart {
-	dss := DaysSinceStart{}
-	dss.Configure(map[string]interface{}{})
-	dss.Initialize(test.Repository)
-	return &dss
-}
-
-func TestDaysSinceStartMeta(t *testing.T) {
-	dss := fixtureDaysSinceStart()
-	assert.Equal(t, dss.Name(), "DaysSinceStart")
-	assert.Equal(t, len(dss.Provides()), 1)
-	assert.Equal(t, dss.Provides()[0], DependencyDay)
-	assert.Equal(t, len(dss.Requires()), 0)
-	assert.Len(t, dss.ListConfigurationOptions(), 0)
-	dss.Configure(map[string]interface{}{})
-}
-
-func TestDaysSinceStartRegistration(t *testing.T) {
-	summoned := core.Registry.Summon((&DaysSinceStart{}).Name())
-	assert.Len(t, summoned, 1)
-	assert.Equal(t, summoned[0].Name(), "DaysSinceStart")
-	summoned = core.Registry.Summon((&DaysSinceStart{}).Provides()[0])
-	assert.Len(t, summoned, 1)
-	assert.Equal(t, summoned[0].Name(), "DaysSinceStart")
-}
-
-func TestDaysSinceStartConsume(t *testing.T) {
-	dss := fixtureDaysSinceStart()
-	deps := map[string]interface{}{}
-	commit, _ := test.Repository.CommitObject(plumbing.NewHash(
-		"cce947b98a050c6d356bc6ba95030254914027b1"))
-	deps[core.DependencyCommit] = commit
-	deps[core.DependencyIndex] = 0
-	res, err := dss.Consume(deps)
-	assert.Nil(t, err)
-	assert.Equal(t, res[DependencyDay].(int), 0)
-	assert.Equal(t, dss.previousDay, 0)
-	assert.Equal(t, dss.day0.Hour(), 1)   // 18 UTC+1
-	assert.Equal(t, dss.day0.Minute(), 0) // 30
-	assert.Equal(t, dss.day0.Second(), 0) // 29
-
-	commit, _ = test.Repository.CommitObject(plumbing.NewHash(
-		"fc9ceecb6dabcb2aab60e8619d972e8d8208a7df"))
-	deps[core.DependencyCommit] = commit
-	deps[core.DependencyIndex] = 10
-	res, err = dss.Consume(deps)
-	assert.Nil(t, err)
-	assert.Equal(t, res[DependencyDay].(int), 1)
-	assert.Equal(t, dss.previousDay, 1)
-
-	commit, _ = test.Repository.CommitObject(plumbing.NewHash(
-		"a3ee37f91f0d705ec9c41ae88426f0ae44b2fbc3"))
-	deps[core.DependencyCommit] = commit
-	deps[core.DependencyIndex] = 20
-	res, err = dss.Consume(deps)
-	assert.Nil(t, err)
-	assert.Equal(t, res[DependencyDay].(int), 1)
-	assert.Equal(t, dss.previousDay, 1)
-
-	commit, _ = test.Repository.CommitObject(plumbing.NewHash(
-		"a8b665a65d7aced63f5ba2ff6d9b71dac227f8cf"))
-	deps[core.DependencyCommit] = commit
-	deps[core.DependencyIndex] = 20
-	res, err = dss.Consume(deps)
-	assert.Nil(t, err)
-	assert.Equal(t, res[DependencyDay].(int), 2)
-	assert.Equal(t, dss.previousDay, 2)
-
-	commit, _ = test.Repository.CommitObject(plumbing.NewHash(
-		"186ff0d7e4983637bb3762a24d6d0a658e7f4712"))
-	deps[core.DependencyCommit] = commit
-	deps[core.DependencyIndex] = 30
-	res, err = dss.Consume(deps)
-	assert.Nil(t, err)
-	assert.Equal(t, res[DependencyDay].(int), 2)
-	assert.Equal(t, dss.previousDay, 2)
-
-	assert.Len(t, dss.commits, 3)
-	assert.Equal(t, dss.commits[0], []plumbing.Hash{plumbing.NewHash(
-		"cce947b98a050c6d356bc6ba95030254914027b1")})
-	assert.Equal(t, dss.commits[1], []plumbing.Hash{
-		plumbing.NewHash("fc9ceecb6dabcb2aab60e8619d972e8d8208a7df"),
-		plumbing.NewHash("a3ee37f91f0d705ec9c41ae88426f0ae44b2fbc3")})
-	assert.Equal(t, dss.commits[2], []plumbing.Hash{
-		plumbing.NewHash("a8b665a65d7aced63f5ba2ff6d9b71dac227f8cf"),
-		plumbing.NewHash("186ff0d7e4983637bb3762a24d6d0a658e7f4712")})
-}
-
-func TestDaysCommits(t *testing.T) {
-	dss := fixtureDaysSinceStart()
-	dss.commits[0] = []plumbing.Hash{plumbing.NewHash(
-		"cce947b98a050c6d356bc6ba95030254914027b1")}
-	commits := dss.commits
-	dss.Initialize(test.Repository)
-	assert.Len(t, dss.commits, 0)
-	assert.Equal(t, dss.commits, commits)
-}
-
-func TestDaysSinceStartFork(t *testing.T) {
-	dss1 := fixtureDaysSinceStart()
-	dss1.commits[0] = []plumbing.Hash{plumbing.NewHash(
-		"cce947b98a050c6d356bc6ba95030254914027b1")}
-	clones := dss1.Fork(1)
-	assert.Len(t, clones, 1)
-	dss2 := clones[0].(*DaysSinceStart)
-	assert.Equal(t, dss1.day0, dss2.day0)
-	assert.Equal(t, dss1.previousDay, dss2.previousDay)
-	assert.Equal(t, dss1.commits, dss2.commits)
-	dss1.commits[0] = append(dss1.commits[0], plumbing.ZeroHash)
-	assert.Len(t, dss2.commits[0], 2)
-	assert.True(t, dss1 != dss2)
-	// just for the sake of it
-	dss1.Merge([]core.PipelineItem{dss2})
-}
-
-func TestDaysSinceStartConsumeZero(t *testing.T) {
-	dss := fixtureDaysSinceStart()
-	deps := map[string]interface{}{}
-	commit, _ := test.Repository.CommitObject(plumbing.NewHash(
-		"cce947b98a050c6d356bc6ba95030254914027b1"))
-	commit.Committer.When = time.Unix(0, 0)
-	deps[core.DependencyCommit] = commit
-	deps[core.DependencyIndex] = 0
-	// print warning to log
-	myOutput := &bytes.Buffer{}
-	log.SetOutput(myOutput)
-	defer func() {
-		log.SetOutput(os.Stderr)
-	}()
-	res, err := dss.Consume(deps)
-	assert.Nil(t, err)
-	assert.Contains(t, myOutput.String(), "Warning")
-	assert.Contains(t, myOutput.String(), "cce947b98a050c6d356bc6ba95030254914027b1")
-	assert.Contains(t, myOutput.String(), "hercules")
-	assert.Contains(t, myOutput.String(), "github.com")
-	assert.Equal(t, res[DependencyDay].(int), 0)
-	assert.Equal(t, dss.previousDay, 0)
-	assert.Equal(t, dss.day0.Year(), 1970)
-	assert.Equal(t, dss.day0.Minute(), 0)
-	assert.Equal(t, dss.day0.Second(), 0)
-}

+ 158 - 0
internal/plumbing/ticks.go

@@ -0,0 +1,158 @@
+package plumbing
+
+import (
+	"log"
+	"time"
+
+	"gopkg.in/src-d/go-git.v4"
+	"gopkg.in/src-d/go-git.v4/plumbing"
+	"gopkg.in/src-d/go-git.v4/plumbing/object"
+	"gopkg.in/src-d/hercules.v9/internal/core"
+)
+
+// TicksSinceStart provides relative tick information for every commit.
+// It is a PipelineItem.
+type TicksSinceStart struct {
+	core.NoopMerger
+	remote       string
+	tickSize     time.Duration
+	tick0        *time.Time
+	previousTick int
+	commits      map[int][]plumbing.Hash
+}
+
+const (
+	// DependencyTick is the name of the dependency which TicksSinceStart provides - the number
+	// of ticks since the first commit in the analysed sequence.
+	DependencyTick = "tick"
+
+	// FactCommitsByTick contains the mapping between tick indices and the corresponding commits.
+	FactCommitsByTick = "TicksSinceStart.Commits"
+
+	// ConfigTicksSinceStartTickSize sets the size of each 'tick' in hours.
+	ConfigTicksSinceStartTickSize = "TicksSinceStart.TickSize"
+
+	// DefaultTicksSinceStartTickSize is the default number of hours in each 'tick' (24 hours = 1 day).
+	DefaultTicksSinceStartTickSize = 24
+)
+
+// Name of this PipelineItem. Uniquely identifies the type, used for mapping keys, etc.
+func (ticks *TicksSinceStart) Name() string {
+	return "TicksSinceStart"
+}
+
+// Provides returns the list of names of entities which are produced by this PipelineItem.
+// Each produced entity will be inserted into `deps` of dependent Consume()-s according
+// to this list. Also used by core.Registry to build the global map of providers.
+func (ticks *TicksSinceStart) Provides() []string {
+	arr := [...]string{DependencyTick}
+	return arr[:]
+}
+
+// Requires returns the list of names of entities which are needed by this PipelineItem.
+// Each requested entity will be inserted into `deps` of Consume(). In turn, those
+// entities are Provides() upstream.
+func (ticks *TicksSinceStart) Requires() []string {
+	return []string{}
+}
+
+// ListConfigurationOptions returns the list of changeable public properties of this PipelineItem.
+func (ticks *TicksSinceStart) ListConfigurationOptions() []core.ConfigurationOption {
+	return []core.ConfigurationOption{{
+		Name:        ConfigTicksSinceStartTickSize,
+		Description: "How long each 'tick' represents in hours.",
+		Flag:        "tick-size",
+		Type:        core.IntConfigurationOption,
+		Default:     DefaultTicksSinceStartTickSize},
+	}
+}
+
+// Configure sets the properties previously published by ListConfigurationOptions().
+func (ticks *TicksSinceStart) Configure(facts map[string]interface{}) error {
+	if val, exists := facts[ConfigTicksSinceStartTickSize].(int); exists {
+		ticks.tickSize = time.Duration(val) * time.Hour
+	} else {
+		// fall back to the default tick size of one day
+		ticks.tickSize = time.Duration(DefaultTicksSinceStartTickSize) * time.Hour
+	}
+	if ticks.commits == nil {
+		ticks.commits = map[int][]plumbing.Hash{}
+	}
+	facts[FactCommitsByTick] = ticks.commits
+	return nil
+}
+
+// Initialize resets the temporary caches and prepares this PipelineItem for a series of Consume()
+// calls. The repository which is going to be analysed is supplied as an argument.
+func (ticks *TicksSinceStart) Initialize(repository *git.Repository) error {
+	ticks.tick0 = &time.Time{}
+	ticks.previousTick = 0
+	if len(ticks.commits) > 0 {
+		keys := make([]int, len(ticks.commits))
+		for key := range ticks.commits {
+			keys = append(keys, key)
+		}
+		for _, key := range keys {
+			delete(ticks.commits, key)
+		}
+	}
+	if r, err := repository.Remotes(); err == nil && len(r) > 0 {
+		ticks.remote = r[0].Config().URLs[0]
+	}
+	return nil
+}
+
+// Consume runs this PipelineItem on the next commit data.
+// `deps` contain all the results from upstream PipelineItem-s as requested by Requires().
+// Additionally, DependencyCommit is always present there and represents the analysed *object.Commit.
+// This function returns the mapping with analysis results. The keys must be the same as
+// in Provides(). If there was an error, nil is returned.
+func (ticks *TicksSinceStart) Consume(deps map[string]interface{}) (map[string]interface{}, error) {
+	commit := deps[core.DependencyCommit].(*object.Commit)
+	index := deps[core.DependencyIndex].(int)
+	if index == 0 {
+		// first iteration - remember the committer time of the first commit;
+		// every tick index is measured from tick0 in units of tickSize
+		*ticks.tick0 = commit.Committer.When
+		if ticks.tick0.Unix() < 631152000 { // 01.01.1990, that was 30 years ago
+			log.Println()
+			log.Printf("Warning: suspicious committer timestamp in %s > %s: %d",
+				ticks.remote, commit.Hash.String(), ticks.tick0.Unix())
+		}
+	}
+
+	tick := int(commit.Committer.When.Sub(*ticks.tick0) / ticks.tickSize)
+	if tick < ticks.previousTick {
+		// rebase works miracles, but we need the monotonous time
+		tick = ticks.previousTick
+	}
+
+	ticks.previousTick = tick
+	tickCommits := ticks.commits[tick]
+	if tickCommits == nil {
+		tickCommits = []plumbing.Hash{}
+	}
+
+	exists := false
+	if commit.NumParents() > 0 {
+		for i := range tickCommits {
+			if tickCommits[len(tickCommits)-i-1] == commit.Hash {
+				exists = true
+			}
+		}
+	}
+	if !exists {
+		ticks.commits[tick] = append(tickCommits, commit.Hash)
+	}
+
+	return map[string]interface{}{DependencyTick: tick}, nil
+}
+
+// Fork clones this PipelineItem.
+func (ticks *TicksSinceStart) Fork(n int) []core.PipelineItem {
+	return core.ForkCopyPipelineItem(ticks, n)
+}
+
+func init() {
+	core.Registry.Register(&TicksSinceStart{})
+}
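
A hedged configuration sketch of the contract above: the tick size reaches Configure() through the facts map under ConfigTicksSinceStartTickSize, and Configure() publishes the commits-by-tick mapping back under FactCommitsByTick. Note that internal/plumbing is an internal package, so this only compiles inside the hercules module; the import aliases follow the repository's own convention, and configureTicks is a name of ours:

package example

import (
	gitplumbing "gopkg.in/src-d/go-git.v4/plumbing"
	items "gopkg.in/src-d/hercules.v9/internal/plumbing"
)

// configureTicks reads the tick size (in hours) from the facts map and
// returns the shared commits-by-tick mapping that Configure() registered.
func configureTicks() (map[int][]gitplumbing.Hash, error) {
	ticks := &items.TicksSinceStart{}
	facts := map[string]interface{}{
		items.ConfigTicksSinceStartTickSize: 12, // 12-hour ticks
	}
	if err := ticks.Configure(facts); err != nil {
		return nil, err
	}
	return facts[items.FactCommitsByTick].(map[int][]gitplumbing.Hash), nil
}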

+ 213 - 0
internal/plumbing/ticks_test.go

@@ -0,0 +1,213 @@
+package plumbing
+
+import (
+	"bytes"
+	"log"
+	"os"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"gopkg.in/src-d/go-git.v4/plumbing"
+	"gopkg.in/src-d/hercules.v9/internal/core"
+	"gopkg.in/src-d/hercules.v9/internal/test"
+)
+
+func fixtureTicksSinceStart(config ...map[string]interface{}) *TicksSinceStart {
+	tss := TicksSinceStart{
+		tickSize: 24 * time.Hour,
+	}
+	if len(config) != 1 {
+		config = []map[string]interface{}{{}}
+	}
+	tss.Configure(config[0])
+	tss.Initialize(test.Repository)
+	return &tss
+}
+
+func TestTicksSinceStartMeta(t *testing.T) {
+	tss := fixtureTicksSinceStart()
+	assert.Equal(t, tss.Name(), "TicksSinceStart")
+	assert.Equal(t, len(tss.Provides()), 1)
+	assert.Equal(t, tss.Provides()[0], DependencyTick)
+	assert.Equal(t, len(tss.Requires()), 0)
+	assert.Len(t, tss.ListConfigurationOptions(), 1)
+	tss.Configure(map[string]interface{}{})
+}
+
+func TestTicksSinceStartRegistration(t *testing.T) {
+	summoned := core.Registry.Summon((&TicksSinceStart{}).Name())
+	assert.Len(t, summoned, 1)
+	assert.Equal(t, summoned[0].Name(), "TicksSinceStart")
+	summoned = core.Registry.Summon((&TicksSinceStart{}).Provides()[0])
+	assert.Len(t, summoned, 1)
+	assert.Equal(t, summoned[0].Name(), "TicksSinceStart")
+}
+
+func TestTicksSinceStartConsume(t *testing.T) {
+	tss := fixtureTicksSinceStart()
+	deps := map[string]interface{}{}
+	commit, _ := test.Repository.CommitObject(plumbing.NewHash(
+		"cce947b98a050c6d356bc6ba95030254914027b1"))
+	deps[core.DependencyCommit] = commit
+	deps[core.DependencyIndex] = 0
+	res, err := tss.Consume(deps)
+	assert.Nil(t, err)
+	assert.Equal(t, 0, res[DependencyTick].(int))
+	assert.Equal(t, 0, tss.previousTick)
+	assert.Equal(t, 18, tss.tick0.Hour())   // 18 UTC+1
+	assert.Equal(t, 30, tss.tick0.Minute()) // 30
+	assert.Equal(t, 29, tss.tick0.Second()) // 29
+
+	commit, _ = test.Repository.CommitObject(plumbing.NewHash(
+		"fc9ceecb6dabcb2aab60e8619d972e8d8208a7df"))
+	deps[core.DependencyCommit] = commit
+	deps[core.DependencyIndex] = 10
+	res, err = tss.Consume(deps)
+	assert.Nil(t, err)
+	assert.Equal(t, res[DependencyTick].(int), 1)
+	assert.Equal(t, tss.previousTick, 1)
+
+	commit, _ = test.Repository.CommitObject(plumbing.NewHash(
+		"a3ee37f91f0d705ec9c41ae88426f0ae44b2fbc3"))
+	deps[core.DependencyCommit] = commit
+	deps[core.DependencyIndex] = 20
+	res, err = tss.Consume(deps)
+	assert.Nil(t, err)
+	assert.Equal(t, res[DependencyTick].(int), 1)
+	assert.Equal(t, tss.previousTick, 1)
+
+	commit, _ = test.Repository.CommitObject(plumbing.NewHash(
+		"a8b665a65d7aced63f5ba2ff6d9b71dac227f8cf"))
+	deps[core.DependencyCommit] = commit
+	deps[core.DependencyIndex] = 20
+	res, err = tss.Consume(deps)
+	assert.Nil(t, err)
+	assert.Equal(t, res[DependencyTick].(int), 2)
+	assert.Equal(t, tss.previousTick, 2)
+
+	commit, _ = test.Repository.CommitObject(plumbing.NewHash(
+		"186ff0d7e4983637bb3762a24d6d0a658e7f4712"))
+	deps[core.DependencyCommit] = commit
+	deps[core.DependencyIndex] = 30
+	res, err = tss.Consume(deps)
+	assert.Nil(t, err)
+	assert.Equal(t, res[DependencyTick].(int), 2)
+	assert.Equal(t, tss.previousTick, 2)
+
+	assert.Len(t, tss.commits, 3)
+	assert.Equal(t, tss.commits[0], []plumbing.Hash{plumbing.NewHash(
+		"cce947b98a050c6d356bc6ba95030254914027b1")})
+	assert.Equal(t, tss.commits[1], []plumbing.Hash{
+		plumbing.NewHash("fc9ceecb6dabcb2aab60e8619d972e8d8208a7df"),
+		plumbing.NewHash("a3ee37f91f0d705ec9c41ae88426f0ae44b2fbc3")})
+	assert.Equal(t, tss.commits[2], []plumbing.Hash{
+		plumbing.NewHash("a8b665a65d7aced63f5ba2ff6d9b71dac227f8cf"),
+		plumbing.NewHash("186ff0d7e4983637bb3762a24d6d0a658e7f4712")})
+}
+
+func TestTicksSinceStartConsumeWithTickSize(t *testing.T) {
+	tss := fixtureTicksSinceStart(map[string]interface{}{
+		ConfigTicksSinceStartTickSize: 1, // 1-hour ticks
+	})
+	commit, _ := test.Repository.CommitObject(plumbing.NewHash(
+		"cce947b98a050c6d356bc6ba95030254914027b1"))
+	deps := map[string]interface{}{
+		core.DependencyCommit: commit,
+		core.DependencyIndex:  0,
+	}
+	res, err := tss.Consume(deps)
+	assert.Nil(t, err)
+	assert.Equal(t, 0, res[DependencyTick].(int))
+	assert.Equal(t, 0, tss.previousTick)
+	assert.Equal(t, 18, tss.tick0.Hour())   // 18 UTC+1
+	assert.Equal(t, 30, tss.tick0.Minute()) // 30
+	assert.Equal(t, 29, tss.tick0.Second()) // 29
+
+	commit, _ = test.Repository.CommitObject(plumbing.NewHash(
+		"fc9ceecb6dabcb2aab60e8619d972e8d8208a7df"))
+	deps[core.DependencyCommit] = commit
+	deps[core.DependencyIndex] = 10
+	res, err = tss.Consume(deps)
+	assert.Nil(t, err)
+	assert.Equal(t, 24, res[DependencyTick].(int)) // 1 day later
+	assert.Equal(t, 24, tss.previousTick)
+
+	commit, _ = test.Repository.CommitObject(plumbing.NewHash(
+		"a3ee37f91f0d705ec9c41ae88426f0ae44b2fbc3"))
+	deps[core.DependencyCommit] = commit
+	deps[core.DependencyIndex] = 20
+	res, err = tss.Consume(deps)
+	assert.Nil(t, err)
+	assert.Equal(t, 24, res[DependencyTick].(int)) // 1 day later
+	assert.Equal(t, 24, tss.previousTick)
+
+	assert.Len(t, tss.commits, 2)
+	assert.Equal(t, []plumbing.Hash{plumbing.NewHash(
+		"cce947b98a050c6d356bc6ba95030254914027b1")},
+		tss.commits[0])
+	assert.Equal(t, []plumbing.Hash{
+		plumbing.NewHash("fc9ceecb6dabcb2aab60e8619d972e8d8208a7df"),
+		plumbing.NewHash("a3ee37f91f0d705ec9c41ae88426f0ae44b2fbc3")},
+		tss.commits[24])
+}
+
+func TestTicksCommits(t *testing.T) {
+	tss := fixtureTicksSinceStart()
+	tss.commits[0] = []plumbing.Hash{plumbing.NewHash(
+		"cce947b98a050c6d356bc6ba95030254914027b1")}
+	commits := tss.commits
+	tss.Initialize(test.Repository)
+	assert.Len(t, tss.commits, 0)
+	assert.Equal(t, tss.commits, commits)
+}
+
+func TestTicksSinceStartFork(t *testing.T) {
+	tss1 := fixtureTicksSinceStart()
+	tss1.commits[0] = []plumbing.Hash{plumbing.NewHash(
+		"cce947b98a050c6d356bc6ba95030254914027b1")}
+	clones := tss1.Fork(1)
+	assert.Len(t, clones, 1)
+	tss2 := clones[0].(*TicksSinceStart)
+	assert.Equal(t, tss1.tick0, tss2.tick0)
+	assert.Equal(t, tss1.previousTick, tss2.previousTick)
+	assert.Equal(t, tss1.commits, tss2.commits)
+	tss1.commits[0] = append(tss1.commits[0], plumbing.ZeroHash)
+	assert.Len(t, tss2.commits[0], 2)
+	assert.True(t, tss1 != tss2)
+	// just for the sake of it
+	tss1.Merge([]core.PipelineItem{tss2})
+}
+
+func TestTicksSinceStartConsumeZero(t *testing.T) {
+	tss := fixtureTicksSinceStart()
+	deps := map[string]interface{}{}
+	commit, _ := test.Repository.CommitObject(plumbing.NewHash(
+		"cce947b98a050c6d356bc6ba95030254914027b1"))
+	commit.Committer.When = time.Unix(0, 0)
+	deps[core.DependencyCommit] = commit
+	deps[core.DependencyIndex] = 0
+	// print warning to log
+	myOutput := &bytes.Buffer{}
+	log.SetOutput(myOutput)
+	defer func() {
+		log.SetOutput(os.Stderr)
+	}()
+	res, err := tss.Consume(deps)
+	assert.Nil(t, err)
+	output := myOutput.String()
+	assert.Contains(t, output, "Warning")
+	assert.Contains(t, output, "cce947b98a050c6d356bc6ba95030254914027b1")
+	assert.Contains(t, output, "hercules")
+	// depending on where the contributor cloned this project from, the remote
+	// reported in the warning is either gopkg.in or github.com
+	if !strings.Contains(output, "github.com") && !strings.Contains(output, "gopkg.in") {
+		assert.Failf(t, "unexpected remote in the warning",
+			"output should contain either 'github.com' or 'gopkg.in':\n%s", output)
+	}
+	assert.Equal(t, res[DependencyTick].(int), 0)
+	assert.Equal(t, tss.previousTick, 0)
+	assert.Equal(t, tss.tick0.Year(), 1969)
+	assert.Equal(t, tss.tick0.Minute(), 0)
+	assert.Equal(t, tss.tick0.Second(), 0)
+}

+ 102 - 98
leaves/burndown.go

@@ -31,12 +31,12 @@ import (
 // It is a LeafPipelineItem.
 // Reference: https://erikbern.com/2016/12/05/the-half-life-of-code.html
 type BurndownAnalysis struct {
-	// Granularity sets the size of each band - the number of days it spans.
+	// Granularity sets the size of each band - the number of ticks it spans.
 	// Smaller values provide better resolution but require more work and eat more
-	// memory. 30 days is usually enough.
+	// memory. 30 ticks is usually enough.
 	Granularity int
 	// Sampling sets how detailed is the statistic - the size of the interval in
-	// days between consecutive measurements. It may not be greater than Granularity. Try 15 or 30.
+	// ticks between consecutive measurements. It may not be greater than Granularity. Try 15 or 30.
 	Sampling int
 
 	// TrackFiles enables or disables the fine-grained per-file burndown analysis.
@@ -67,9 +67,9 @@ type BurndownAnalysis struct {
 	// Repository points to the analysed Git repository struct from go-git.
 	repository *git.Repository
 	// globalHistory is the daily deltas of daily line counts.
-	// E.g. day 0: day 0 +50 lines
-	//      day 10: day 0 -10 lines; day 10 +20 lines
-	//      day 12: day 0 -5 lines; day 10 -3 lines; day 12 +10 lines
+	// E.g. tick 0: tick 0 +50 lines
+	//      tick 10: tick 0 -10 lines; tick 10 +20 lines
+	//      tick 12: tick 0 -5 lines; tick 10 -3 lines; tick 12 +10 lines
 	// map [0] [0] = 50
 	// map[10] [0] = -10
 	// map[10][10] = 20
@@ -95,11 +95,11 @@ type BurndownAnalysis struct {
 	renames map[string]string
 	// matrix is the mutual deletions and self insertions.
 	matrix []map[int]int64
-	// day is the most recent day index processed.
-	day int
-	// previousDay is the day from the previous sample period -
-	// different from DaysSinceStart.previousDay.
-	previousDay int
+	// tick is the most recent tick index processed.
+	tick int
+	// previousTick is the tick from the previous sample period -
+	// different from TicksSinceStart.previousTick.
+	previousTick int
 	// references IdentityDetector.ReversedPeopleDict
 	reversedPeopleDict []string
 }
@@ -160,7 +160,7 @@ const (
 	ConfigBurndownHibernationDirectory = "Burndown.HibernationDirectory"
 	// ConfigBurndownDebug enables some extra debug assertions.
 	ConfigBurndownDebug = "Burndown.Debug"
-	// DefaultBurndownGranularity is the default number of days for BurndownAnalysis.Granularity
+	// DefaultBurndownGranularity is the default number of ticks for BurndownAnalysis.Granularity
 	// and BurndownAnalysis.Sampling.
 	DefaultBurndownGranularity = 30
 	// authorSelf is the internal author index which is used in BurndownAnalysis.Finalize() to
@@ -192,7 +192,7 @@ func (analyser *BurndownAnalysis) Provides() []string {
 func (analyser *BurndownAnalysis) Requires() []string {
 	arr := [...]string{
 		items.DependencyFileDiff, items.DependencyTreeChanges, items.DependencyBlobCache,
-		items.DependencyDay, identity.DependencyAuthor}
+		items.DependencyTick, identity.DependencyAuthor}
 	return arr[:]
 }
 
@@ -200,12 +200,12 @@ func (analyser *BurndownAnalysis) Requires() []string {
 func (analyser *BurndownAnalysis) ListConfigurationOptions() []core.ConfigurationOption {
 	options := [...]core.ConfigurationOption{{
 		Name:        ConfigBurndownGranularity,
-		Description: "How many days there are in a single band.",
+		Description: "How many time ticks there are in a single band.",
 		Flag:        "granularity",
 		Type:        core.IntConfigurationOption,
 		Default:     DefaultBurndownGranularity}, {
 		Name:        ConfigBurndownSampling,
-		Description: "How frequently to record the state in days.",
+		Description: "How frequently to record the state in time ticks.",
 		Flag:        "sampling",
 		Type:        core.IntConfigurationOption,
 		Default:     DefaultBurndownGranularity}, {
@@ -298,12 +298,12 @@ func (analyser *BurndownAnalysis) Description() string {
 // calls. The repository which is going to be analysed is supplied as an argument.
 func (analyser *BurndownAnalysis) Initialize(repository *git.Repository) error {
 	if analyser.Granularity <= 0 {
-		log.Printf("Warning: adjusted the granularity to %d days\n",
+		log.Printf("Warning: adjusted the granularity to %d ticks\n",
 			DefaultBurndownGranularity)
 		analyser.Granularity = DefaultBurndownGranularity
 	}
 	if analyser.Sampling <= 0 {
-		log.Printf("Warning: adjusted the sampling to %d days\n",
+		log.Printf("Warning: adjusted the sampling to %d ticks\n",
 			DefaultBurndownGranularity)
 		analyser.Sampling = DefaultBurndownGranularity
 	}
@@ -326,8 +326,8 @@ func (analyser *BurndownAnalysis) Initialize(repository *git.Repository) error {
 	analyser.mergedAuthor = identity.AuthorMissing
 	analyser.renames = map[string]string{}
 	analyser.matrix = make([]map[int]int64, analyser.PeopleNumber)
-	analyser.day = 0
-	analyser.previousDay = 0
+	analyser.tick = 0
+	analyser.previousTick = 0
 	return nil
 }
 
@@ -341,14 +341,14 @@ func (analyser *BurndownAnalysis) Consume(deps map[string]interface{}) (map[stri
 		panic("BurndownAnalysis.Consume() was called on a hibernated instance")
 	}
 	author := deps[identity.DependencyAuthor].(int)
-	day := deps[items.DependencyDay].(int)
+	tick := deps[items.DependencyTick].(int)
 	if !deps[core.DependencyIsMerge].(bool) {
-		analyser.day = day
-		analyser.onNewDay()
+		analyser.tick = tick
+		analyser.onNewTick()
 	} else {
 		// effectively disables the status updates if the commit is a merge
 		// we will analyse the conflicts resolution in Merge()
-		analyser.day = burndown.TreeMergeMark
+		analyser.tick = burndown.TreeMergeMark
 		analyser.mergedFiles = map[string]bool{}
 		analyser.mergedAuthor = author
 	}
@@ -370,8 +370,8 @@ func (analyser *BurndownAnalysis) Consume(deps map[string]interface{}) (map[stri
 			return nil, err
 		}
 	}
-	// in case there is a merge analyser.day equals to TreeMergeMark
-	analyser.day = day
+	// in case there is a merge analyser.tick equals to TreeMergeMark
+	analyser.tick = tick
 	return nil, nil
 }
 
@@ -431,7 +431,9 @@ func (analyser *BurndownAnalysis) Merge(branches []core.PipelineItem) {
 			// it could be also removed in the merge commit itself
 			continue
 		}
-		files[0].Merge(analyser.packPersonWithDay(analyser.mergedAuthor, analyser.day), files[1:]...)
+		files[0].Merge(
+			analyser.packPersonWithTick(analyser.mergedAuthor, analyser.tick),
+			files[1:]...)
 		for _, burn := range all {
 			if burn.files[key] != files[0] {
 				if burn.files[key] != nil {
@@ -441,7 +443,7 @@ func (analyser *BurndownAnalysis) Merge(branches []core.PipelineItem) {
 			}
 		}
 	}
-	analyser.onNewDay()
+	analyser.onNewTick()
 }
 
 // Hibernate compresses the bound RBTree memory with the files.
@@ -486,14 +488,14 @@ func (analyser *BurndownAnalysis) Boot() error {
 
 // Finalize returns the result of the analysis. Further Consume() calls are not expected.
 func (analyser *BurndownAnalysis) Finalize() interface{} {
-	globalHistory, lastDay := analyser.groupSparseHistory(analyser.globalHistory, -1)
+	globalHistory, lastTick := analyser.groupSparseHistory(analyser.globalHistory, -1)
 	fileHistories := map[string]DenseHistory{}
 	fileOwnership := map[string]map[int]int{}
 	for key, history := range analyser.fileHistories {
 		if len(history) == 0 {
 			continue
 		}
-		fileHistories[key], _ = analyser.groupSparseHistory(history, lastDay)
+		fileHistories[key], _ = analyser.groupSparseHistory(history, lastTick)
 		file := analyser.files[key]
 		previousLine := 0
 		previousAuthor := identity.AuthorMissing
@@ -505,7 +507,7 @@ func (analyser *BurndownAnalysis) Finalize() interface{} {
 				ownership[previousAuthor] += length
 			}
 			previousLine = line
-			previousAuthor, _ = analyser.unpackPersonWithDay(int(value))
+			previousAuthor, _ = analyser.unpackPersonWithTick(int(value))
 			if previousAuthor == identity.AuthorMissing {
 				previousAuthor = -1
 			}
@@ -515,7 +517,7 @@ func (analyser *BurndownAnalysis) Finalize() interface{} {
 	for i, history := range analyser.peopleHistories {
 		if len(history) > 0 {
 			// there can be people with only trivial merge commits and without own lines
-			peopleHistories[i], _ = analyser.groupSparseHistory(history, lastDay)
+			peopleHistories[i], _ = analyser.groupSparseHistory(history, lastTick)
 		} else {
 			peopleHistories[i] = make(DenseHistory, len(globalHistory))
 			for j, gh := range globalHistory {
@@ -712,15 +714,15 @@ func (analyser *BurndownAnalysis) MergeResults(
 }
 
 func roundTime(unix int64, dir bool) int {
-	days := float64(unix) / (3600 * 24)
+	ticks := float64(unix) / (3600 * 24)
 	if dir {
-		return int(math.Ceil(days))
+		return int(math.Ceil(ticks))
 	}
-	return int(math.Floor(days))
+	return int(math.Floor(ticks))
 }
 
 // mergeMatrices takes two [number of samples][number of bands] matrices,
-// resamples them to days so that they become square, sums and resamples back to the
+// resamples them to ticks so that they become square, sums and resamples back to the
 // least of (sampling1, sampling2) and (granularity1, granularity2).
 func mergeMatrices(m1, m2 DenseHistory, granularity1, sampling1, granularity2, sampling2 int,
 	c1, c2 *core.CommonAnalysisResult) DenseHistory {
@@ -769,7 +771,7 @@ func mergeMatrices(m1, m2 DenseHistory, granularity1, sampling1, granularity2, s
 	return result
 }
 
-// Explode `matrix` so that it is daily sampled and has daily bands, shift by `offset` days
+// Explode `matrix` so that it is daily sampled and has daily bands, shift by `offset` ticks
 // and add to the accumulator. `daily` size is square and is guaranteed to fit `matrix` by
 // the caller.
 // Rows: *at least* len(matrix) * sampling + offset
@@ -935,7 +937,7 @@ func addBurndownMatrix(matrix DenseHistory, granularity, sampling int, accdaily
 	for y := len(matrix) * sampling; y+offset < len(daily); y++ {
 		copy(daily[y+offset], daily[len(matrix)*sampling-1+offset])
 	}
-	// the original matrix has been resampled by day
+	// the original matrix has been resampled by tick
 	// add it to the accumulator
 	for y, row := range daily {
 		for x, val := range row {
@@ -1054,87 +1056,88 @@ func checkClose(c io.Closer) {
 	}
 }
 
-// We do a hack and store the day in the first 14 bits and the author index in the last 18.
+// We do a hack and store the tick in the first 14 bits and the author index in the last 18.
 // Strictly speaking, int can be 64-bit and then the author index occupies 32+18 bits.
 // This hack is needed to simplify the values storage inside File-s. We can compare
-// different values together and they are compared as days for the same author.
-func (analyser *BurndownAnalysis) packPersonWithDay(person int, day int) int {
+// different values together and they are compared as ticks for the same author.
+func (analyser *BurndownAnalysis) packPersonWithTick(person int, tick int) int {
 	if analyser.PeopleNumber == 0 {
-		return day
+		return tick
 	}
-	result := day & burndown.TreeMergeMark
+	result := tick & burndown.TreeMergeMark
 	result |= person << burndown.TreeMaxBinPower
-	// This effectively means max (16383 - 1) days (>44 years) and (262143 - 3) devs.
-	// One day less because burndown.TreeMergeMark = ((1 << 14) - 1) is a special day.
+	// This effectively means max (16383 - 1) ticks (>44 years for daily ticks) and (262143 - 3) devs.
+	// One tick less because burndown.TreeMergeMark = ((1 << 14) - 1) is a special tick.
 	// Three devs less because:
-	// - math.MaxUint32 is the special rbtree value with day == TreeMergeMark (-1)
+	// - math.MaxUint32 is the special rbtree value with tick == TreeMergeMark (-1)
 	// - identity.AuthorMissing (-2)
 	// - authorSelf (-3)
 	return result
 }
 
-func (analyser *BurndownAnalysis) unpackPersonWithDay(value int) (int, int) {
+func (analyser *BurndownAnalysis) unpackPersonWithTick(value int) (int, int) {
 	if analyser.PeopleNumber == 0 {
 		return identity.AuthorMissing, value
 	}
 	return value >> burndown.TreeMaxBinPower, value & burndown.TreeMergeMark
 }
 
-func (analyser *BurndownAnalysis) onNewDay() {
-	if analyser.day > analyser.previousDay {
-		analyser.previousDay = analyser.day
+func (analyser *BurndownAnalysis) onNewTick() {
+	if analyser.tick > analyser.previousTick {
+		analyser.previousTick = analyser.tick
 	}
 	analyser.mergedAuthor = identity.AuthorMissing
 }
 
 func (analyser *BurndownAnalysis) updateGlobal(currentTime, previousTime, delta int) {
-	_, currentDay := analyser.unpackPersonWithDay(currentTime)
-	_, previousDay := analyser.unpackPersonWithDay(previousTime)
-	currentHistory := analyser.globalHistory[currentDay]
+	_, curTick := analyser.unpackPersonWithTick(currentTime)
+	_, prevTick := analyser.unpackPersonWithTick(previousTime)
+
+	currentHistory := analyser.globalHistory[curTick]
 	if currentHistory == nil {
 		currentHistory = map[int]int64{}
-		analyser.globalHistory[currentDay] = currentHistory
+		analyser.globalHistory[curTick] = currentHistory
 	}
-	currentHistory[previousDay] += int64(delta)
+	currentHistory[prevTick] += int64(delta)
 }
 
 // updateFile is bound to the specific `history` in the closure.
 func (analyser *BurndownAnalysis) updateFile(
 	history sparseHistory, currentTime, previousTime, delta int) {
 
-	_, currentDay := analyser.unpackPersonWithDay(currentTime)
-	_, previousDay := analyser.unpackPersonWithDay(previousTime)
+	_, curTick := analyser.unpackPersonWithTick(currentTime)
+	_, prevTick := analyser.unpackPersonWithTick(previousTime)
 
-	currentHistory := history[currentDay]
+	currentHistory := history[curTick]
 	if currentHistory == nil {
 		currentHistory = map[int]int64{}
-		history[currentDay] = currentHistory
+		history[curTick] = currentHistory
 	}
-	currentHistory[previousDay] += int64(delta)
+	currentHistory[prevTick] += int64(delta)
 }
 
 func (analyser *BurndownAnalysis) updateAuthor(currentTime, previousTime, delta int) {
-	previousAuthor, previousDay := analyser.unpackPersonWithDay(previousTime)
+	previousAuthor, prevTick := analyser.unpackPersonWithTick(previousTime)
 	if previousAuthor == identity.AuthorMissing {
 		return
 	}
-	_, currentDay := analyser.unpackPersonWithDay(currentTime)
+	_, curTick := analyser.unpackPersonWithTick(currentTime)
 	history := analyser.peopleHistories[previousAuthor]
 	if history == nil {
 		history = sparseHistory{}
 		analyser.peopleHistories[previousAuthor] = history
 	}
-	currentHistory := history[currentDay]
+	currentHistory := history[curTick]
 	if currentHistory == nil {
 		currentHistory = map[int]int64{}
-		history[currentDay] = currentHistory
+		history[curTick] = currentHistory
 	}
-	currentHistory[previousDay] += int64(delta)
+	currentHistory[prevTick] += int64(delta)
 }
 
 func (analyser *BurndownAnalysis) updateMatrix(currentTime, previousTime, delta int) {
-	newAuthor, _ := analyser.unpackPersonWithDay(currentTime)
-	oldAuthor, _ := analyser.unpackPersonWithDay(previousTime)
+	newAuthor, _ := analyser.unpackPersonWithTick(currentTime)
+	oldAuthor, _ := analyser.unpackPersonWithTick(previousTime)
 
 	if oldAuthor == identity.AuthorMissing {
 		return
@@ -1156,7 +1159,8 @@ func (analyser *BurndownAnalysis) updateMatrix(currentTime, previousTime, delta
 }
 
 func (analyser *BurndownAnalysis) newFile(
-	hash plumbing.Hash, name string, author int, day int, size int) (*burndown.File, error) {
+	hash plumbing.Hash, name string, author int, tick int, size int) (*burndown.File, error) {
+
 	updaters := make([]burndown.Updater, 1)
 	updaters[0] = analyser.updateGlobal
 	if analyser.TrackFiles {
@@ -1173,9 +1177,9 @@ func (analyser *BurndownAnalysis) newFile(
 	if analyser.PeopleNumber > 0 {
 		updaters = append(updaters, analyser.updateAuthor)
 		updaters = append(updaters, analyser.updateMatrix)
-		day = analyser.packPersonWithDay(author, day)
+		tick = analyser.packPersonWithTick(author, tick)
 	}
-	return burndown.NewFile(day, size, analyser.fileAllocator, updaters...), nil
+	return burndown.NewFile(tick, size, analyser.fileAllocator, updaters...), nil
 }
 
 func (analyser *BurndownAnalysis) handleInsertion(
@@ -1192,12 +1196,12 @@ func (analyser *BurndownAnalysis) handleInsertion(
 		return fmt.Errorf("file %s already exists", name)
 	}
 	var hash plumbing.Hash
-	if analyser.day != burndown.TreeMergeMark {
+	if analyser.tick != burndown.TreeMergeMark {
 		hash = blob.Hash
 	}
-	file, err = analyser.newFile(hash, name, author, analyser.day, lines)
+	file, err = analyser.newFile(hash, name, author, analyser.tick, lines)
 	analyser.files[name] = file
-	if analyser.day == burndown.TreeMergeMark {
+	if analyser.tick == burndown.TreeMergeMark {
 		analyser.mergedFiles[name] = true
 	}
 	return err
@@ -1222,7 +1226,7 @@ func (analyser *BurndownAnalysis) handleDeletion(
 	if !exists {
 		return nil
 	}
-	file.Update(analyser.packPersonWithDay(author, analyser.day), 0, 0, lines)
+	file.Update(analyser.packPersonWithTick(author, analyser.tick), 0, 0, lines)
 	file.Delete()
 	delete(analyser.files, name)
 	delete(analyser.fileHistories, name)
@@ -1237,7 +1241,7 @@ func (analyser *BurndownAnalysis) handleDeletion(
 			}
 		}
 	}
-	if analyser.day == burndown.TreeMergeMark {
+	if analyser.tick == burndown.TreeMergeMark {
 		analyser.mergedFiles[name] = false
 	}
 	return nil
@@ -1247,7 +1251,7 @@ func (analyser *BurndownAnalysis) handleModification(
 	change *object.Change, author int, cache map[plumbing.Hash]*items.CachedBlob,
 	diffs map[string]items.FileDiffData) error {
 
-	if analyser.day == burndown.TreeMergeMark {
+	if analyser.tick == burndown.TreeMergeMark {
 		analyser.mergedFiles[change.To.Name] = true
 	}
 	file, exists := analyser.files[change.From.Name]
@@ -1297,10 +1301,10 @@ func (analyser *BurndownAnalysis) handleModification(
 	apply := func(edit diffmatchpatch.Diff) {
 		length := utf8.RuneCountInString(edit.Text)
 		if edit.Type == diffmatchpatch.DiffInsert {
-			file.Update(analyser.packPersonWithDay(author, analyser.day), position, length, 0)
+			file.Update(analyser.packPersonWithTick(author, analyser.tick), position, length, 0)
 			position += length
 		} else {
-			file.Update(analyser.packPersonWithDay(author, analyser.day), position, 0, length)
+			file.Update(analyser.packPersonWithTick(author, analyser.tick), position, 0, length)
 		}
 		if analyser.Debug {
 			file.Validate()
@@ -1315,7 +1319,7 @@ func (analyser *BurndownAnalysis) handleModification(
 		length := utf8.RuneCountInString(edit.Text)
 		debugError := func() {
 			log.Printf("%s: internal diff error\n", change.To.Name)
-			log.Printf("Update(%d, %d, %d (0), %d (0))\n", analyser.day, position,
+			log.Printf("Update(%d, %d, %d (0), %d (0))\n", analyser.tick, position,
 				length, utf8.RuneCountInString(pending.Text))
 			if dumpBefore != "" {
 				log.Printf("====TREE BEFORE====\n%s====END====\n", dumpBefore)
@@ -1335,7 +1339,7 @@ func (analyser *BurndownAnalysis) handleModification(
 					debugError()
 					return errors.New("DiffInsert may not appear after DiffInsert")
 				}
-				file.Update(analyser.packPersonWithDay(author, analyser.day), position, length,
+				file.Update(analyser.packPersonWithTick(author, analyser.tick), position, length,
 					utf8.RuneCountInString(pending.Text))
 				if analyser.Debug {
 					file.Validate()
@@ -1378,7 +1382,7 @@ func (analyser *BurndownAnalysis) handleRename(from, to string) error {
 	}
 	delete(analyser.files, from)
 	analyser.files[to] = file
-	if analyser.day == burndown.TreeMergeMark {
+	if analyser.tick == burndown.TreeMergeMark {
 		analyser.mergedFiles[from] = false
 	}
 
@@ -1428,37 +1432,37 @@ func (analyser *BurndownAnalysis) handleRename(from, to string) error {
 }
 
 func (analyser *BurndownAnalysis) groupSparseHistory(
-	history sparseHistory, lastDay int) (DenseHistory, int) {
+	history sparseHistory, lastTick int) (DenseHistory, int) {
 
 	if len(history) == 0 {
 		panic("empty history")
 	}
-	var days []int
-	for day := range history {
-		days = append(days, day)
+	var ticks []int
+	for tick := range history {
+		ticks = append(ticks, tick)
 	}
-	sort.Ints(days)
-	if lastDay >= 0 {
-		if days[len(days)-1] < lastDay {
-			days = append(days, lastDay)
-		} else if days[len(days)-1] > lastDay {
-			panic("days corruption")
+	sort.Ints(ticks)
+	if lastTick >= 0 {
+		if ticks[len(ticks)-1] < lastTick {
+			ticks = append(ticks, lastTick)
+		} else if ticks[len(ticks)-1] > lastTick {
+			panic("ticks corruption")
 		}
 	} else {
-		lastDay = days[len(days)-1]
+		lastTick = ticks[len(ticks)-1]
 	}
 	// [y][x]
 	// y - sampling
 	// x - granularity
-	samples := lastDay/analyser.Sampling + 1
-	bands := lastDay/analyser.Granularity + 1
+	samples := lastTick/analyser.Sampling + 1
+	bands := lastTick/analyser.Granularity + 1
 	result := make(DenseHistory, samples)
 	for i := 0; i < bands; i++ {
 		result[i] = make([]int64, bands)
 	}
 	prevsi := 0
-	for _, day := range days {
-		si := day / analyser.Sampling
+	for _, tick := range ticks {
+		si := tick / analyser.Sampling
 		if si > prevsi {
 			state := result[prevsi]
 			for i := prevsi + 1; i <= si; i++ {
@@ -1467,11 +1471,11 @@ func (analyser *BurndownAnalysis) groupSparseHistory(
 			prevsi = si
 		}
 		sample := result[si]
-		for bday, value := range history[day] {
-			sample[bday/analyser.Granularity] += value
+		for t, value := range history[tick] {
+			sample[t/analyser.Granularity] += value
 		}
 	}
-	return result, lastDay
+	return result, lastTick
 }
 
 func init() {
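
packPersonWithTick/unpackPersonWithTick above keep the tick in the low TreeMaxBinPower = 14 bits and the author index in the bits above, which is why TreeMergeMark doubles as the tick mask. A small self-contained sketch of that layout (constants mirrored from internal/burndown under local names; a demonstration, not the analysis code itself):

package main

import "fmt"

const (
	treeMaxBinPower = 14                          // mirrors burndown.TreeMaxBinPower
	treeMergeMark   = (1 << treeMaxBinPower) - 1  // mirrors burndown.TreeMergeMark (16383)
)

// pack stores the tick in the low 14 bits and the author above them.
func pack(person, tick int) int {
	return (tick & treeMergeMark) | (person << treeMaxBinPower)
}

// unpack reverses pack by shifting and masking.
func unpack(value int) (person, tick int) {
	return value >> treeMaxBinPower, value & treeMergeMark
}

func main() {
	v := pack(42, 100)     // author 42, tick 100
	fmt.Println(unpack(v)) // 42 100
}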

+ 8 - 8
leaves/burndown_test.go

@@ -38,7 +38,7 @@ func TestBurndownMeta(t *testing.T) {
 	assert.Len(t, bd.Provides(), 0)
 	required := [...]string{
 		items.DependencyFileDiff, items.DependencyTreeChanges, items.DependencyBlobCache,
-		items.DependencyDay, identity.DependencyAuthor}
+		items.DependencyTick, identity.DependencyAuthor}
 	for _, name := range required {
 		assert.Contains(t, bd.Requires(), name)
 	}
@@ -142,7 +142,7 @@ func TestBurndownConsumeFinalize(t *testing.T) {
 
 	// stage 1
 	deps[identity.DependencyAuthor] = 0
-	deps[items.DependencyDay] = 0
+	deps[items.DependencyTick] = 0
 	cache := map[plumbing.Hash]*items.CachedBlob{}
 	AddHash(t, cache, "291286b4ac41952cbd1389fda66420ec03c1a9fe")
 	AddHash(t, cache, "c29112dbd697ad9b401333b80c18a63951bc18d9")
@@ -202,7 +202,7 @@ func TestBurndownConsumeFinalize(t *testing.T) {
 	result, err = bd.Consume(deps)
 	assert.Nil(t, result)
 	assert.Nil(t, err)
-	assert.Equal(t, bd.previousDay, 0)
+	assert.Equal(t, bd.previousTick, 0)
 	assert.Len(t, bd.files, 3)
 	assert.Equal(t, bd.files["cmd/hercules/main.go"].Len(), 207)
 	assert.Equal(t, bd.files["analyser.go"].Len(), 926)
@@ -237,7 +237,7 @@ func TestBurndownConsumeFinalize(t *testing.T) {
 	// stage 2
 	// 2b1ed978194a94edeabbca6de7ff3b5771d4d665
 	deps[core.DependencyIsMerge] = false
-	deps[items.DependencyDay] = 30
+	deps[items.DependencyTick] = 30
 	cache = map[plumbing.Hash]*items.CachedBlob{}
 	AddHash(t, cache, "291286b4ac41952cbd1389fda66420ec03c1a9fe")
 	AddHash(t, cache, "baa64828831d174f40140e4b3cfa77d1e917a2c1")
@@ -304,7 +304,7 @@ func TestBurndownConsumeFinalize(t *testing.T) {
 	result, err = bd.Consume(deps)
 	assert.Nil(t, result)
 	assert.Nil(t, err)
-	assert.Equal(t, bd.previousDay, 30)
+	assert.Equal(t, bd.previousTick, 30)
 	assert.Len(t, bd.files, 2)
 	assert.Equal(t, bd.files["cmd/hercules/main.go"].Len(), 290)
 	assert.Equal(t, bd.files["burndown.go"].Len(), 543)
@@ -360,7 +360,7 @@ func TestBurndownConsumeFinalize(t *testing.T) {
 
 func TestBurndownConsumeMergeAuthorMissing(t *testing.T) {
 	deps := map[string]interface{}{}
-	deps[items.DependencyDay] = 0
+	deps[items.DependencyTick] = 0
 	cache := map[plumbing.Hash]*items.CachedBlob{}
 	AddHash(t, cache, "291286b4ac41952cbd1389fda66420ec03c1a9fe")
 	AddHash(t, cache, "c29112dbd697ad9b401333b80c18a63951bc18d9")
@@ -476,7 +476,7 @@ func bakeBurndownForSerialization(t *testing.T, firstAuthor, secondAuthor int) (
 	deps := map[string]interface{}{}
 	// stage 1
 	deps[identity.DependencyAuthor] = firstAuthor
-	deps[items.DependencyDay] = 0
+	deps[items.DependencyTick] = 0
 	cache := map[plumbing.Hash]*items.CachedBlob{}
 	AddHash(t, cache, "291286b4ac41952cbd1389fda66420ec03c1a9fe")
 	AddHash(t, cache, "c29112dbd697ad9b401333b80c18a63951bc18d9")
@@ -537,7 +537,7 @@ func bakeBurndownForSerialization(t *testing.T, firstAuthor, secondAuthor int) (
 	// stage 2
 	// 2b1ed978194a94edeabbca6de7ff3b5771d4d665
 	deps[identity.DependencyAuthor] = secondAuthor
-	deps[items.DependencyDay] = 30
+	deps[items.DependencyTick] = 30
 	cache = map[plumbing.Hash]*items.CachedBlob{}
 	AddHash(t, cache, "291286b4ac41952cbd1389fda66420ec03c1a9fe")
 	AddHash(t, cache, "baa64828831d174f40140e4b3cfa77d1e917a2c1")

+ 2 - 2
leaves/devs.go

@@ -74,7 +74,7 @@ func (devs *DevsAnalysis) Provides() []string {
 // entities are Provides() upstream.
 func (devs *DevsAnalysis) Requires() []string {
 	arr := [...]string{
-		identity.DependencyAuthor, items.DependencyTreeChanges, items.DependencyDay,
+		identity.DependencyAuthor, items.DependencyTreeChanges, items.DependencyTick,
 		items.DependencyLanguages, items.DependencyLineStats}
 	return arr[:]
 }
@@ -133,7 +133,7 @@ func (devs *DevsAnalysis) Consume(deps map[string]interface{}) (map[string]inter
 	if len(treeDiff) == 0 && !devs.ConsiderEmptyCommits {
 		return nil, nil
 	}
-	day := deps[items.DependencyDay].(int)
+	day := deps[items.DependencyTick].(int)
 	devsDay, exists := devs.days[day]
 	if !exists {
 		devsDay = map[int]*DevDay{}

+ 3 - 3
leaves/devs_test.go

@@ -31,7 +31,7 @@ func TestDevsMeta(t *testing.T) {
 	assert.Equal(t, len(d.Requires()), 5)
 	assert.Equal(t, d.Requires()[0], identity.DependencyAuthor)
 	assert.Equal(t, d.Requires()[1], items.DependencyTreeChanges)
-	assert.Equal(t, d.Requires()[2], items.DependencyDay)
+	assert.Equal(t, d.Requires()[2], items.DependencyTick)
 	assert.Equal(t, d.Requires()[3], items.DependencyLanguages)
 	assert.Equal(t, d.Requires()[4], items.DependencyLineStats)
 	assert.Equal(t, d.Flag(), "devs")
@@ -77,7 +77,7 @@ func TestDevsConsumeFinalize(t *testing.T) {
 
 	// stage 1
 	deps[identity.DependencyAuthor] = 0
-	deps[items.DependencyDay] = 0
+	deps[items.DependencyTick] = 0
 	cache := map[plumbing.Hash]*items.CachedBlob{}
 	AddHash(t, cache, "291286b4ac41952cbd1389fda66420ec03c1a9fe")
 	AddHash(t, cache, "c29112dbd697ad9b401333b80c18a63951bc18d9")
@@ -228,7 +228,7 @@ func TestDevsConsumeFinalize(t *testing.T) {
 	assert.Equal(t, dev.Languages["Go"].Removed, 9*2)
 	assert.Equal(t, dev.Languages["Go"].Changed, 67*2)
 
-	deps[items.DependencyDay] = 1
+	deps[items.DependencyTick] = 1
 	result, err = devs.Consume(deps)
 	assert.Nil(t, result)
 	assert.Nil(t, err)