瀏覽代碼

Merge pull request #279 from vmarkovtsev/master

Ticks and --devs
Vadim Markovtsev 6 年之前
父節點
當前提交
154f0a023e
共有 11 個文件被更改,包括 521 次插入和 960 次刪除
  1. 1 1
      Makefile
  2. 12 3
      cmd/hercules/combine.go
  3. 325 886
      internal/pb/pb.pb.go
  4. 3 0
      internal/pb/pb.proto
  5. 14 3
      internal/plumbing/ticks.go
  6. 20 4
      internal/plumbing/ticks_test.go
  7. 23 17
      leaves/burndown.go
  8. 19 19
      leaves/burndown_test.go
  9. 31 0
      leaves/devs.go
  10. 42 3
      leaves/devs_test.go
  11. 31 24
      python/labours/pb_pb2.py

+ 1 - 1
Makefile

@@ -25,7 +25,7 @@ internal/pb/pb.pb.go: internal/pb/pb.proto ${GOPATH}/bin/protoc-gen-gogo.exe
 endif
 
 python/labours/pb_pb2.py: internal/pb/pb.proto
-	protoc --python_out python/hercules --proto_path=internal/pb internal/pb/pb.proto
+	protoc --python_out python/labours --proto_path=internal/pb internal/pb/pb.proto
 
 cmd/hercules/plugin_template_source.go: cmd/hercules/plugin.template
 	cd cmd/hercules && go generate

+ 12 - 3
cmd/hercules/combine.go

@@ -59,7 +59,10 @@ var combineCmd = &cobra.Command{
 			bar.Increment()
 			anotherResults, anotherMetadata, errs := loadMessage(fileName, &repos)
 			if anotherMetadata != nil {
-				mergeResults(mergedResults, mergedMetadata, anotherResults, anotherMetadata, only)
+				mergeErrs := mergeResults(mergedResults, mergedMetadata, anotherResults, anotherMetadata, only)
+				for _, err := range mergeErrs {
+					errs = append(errs, err.Error())
+				}
 			}
 			allErrors[fileName] = errs
 			debug.FreeOSMemory()
@@ -173,7 +176,8 @@ func mergeResults(mergedResults map[string]interface{},
 	mergedCommons *hercules.CommonAnalysisResult,
 	anotherResults map[string]interface{},
 	anotherCommons *hercules.CommonAnalysisResult,
-	only string) {
+	only string) []error {
+	var errors []error
 	for key, val := range anotherResults {
 		if only != "" && key != only {
 			continue
@@ -185,13 +189,18 @@ func mergeResults(mergedResults map[string]interface{},
 		}
 		item := hercules.Registry.Summon(key)[0].(hercules.ResultMergeablePipelineItem)
 		mergedResult = item.MergeResults(mergedResult, val, mergedCommons, anotherCommons)
-		mergedResults[key] = mergedResult
+		if err, isErr := mergedResult.(error); isErr {
+			errors = append(errors, fmt.Errorf("could not merge %s: %v", item.Name(), err))
+		} else {
+			mergedResults[key] = mergedResult
+		}
 	}
 	if mergedCommons.CommitsNumber == 0 {
 		*mergedCommons = *anotherCommons
 	} else {
 		mergedCommons.Merge(anotherCommons)
 	}
+	return errors
 }
 
 func getOptionsString() string {

文件差異過大導致無法顯示
+ 325 - 886
internal/pb/pb.pb.go


+ 3 - 0
internal/pb/pb.proto

@@ -136,7 +136,10 @@ message TickDevs {
 
 message DevsAnalysisResults {
     map<int32, TickDevs> ticks = 1;
+    // developer identities, the indexes correspond to TickDevs' keys.
     repeated string dev_index = 2;
+    // how long each tick is, as an int64 nanosecond count (Go's time.Duration)
+    int64 tick_size = 8;
 }
 
 message Sentiment {

+ 14 - 3
internal/plumbing/ticks.go

@@ -123,11 +123,12 @@ func (ticks *TicksSinceStart) Consume(deps map[string]interface{}) (map[string]i
 	if index == 0 {
 		// first iteration - initialize the file objects from the tree
 		// our precision is 1 day
-		*ticks.tick0 = commit.Committer.When
-		if ticks.tick0.Unix() < 631152000 { // 01.01.1990, that was 30 years ago
+		tick0 := commit.Committer.When
+		if tick0.Unix() < 631152000 { // 01.01.1990, that was 30 years ago
 			ticks.l.Warnf("suspicious committer timestamp in %s > %s: %d",
-				ticks.remote, commit.Hash.String(), ticks.tick0.Unix())
+				ticks.remote, commit.Hash.String(), tick0.Unix())
 		}
+		*ticks.tick0 = FloorTime(tick0, ticks.TickSize)
 	}
 
 	tick := int(commit.Committer.When.Sub(*ticks.tick0) / ticks.TickSize)
@@ -163,6 +164,16 @@ func (ticks *TicksSinceStart) Fork(n int) []core.PipelineItem {
 	return core.ForkCopyPipelineItem(ticks, n)
 }
 
+// FloorTime is the missing implementation of time.Time.Floor() - round to the nearest less than or equal.
+func FloorTime(t time.Time, d time.Duration) time.Time {
+	// We have to check if the regular rounding resulted in Floor() + d.
+	result := t.Round(d)
+	if result.After(t) {
+		result = result.Add(-d)
+	}
+	return result
+}
+
 func init() {
 	core.Registry.Register(&TicksSinceStart{})
 }

+ 20 - 4
internal/plumbing/ticks_test.go

@@ -49,6 +49,7 @@ func TestTicksSinceStartRegistration(t *testing.T) {
 
 func TestTicksSinceStartConsume(t *testing.T) {
 	tss := fixtureTicksSinceStart()
+	tss.TickSize = time.Second
 	deps := map[string]interface{}{}
 	commit, _ := test.Repository.CommitObject(plumbing.NewHash(
 		"cce947b98a050c6d356bc6ba95030254914027b1"))
@@ -58,10 +59,25 @@ func TestTicksSinceStartConsume(t *testing.T) {
 	assert.Nil(t, err)
 	assert.Equal(t, 0, res[DependencyTick].(int))
 	assert.Equal(t, 0, tss.previousTick)
+	assert.Equal(t, 2016, tss.tick0.Year())
+	assert.Equal(t, time.Month(12), tss.tick0.Month())
+	assert.Equal(t, 12, tss.tick0.Day())
 	assert.Equal(t, 18, tss.tick0.Hour())   // 18 UTC+1
 	assert.Equal(t, 30, tss.tick0.Minute()) // 30
 	assert.Equal(t, 29, tss.tick0.Second()) // 29
 
+	tss = fixtureTicksSinceStart()
+	res, err = tss.Consume(deps)
+	assert.Nil(t, err)
+	assert.Equal(t, 0, res[DependencyTick].(int))
+	assert.Equal(t, 0, tss.previousTick)
+	assert.Equal(t, 2016, tss.tick0.Year())
+	assert.Equal(t, time.Month(12), tss.tick0.Month())
+	assert.Equal(t, 12, tss.tick0.Day())
+	assert.Equal(t, 1, tss.tick0.Hour()) // UTC+1
+	assert.Equal(t, 0, tss.tick0.Minute())
+	assert.Equal(t, 0, tss.tick0.Second())
+
 	commit, _ = test.Repository.CommitObject(plumbing.NewHash(
 		"fc9ceecb6dabcb2aab60e8619d972e8d8208a7df"))
 	deps[core.DependencyCommit] = commit
@@ -123,9 +139,9 @@ func TestTicksSinceStartConsumeWithTickSize(t *testing.T) {
 	assert.Nil(t, err)
 	assert.Equal(t, 0, res[DependencyTick].(int))
 	assert.Equal(t, 0, tss.previousTick)
-	assert.Equal(t, 18, tss.tick0.Hour())   // 18 UTC+1
-	assert.Equal(t, 30, tss.tick0.Minute()) // 30
-	assert.Equal(t, 29, tss.tick0.Second()) // 29
+	assert.Equal(t, 18, tss.tick0.Hour())  // 18 UTC+1
+	assert.Equal(t, 0, tss.tick0.Minute()) // floored to the tick boundary
+	assert.Equal(t, 0, tss.tick0.Second()) // floored to the tick boundary
 
 	commit, _ = test.Repository.CommitObject(plumbing.NewHash(
 		"fc9ceecb6dabcb2aab60e8619d972e8d8208a7df"))
@@ -160,7 +176,7 @@ func TestTicksCommits(t *testing.T) {
 	tss.commits[0] = []plumbing.Hash{plumbing.NewHash(
 		"cce947b98a050c6d356bc6ba95030254914027b1")}
 	commits := tss.commits
-	tss.Initialize(test.Repository)
+	assert.NoError(t, tss.Initialize(test.Repository))
 	assert.Len(t, tss.commits, 0)
 	assert.Equal(t, tss.commits, commits)
 }

+ 23 - 17
leaves/burndown.go

@@ -133,8 +133,6 @@ type BurndownResult struct {
 	// The rest of the elements are equal the number of line removals by the corresponding
 	// authors in reversedPeopleDict: 2 -> 0, 3 -> 1, etc.
 	PeopleMatrix DenseHistory
-	// The size of each tick.
-	TickSize time.Duration
 
 	// The following members are private.
 
@@ -142,6 +140,8 @@ type BurndownResult struct {
 	// Pipeline.Initialize(facts map[string]interface{}). Thus it can be obtained via
 	// facts[FactIdentityDetectorReversedPeopleDict].
 	reversedPeopleDict []string
+	// tickSize references TicksSinceStart.TickSize
+	tickSize time.Duration
 	// sampling and granularity are copied from BurndownAnalysis and stored for service purposes
 	// such as merging several results together.
 	sampling    int
@@ -571,7 +571,7 @@ func (analyser *BurndownAnalysis) Finalize() interface{} {
 		FileOwnership:      fileOwnership,
 		PeopleHistories:    peopleHistories,
 		PeopleMatrix:       peopleMatrix,
-		TickSize:           analyser.tickSize,
+		tickSize:           analyser.tickSize,
 		reversedPeopleDict: analyser.reversedPeopleDict,
 		sampling:           analyser.Sampling,
 		granularity:        analyser.Granularity,
@@ -613,7 +613,7 @@ func (analyser *BurndownAnalysis) Deserialize(pbmessage []byte) (interface{}, er
 		GlobalHistory: convertCSR(msg.Project),
 		FileHistories: map[string]DenseHistory{},
 		FileOwnership: map[string]map[int]int{},
-		TickSize:      time.Duration(msg.GetTickSize()),
+		tickSize:      time.Duration(msg.TickSize),
 
 		granularity: int(msg.Granularity),
 		sampling:    int(msg.Sampling),
@@ -649,17 +649,17 @@ func (analyser *BurndownAnalysis) MergeResults(
 	r1, r2 interface{}, c1, c2 *core.CommonAnalysisResult) interface{} {
 	bar1 := r1.(BurndownResult)
 	bar2 := r2.(BurndownResult)
-	if bar1.TickSize != bar2.TickSize {
+	if bar1.tickSize != bar2.tickSize {
 		return fmt.Errorf("mismatching tick sizes (r1: %d, r2: %d) received",
-			bar1.TickSize, bar2.TickSize)
+			bar1.tickSize, bar2.tickSize)
 	}
 	// for backwards-compatibility, if no tick size is present set to default
-	analyser.tickSize = bar1.TickSize
+	analyser.tickSize = bar1.tickSize
 	if analyser.tickSize == 0 {
 		analyser.tickSize = items.DefaultTicksSinceStartTickSize * time.Hour
 	}
 	merged := BurndownResult{
-		TickSize: analyser.tickSize,
+		tickSize: analyser.tickSize,
 	}
 	if bar1.sampling < bar2.sampling {
 		merged.sampling = bar1.sampling
@@ -683,6 +683,7 @@ func (analyser *BurndownAnalysis) MergeResults(
 				bar1.GlobalHistory, bar2.GlobalHistory,
 				bar1.granularity, bar1.sampling,
 				bar2.granularity, bar2.sampling,
+				bar1.tickSize,
 				c1, c2)
 		}()
 	}
@@ -706,6 +707,7 @@ func (analyser *BurndownAnalysis) MergeResults(
 						m1, m2,
 						bar1.granularity, bar1.sampling,
 						bar2.granularity, bar2.sampling,
+						bar1.tickSize,
 						c1, c2,
 					)
 				}(i)
@@ -755,8 +757,11 @@ func (analyser *BurndownAnalysis) MergeResults(
 	return merged
 }
 
-func (analyser *BurndownAnalysis) roundTime(unix int64, dir bool) int {
-	ticks := float64(unix) / analyser.tickSize.Seconds()
+func roundTime(t time.Time, d time.Duration, dir bool) int {
+	if !dir {
+		t = items.FloorTime(t, d)
+	}
+	ticks := float64(t.Unix()) / d.Seconds()
 	if dir {
 		return int(math.Ceil(ticks))
 	}
@@ -766,7 +771,8 @@ func (analyser *BurndownAnalysis) roundTime(unix int64, dir bool) int {
 // mergeMatrices takes two [number of samples][number of bands] matrices,
 // resamples them to ticks so that they become square, sums and resamples back to the
 // least of (sampling1, sampling2) and (granularity1, granularity2).
-func (analyser *BurndownAnalysis) mergeMatrices(m1, m2 DenseHistory, granularity1, sampling1, granularity2, sampling2 int,
+func (analyser *BurndownAnalysis) mergeMatrices(
+	m1, m2 DenseHistory, granularity1, sampling1, granularity2, sampling2 int, tickSize time.Duration,
 	c1, c2 *core.CommonAnalysisResult) DenseHistory {
 	commonMerged := c1.Copy()
 	commonMerged.Merge(c2)
@@ -783,19 +789,19 @@ func (analyser *BurndownAnalysis) mergeMatrices(m1, m2 DenseHistory, granularity
 		granularity = granularity2
 	}
 
-	size := analyser.roundTime(commonMerged.EndTime, true) -
-		analyser.roundTime(commonMerged.BeginTime, false)
+	size := roundTime(commonMerged.EndTimeAsTime(), tickSize, true) -
+		roundTime(commonMerged.BeginTimeAsTime(), tickSize, false)
 	perTick := make([][]float32, size+granularity)
 	for i := range perTick {
 		perTick[i] = make([]float32, size+sampling)
 	}
 	if len(m1) > 0 {
 		addBurndownMatrix(m1, granularity1, sampling1, perTick,
-			analyser.roundTime(c1.BeginTime, false)-analyser.roundTime(commonMerged.BeginTime, false))
+			roundTime(c1.BeginTimeAsTime(), tickSize, false)-roundTime(commonMerged.BeginTimeAsTime(), tickSize, false))
 	}
 	if len(m2) > 0 {
 		addBurndownMatrix(m2, granularity2, sampling2, perTick,
-			analyser.roundTime(c2.BeginTime, false)-analyser.roundTime(commonMerged.BeginTime, false))
+			roundTime(c2.BeginTimeAsTime(), tickSize, false)-roundTime(commonMerged.BeginTimeAsTime(), tickSize, false))
 	}
 
 	// convert daily to [][]int64
@@ -992,7 +998,7 @@ func addBurndownMatrix(matrix DenseHistory, granularity, sampling int, accPerTic
 func (analyser *BurndownAnalysis) serializeText(result *BurndownResult, writer io.Writer) {
 	fmt.Fprintln(writer, "  granularity:", result.granularity)
 	fmt.Fprintln(writer, "  sampling:", result.sampling)
-	fmt.Fprintln(writer, "  tick_size:", result.TickSize)
+	fmt.Fprintln(writer, "  tick_size:", int(result.tickSize.Seconds()))
 	yaml.PrintMatrix(writer, result.GlobalHistory, 2, "project", true)
 	if len(result.FileHistories) > 0 {
 		fmt.Fprintln(writer, "  files:")
@@ -1045,7 +1051,7 @@ func (analyser *BurndownAnalysis) serializeBinary(result *BurndownResult, writer
 	message := pb.BurndownAnalysisResults{
 		Granularity: int32(result.granularity),
 		Sampling:    int32(result.sampling),
-		TickSize:    int64(result.TickSize),
+		TickSize:    int64(result.tickSize),
 	}
 	if len(result.GlobalHistory) > 0 {
 		message.Project = pb.ToBurndownSparseMatrix(result.GlobalHistory, "project")

+ 19 - 19
leaves/burndown_test.go

@@ -625,7 +625,7 @@ func TestBurndownSerialize(t *testing.T) {
 	assert.Nil(t, bd.Serialize(out, false, buffer))
 	assert.Equal(t, buffer.String(), `  granularity: 30
   sampling: 30
-  tick_size: 24h0m0s
+  tick_size: 86400
   "project": |-
     1145    0
      464  369
@@ -656,9 +656,9 @@ func TestBurndownSerialize(t *testing.T) {
      369    0    0    0
 `)
 	buffer = &bytes.Buffer{}
-	bd.Serialize(out, true, buffer)
+	assert.NoError(t, bd.Serialize(out, true, buffer))
 	msg := pb.BurndownAnalysisResults{}
-	proto.Unmarshal(buffer.Bytes(), &msg)
+	assert.NoError(t, proto.Unmarshal(buffer.Bytes(), &msg))
 	assert.Equal(t, msg.TickSize, int64(24*time.Hour))
 	assert.Equal(t, msg.Granularity, int32(30))
 	assert.Equal(t, msg.Sampling, int32(30))
@@ -714,7 +714,7 @@ func TestBurndownSerializeAuthorMissing(t *testing.T) {
 	assert.Nil(t, bd.Serialize(out, false, buffer))
 	assert.Equal(t, buffer.String(), `  granularity: 30
   sampling: 30
-  tick_size: 24h0m0s
+  tick_size: 86400
   "project": |-
     1145    0
      464  369
@@ -745,9 +745,9 @@ func TestBurndownSerializeAuthorMissing(t *testing.T) {
        0    0    0    0
 `)
 	buffer = &bytes.Buffer{}
-	bd.Serialize(out, true, buffer)
+	assert.NoError(t, bd.Serialize(out, true, buffer))
 	msg := pb.BurndownAnalysisResults{}
-	proto.Unmarshal(buffer.Bytes(), &msg)
+	assert.NoError(t, proto.Unmarshal(buffer.Bytes(), &msg))
 	assert.Equal(t, msg.Granularity, int32(30))
 	assert.Equal(t, msg.Sampling, int32(30))
 	assert.Equal(t, msg.Project.Name, "project")
@@ -1063,7 +1063,7 @@ func TestBurndownMergeGlobalHistory(t *testing.T) {
 		reversedPeopleDict: people1[:],
 		sampling:           15,
 		granularity:        20,
-		TickSize:           24 * time.Hour,
+		tickSize:           24 * time.Hour,
 	}
 	c1 := core.CommonAnalysisResult{
 		BeginTime:     600566400, // 1989 Jan 12
@@ -1109,7 +1109,7 @@ func TestBurndownMergeGlobalHistory(t *testing.T) {
 		FileHistories:      map[string][][]int64{},
 		PeopleHistories:    nil,
 		PeopleMatrix:       nil,
-		TickSize:           24 * time.Hour,
+		tickSize:           24 * time.Hour,
 		reversedPeopleDict: people2[:],
 		sampling:           14,
 		granularity:        19,
@@ -1158,7 +1158,7 @@ func TestBurndownMergeGlobalHistory(t *testing.T) {
 	merged := bd.MergeResults(res1, res2, &c1, &c2).(BurndownResult)
 	assert.Equal(t, merged.granularity, 19)
 	assert.Equal(t, merged.sampling, 14)
-	assert.Equal(t, merged.TickSize, 24*time.Hour)
+	assert.Equal(t, merged.tickSize, 24*time.Hour)
 	assert.Len(t, merged.GlobalHistory, 5)
 	for _, row := range merged.GlobalHistory {
 		assert.Len(t, row, 4)
@@ -1194,7 +1194,7 @@ func TestBurndownMergeGlobalHistory(t *testing.T) {
 
 func TestBurndownMergeGlobalHistory_withDifferentTickSizes(t *testing.T) {
 	res1 := BurndownResult{
-		TickSize: 13 * time.Hour,
+		tickSize: 13 * time.Hour,
 	}
 	c1 := core.CommonAnalysisResult{
 		BeginTime:     600566400, // 1989 Jan 12
@@ -1203,7 +1203,7 @@ func TestBurndownMergeGlobalHistory_withDifferentTickSizes(t *testing.T) {
 		RunTime:       100000,
 	}
 	res2 := BurndownResult{
-		TickSize: 24 * time.Hour,
+		tickSize: 24 * time.Hour,
 	}
 	c2 := core.CommonAnalysisResult{
 		BeginTime:     601084800, // 1989 Jan 18
@@ -1225,7 +1225,7 @@ func TestBurndownMergeNils(t *testing.T) {
 		FileHistories:      map[string][][]int64{},
 		PeopleHistories:    nil,
 		PeopleMatrix:       nil,
-		TickSize:           24 * time.Hour,
+		tickSize:           24 * time.Hour,
 		reversedPeopleDict: nil,
 		sampling:           15,
 		granularity:        20,
@@ -1241,7 +1241,7 @@ func TestBurndownMergeNils(t *testing.T) {
 		FileHistories:      nil,
 		PeopleHistories:    nil,
 		PeopleMatrix:       nil,
-		TickSize:           24 * time.Hour,
+		tickSize:           24 * time.Hour,
 		reversedPeopleDict: nil,
 		sampling:           14,
 		granularity:        19,
@@ -1258,7 +1258,7 @@ func TestBurndownMergeNils(t *testing.T) {
 	merged := bd.MergeResults(res1, res2, &c1, &c2).(BurndownResult)
 	assert.Equal(t, merged.granularity, 19)
 	assert.Equal(t, merged.sampling, 14)
-	assert.Equal(t, merged.TickSize, 24*time.Hour)
+	assert.Equal(t, merged.tickSize, 24*time.Hour)
 	assert.Nil(t, merged.GlobalHistory)
 	assert.Nil(t, merged.FileHistories)
 	assert.Nil(t, merged.PeopleHistories)
@@ -1337,7 +1337,7 @@ func TestBurndownDeserialize(t *testing.T) {
 	assert.True(t, len(result.PeopleMatrix) > 0)
 	assert.Equal(t, result.granularity, 30)
 	assert.Equal(t, result.sampling, 30)
-	assert.Equal(t, result.TickSize, 24*time.Hour)
+	assert.Equal(t, result.tickSize, 24*time.Hour)
 }
 
 func TestBurndownEmptyFileHistory(t *testing.T) {
@@ -1500,13 +1500,13 @@ func TestBurndownMergeMatrices(t *testing.T) {
 		RunTime:       1567214,
 	}
 	bd := BurndownAnalysis{tickSize: 24 * time.Hour}
-	nh := bd.mergeMatrices(h, nil, 30, 30, 30, 30, cr, cr)
+	nh := bd.mergeMatrices(h, nil, 30, 30, 30, 30, bd.tickSize, cr, cr)
 	for y, row := range nh {
 		for x, v := range row {
 			assert.InDelta(t, v, h[y][x], 1, fmt.Sprintf("y=%d x=%d", y, x))
 		}
 	}
-	nh = bd.mergeMatrices(h, h, 30, 30, 30, 30, cr, cr)
+	nh = bd.mergeMatrices(h, h, 30, 30, 30, 30, bd.tickSize, cr, cr)
 	for y, row := range nh {
 		for x, v := range row {
 			assert.InDelta(t, v, h[y][x]*2, 1, fmt.Sprintf("y=%d x=%d", y, x))
@@ -1531,7 +1531,7 @@ func TestBurndownMergePeopleHistories(t *testing.T) {
 		FileHistories:      map[string][][]int64{},
 		PeopleHistories:    [][][]int64{h1, h1},
 		PeopleMatrix:       nil,
-		TickSize:           24 * time.Hour,
+		tickSize:           24 * time.Hour,
 		reversedPeopleDict: []string{"one", "three"},
 		sampling:           15, // 3
 		granularity:        20, // 3
@@ -1547,7 +1547,7 @@ func TestBurndownMergePeopleHistories(t *testing.T) {
 		FileHistories:      nil,
 		PeopleHistories:    [][][]int64{h2, h2},
 		PeopleMatrix:       nil,
-		TickSize:           24 * time.Hour,
+		tickSize:           24 * time.Hour,
 		reversedPeopleDict: []string{"one", "two"},
 		sampling:           14,
 		granularity:        19,

+ 31 - 0
leaves/devs.go

@@ -1,10 +1,12 @@
 package leaves
 
 import (
+	"errors"
 	"fmt"
 	"io"
 	"sort"
 	"strings"
+	"time"
 
 	"github.com/gogo/protobuf/proto"
 	"gopkg.in/src-d/go-git.v4"
@@ -31,6 +33,8 @@ type DevsAnalysis struct {
 	ticks map[int]map[int]*DevTick
 	// reversedPeopleDict references IdentityDetector.ReversedPeopleDict
 	reversedPeopleDict []string
+	// tickSize references TicksSinceStart.TickSize
+	tickSize time.Duration
 
 	l core.Logger
 }
@@ -43,6 +47,8 @@ type DevsResult struct {
 
 	// reversedPeopleDict references IdentityDetector.ReversedPeopleDict
 	reversedPeopleDict []string
+	// tickSize references TicksSinceStart.TickSize
+	tickSize time.Duration
 }
 
 // DevTick is the statistics for a development tick and a particular developer.
@@ -103,6 +109,9 @@ func (devs *DevsAnalysis) Configure(facts map[string]interface{}) error {
 	if val, exists := facts[identity.FactIdentityDetectorReversedPeopleDict].([]string); exists {
 		devs.reversedPeopleDict = val
 	}
+	if val, exists := facts[items.FactTickSize].(time.Duration); exists {
+		devs.tickSize = val
+	}
 	return nil
 }
 
@@ -119,6 +128,9 @@ func (devs *DevsAnalysis) Description() string {
 // Initialize resets the temporary caches and prepares this PipelineItem for a series of Consume()
 // calls. The repository which is going to be analysed is supplied as an argument.
 func (devs *DevsAnalysis) Initialize(repository *git.Repository) error {
+	if devs.tickSize == 0 {
+		return errors.New("tick size must be specified")
+	}
 	devs.l = core.NewLogger()
 	devs.ticks = map[int]map[int]*DevTick{}
 	devs.OneShotMergeProcessor.Initialize()
@@ -178,6 +190,7 @@ func (devs *DevsAnalysis) Finalize() interface{} {
 	return DevsResult{
 		Ticks:              devs.ticks,
 		reversedPeopleDict: devs.reversedPeopleDict,
+		tickSize:           devs.tickSize,
 	}
 }
 
@@ -234,6 +247,7 @@ func (devs *DevsAnalysis) Deserialize(pbmessage []byte) (interface{}, error) {
 	result := DevsResult{
 		Ticks:              ticks,
 		reversedPeopleDict: message.DevIndex,
+		tickSize:           time.Duration(message.TickSize),
 	}
 	return result, nil
 }
@@ -242,6 +256,19 @@ func (devs *DevsAnalysis) Deserialize(pbmessage []byte) (interface{}, error) {
 func (devs *DevsAnalysis) MergeResults(r1, r2 interface{}, c1, c2 *core.CommonAnalysisResult) interface{} {
 	cr1 := r1.(DevsResult)
 	cr2 := r2.(DevsResult)
+	if cr1.tickSize != cr2.tickSize {
+		return fmt.Errorf("mismatching tick sizes (r1: %d, r2: %d) received",
+			cr1.tickSize, cr2.tickSize)
+	}
+	t01 := items.FloorTime(c1.BeginTimeAsTime(), cr1.tickSize)
+	t02 := items.FloorTime(c2.BeginTimeAsTime(), cr2.tickSize)
+	t0 := t01
+	if t02.Before(t0) {
+		t0 = t02
+	}
+	offset1 := int(t01.Sub(t0) / cr1.tickSize)
+	offset2 := int(t02.Sub(t0) / cr2.tickSize)
+
 	merged := DevsResult{}
 	var mergedIndex map[string]identity.MergedIndex
 	mergedIndex, merged.reversedPeopleDict = identity.MergeReversedDictsIdentities(
@@ -249,6 +276,7 @@ func (devs *DevsAnalysis) MergeResults(r1, r2 interface{}, c1, c2 *core.CommonAn
 	newticks := map[int]map[int]*DevTick{}
 	merged.Ticks = newticks
 	for tick, dd := range cr1.Ticks {
+		tick += offset1
 		newdd, exists := newticks[tick]
 		if !exists {
 			newdd = map[int]*DevTick{}
@@ -279,6 +307,7 @@ func (devs *DevsAnalysis) MergeResults(r1, r2 interface{}, c1, c2 *core.CommonAn
 		}
 	}
 	for tick, dd := range cr2.Ticks {
+		tick += offset2
 		newdd, exists := newticks[tick]
 		if !exists {
 			newdd = map[int]*DevTick{}
@@ -357,11 +386,13 @@ func (devs *DevsAnalysis) serializeText(result *DevsResult, writer io.Writer) {
 	for _, person := range result.reversedPeopleDict {
 		fmt.Fprintf(writer, "  - %s\n", yaml.SafeString(person))
 	}
+	fmt.Fprintln(writer, "  tick_size:", int(result.tickSize.Seconds()))
 }
 
 func (devs *DevsAnalysis) serializeBinary(result *DevsResult, writer io.Writer) error {
 	message := pb.DevsAnalysisResults{}
 	message.DevIndex = result.reversedPeopleDict
+	message.TickSize = int64(result.tickSize)
 	message.Ticks = map[int32]*pb.TickDevs{}
 	for tick, devs := range result.Ticks {
 		dd := &pb.TickDevs{}

+ 42 - 3
leaves/devs_test.go

@@ -3,6 +3,7 @@ package leaves
 import (
 	"bytes"
 	"testing"
+	"time"
 
 	"github.com/gogo/protobuf/proto"
 	"github.com/stretchr/testify/assert"
@@ -18,6 +19,7 @@ import (
 
 func fixtureDevs() *DevsAnalysis {
 	d := DevsAnalysis{}
+	d.tickSize = 24 * time.Hour
 	d.Initialize(test.Repository)
 	people := [...]string{"one@srcd", "two@srcd"}
 	d.reversedPeopleDict = people[:]
@@ -67,13 +69,17 @@ func TestDevsConfigure(t *testing.T) {
 	devs := DevsAnalysis{}
 	facts := map[string]interface{}{}
 	facts[ConfigDevsConsiderEmptyCommits] = true
-	devs.Configure(facts)
-	assert.Equal(t, devs.ConsiderEmptyCommits, true)
+	facts[items.FactTickSize] = 3 * time.Hour
+	assert.NoError(t, devs.Configure(facts))
+	assert.True(t, devs.ConsiderEmptyCommits)
+	assert.Equal(t, 3*time.Hour, devs.tickSize)
 }
 
 func TestDevsInitialize(t *testing.T) {
 	d := fixtureDevs()
 	assert.NotNil(t, d.ticks)
+	d = &DevsAnalysis{}
+	assert.Error(t, d.Initialize(test.Repository))
 }
 
 func TestDevsConsumeFinalize(t *testing.T) {
@@ -279,6 +285,7 @@ func TestDevsFinalize(t *testing.T) {
 	x := devs.Finalize().(DevsResult)
 	assert.Equal(t, x.Ticks, devs.ticks)
 	assert.Equal(t, x.reversedPeopleDict, devs.reversedPeopleDict)
+	assert.Equal(t, 24*time.Hour, devs.tickSize)
 }
 
 func TestDevsFork(t *testing.T) {
@@ -310,6 +317,7 @@ func TestDevsSerialize(t *testing.T) {
   people:
   - "one@srcd"
   - "two@srcd"
+  tick_size: 86400
 `, buffer.String())
 
 	buffer = &bytes.Buffer{}
@@ -318,6 +326,7 @@ func TestDevsSerialize(t *testing.T) {
 	msg := pb.DevsAnalysisResults{}
 	assert.Nil(t, proto.Unmarshal(buffer.Bytes(), &msg))
 	assert.Equal(t, msg.DevIndex, devs.reversedPeopleDict)
+	assert.Equal(t, int64(24*time.Hour), msg.TickSize)
 	assert.Len(t, msg.Ticks, 2)
 	assert.Len(t, msg.Ticks[1].Devs, 2)
 	assert.Equal(t, msg.Ticks[1].Devs[0], &pb.DevTick{
@@ -360,6 +369,7 @@ func TestDevsMergeResults(t *testing.T) {
 	r1 := DevsResult{
 		Ticks:              map[int]map[int]*DevTick{},
 		reversedPeopleDict: people1[:],
+		tickSize:           24 * time.Hour,
 	}
 	r1.Ticks[1] = map[int]*DevTick{}
 	r1.Ticks[1][0] = &DevTick{10, ls(20, 30, 40), map[string]items.LineStats{"Go": ls(12, 13, 14)}}
@@ -373,6 +383,7 @@ func TestDevsMergeResults(t *testing.T) {
 	r2 := DevsResult{
 		Ticks:              map[int]map[int]*DevTick{},
 		reversedPeopleDict: people2[:],
+		tickSize:           22 * time.Hour,
 	}
 	r2.Ticks[1] = map[int]*DevTick{}
 	r2.Ticks[1][0] = &DevTick{10, ls(20, 30, 40), map[string]items.LineStats{"Go": ls(12, 13, 14)}}
@@ -387,7 +398,10 @@ func TestDevsMergeResults(t *testing.T) {
 		100, ls(200, 300, 400), map[string]items.LineStats{"Go": ls(62, 63, 64)}}
 
 	devs := fixtureDevs()
-	rm := devs.MergeResults(r1, r2, nil, nil).(DevsResult)
+	c1 := core.CommonAnalysisResult{BeginTime: 1556224895}
+	assert.IsType(t, assert.AnError, devs.MergeResults(r1, r2, &c1, &c1))
+	r2.tickSize = r1.tickSize
+	rm := devs.MergeResults(r1, r2, &c1, &c1).(DevsResult)
 	peoplerm := [...]string{"1@srcd", "2@srcd", "3@srcd"}
 	assert.Equal(t, rm.reversedPeopleDict, peoplerm[:])
 	assert.Len(t, rm.Ticks, 4)
@@ -408,4 +422,29 @@ func TestDevsMergeResults(t *testing.T) {
 		identity.AuthorMissing: {
 			100 * 2, ls(200*2, 300*2, 400*2), map[string]items.LineStats{"Go": ls(94, 96, 98)}},
 	})
+
+	c2 := core.CommonAnalysisResult{BeginTime: 1556224895 + 24*3600}
+	rm = devs.MergeResults(r1, r2, &c1, &c2).(DevsResult)
+	assert.Len(t, rm.Ticks, 5)
+	assert.Equal(t, rm.Ticks[1], map[int]*DevTick{
+		0: {10, ls(20, 30, 40), map[string]items.LineStats{"Go": ls(12, 13, 14)}},
+		1: {1, ls(2, 3, 4), map[string]items.LineStats{"Go": ls(22, 23, 24)}},
+	})
+	assert.Equal(t, rm.Ticks[2], map[int]*DevTick{
+		2: {10, ls(20, 30, 40), map[string]items.LineStats{"Go": ls(12, 13, 14)}},
+		0: {1, ls(2, 3, 4), map[string]items.LineStats{"Go": ls(22, 23, 24)}},
+	})
+	assert.Equal(t, rm.Ticks[3], map[int]*DevTick{
+		2:                      {11, ls(21, 31, 41), map[string]items.LineStats{"Go": ls(32, 33, 34)}},
+		identity.AuthorMissing: {100, ls(200, 300, 400), map[string]items.LineStats{"Go": ls(42, 43, 44)}},
+	})
+	assert.Equal(t, rm.Ticks[10], map[int]*DevTick{
+		0:                      {11, ls(21, 31, 41), map[string]items.LineStats{}},
+		identity.AuthorMissing: {100, ls(200, 300, 400), map[string]items.LineStats{"Go": ls(32, 33, 34)}},
+	})
+	assert.Equal(t, rm.Ticks[11], map[int]*DevTick{
+		1:                      {10, ls(20, 30, 40), map[string]items.LineStats{"Go": ls(42, 43, 44)}},
+		2:                      {11, ls(21, 31, 41), map[string]items.LineStats{"Go": ls(52, 53, 54)}},
+		identity.AuthorMissing: {100, ls(200, 300, 400), map[string]items.LineStats{"Go": ls(62, 63, 64)}},
+	})
 }

文件差異過大導致無法顯示
+ 31 - 24
python/labours/pb_pb2.py