package leaves

import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"sort"
	"sync"
	"unicode/utf8"

	"github.com/gogo/protobuf/proto"
	"github.com/sergi/go-diff/diffmatchpatch"
	"gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
	"gopkg.in/src-d/go-git.v4/utils/merkletrie"
	"gopkg.in/src-d/hercules.v6/internal/burndown"
	"gopkg.in/src-d/hercules.v6/internal/core"
	"gopkg.in/src-d/hercules.v6/internal/pb"
	items "gopkg.in/src-d/hercules.v6/internal/plumbing"
	"gopkg.in/src-d/hercules.v6/internal/plumbing/identity"
	"gopkg.in/src-d/hercules.v6/internal/rbtree"
	"gopkg.in/src-d/hercules.v6/internal/yaml"
)

// BurndownAnalysis allows gathering the line burndown statistics for a Git repository.
// It is a LeafPipelineItem.
// Reference: https://erikbern.com/2016/12/05/the-half-life-of-code.html
type BurndownAnalysis struct {
	// Granularity sets the size of each band - the number of days it spans.
	// Smaller values provide better resolution but require more work and eat more
	// memory. 30 days is usually enough.
	Granularity int
	// Sampling sets how detailed the statistics are - the size of the interval in
	// days between consecutive measurements. It may not be greater than Granularity. Try 15 or 30.
	Sampling int
	// TrackFiles enables or disables the fine-grained per-file burndown analysis.
	// It does not change the project level burndown results.
	TrackFiles bool
	// PeopleNumber is the number of developers for which to collect the burndown stats. 0 disables it.
	PeopleNumber int
	// HibernationThreshold sets the hibernation threshold for the underlying
	// RBTree allocator. It is useful to trade CPU time for reduced peak memory consumption
	// if there are many branches.
	HibernationThreshold int
	// HibernationToDisk specifies whether the hibernated RBTree allocator must be saved on disk
	// rather than kept in memory.
	HibernationToDisk bool
	// HibernationDirectory is the name of the temporary directory to use for saving hibernated
	// RBTree allocators.
	HibernationDirectory string
	// Debug activates the debugging mode. Analyse() runs slower in this mode
	// but it accurately checks all the intermediate states for invariant
	// violations.
	Debug bool
	// repository points to the analysed Git repository struct from go-git.
	repository *git.Repository
	// globalHistory is the daily deltas of daily line counts.
	// E.g. day 0:  day 0 +50 lines
	//      day 10: day 0 -10 lines; day 10 +20 lines
	//      day 12: day 0 -5 lines; day 10 -3 lines; day 12 +10 lines
	// map[0][0] = 50
	// map[10][0] = -10
	// map[10][10] = 20
	// map[12][0] = -5
	// map[12][10] = -3
	// map[12][12] = 10
	globalHistory sparseHistory
	// fileHistories is the daily deltas of each file's daily line counts.
	fileHistories map[string]sparseHistory
	// peopleHistories is the daily deltas of each person's daily line counts.
	peopleHistories []sparseHistory
	// files is the mapping <file path> -> *File.
	files map[string]*burndown.File
	// fileAllocator is the allocator for RBTree-s in `files`.
	fileAllocator *rbtree.Allocator
	// hibernatedFileName is the path to the serialized `fileAllocator`.
	hibernatedFileName string
	// mergedFiles is used during merges to record the real file hashes
	mergedFiles map[string]bool
	// mergedAuthor is the author of the processed merge commit.
	mergedAuthor int
	// renames is a quick and dirty solution for the "future branch renames" problem.
	renames map[string]string
	// matrix is the mutual deletions and self insertions.
	matrix []map[int]int64
	// day is the most recent day index processed.
	day int
	// previousDay is the day from the previous sample period -
	// different from DaysSinceStart.previousDay.
	previousDay int
	// reversedPeopleDict references IdentityDetector.ReversedPeopleDict.
	reversedPeopleDict []string
}

// BurndownResult carries the result of running BurndownAnalysis - it is returned by
// BurndownAnalysis.Finalize().
type BurndownResult struct {
	// [number of samples][number of bands]
	// The number of samples depends on Sampling: the smaller the Sampling, the larger the number.
	// The number of bands depends on Granularity: the smaller the Granularity, the larger the number.
	GlobalHistory DenseHistory
	// The key is the path inside the Git repository. The value's dimensions are the same as
	// in GlobalHistory.
	FileHistories map[string]DenseHistory
	// [number of people][number of samples][number of bands]
	PeopleHistories []DenseHistory
	// [number of people][number of people + 2]
	// The first element is the total number of lines added by the author.
	// The second element is the number of removals by unidentified authors (outside reversedPeopleDict).
	// The rest of the elements are equal to the number of line removals by the corresponding
	// authors in reversedPeopleDict: 2 -> 0, 3 -> 1, etc.
	PeopleMatrix DenseHistory

	// The following members are private.

	// reversedPeopleDict is borrowed from IdentityDetector and becomes available after
	// Pipeline.Initialize(facts map[string]interface{}). Thus it can be obtained via
	// facts[FactIdentityDetectorReversedPeopleDict].
	reversedPeopleDict []string
	// sampling and granularity are copied from BurndownAnalysis and stored for service purposes
	// such as merging several results together.
	sampling    int
	granularity int
}
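// A worked example of the dimensions above (illustrative only, the numbers are
// made up): with Sampling = 15 and Granularity = 30, a repository whose last
// recorded day is 89 produces
//
//	samples = 89/15 + 1 = 6
//	bands   = 89/30 + 1 = 3
//
// so GlobalHistory is a 6x3 DenseHistory. A PeopleMatrix row for author A
// reads [lines added by A, lines of A removed by unidentified authors,
// lines of A removed by author 0, lines of A removed by author 1, ...].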
const (
	// ConfigBurndownGranularity is the name of the option to set BurndownAnalysis.Granularity.
	ConfigBurndownGranularity = "Burndown.Granularity"
	// ConfigBurndownSampling is the name of the option to set BurndownAnalysis.Sampling.
	ConfigBurndownSampling = "Burndown.Sampling"
	// ConfigBurndownTrackFiles enables burndown collection for files.
	ConfigBurndownTrackFiles = "Burndown.TrackFiles"
	// ConfigBurndownTrackPeople enables burndown collection for authors.
	ConfigBurndownTrackPeople = "Burndown.TrackPeople"
	// ConfigBurndownHibernationThreshold sets the hibernation threshold for the underlying
	// RBTree allocator. It is useful to trade CPU time for reduced peak memory consumption
	// if there are many branches.
	ConfigBurndownHibernationThreshold = "Burndown.HibernationThreshold"
	// ConfigBurndownHibernationToDisk sets whether the hibernated RBTree allocator must be saved
	// on disk rather than kept in memory.
	ConfigBurndownHibernationToDisk = "Burndown.HibernationOnDisk"
	// ConfigBurndownHibernationDirectory sets the name of the temporary directory to use for
	// saving hibernated RBTree allocators.
	ConfigBurndownHibernationDirectory = "Burndown.HibernationDirectory"
	// ConfigBurndownDebug enables some extra debug assertions.
	ConfigBurndownDebug = "Burndown.Debug"
	// DefaultBurndownGranularity is the default number of days for BurndownAnalysis.Granularity
	// and BurndownAnalysis.Sampling.
	DefaultBurndownGranularity = 30
	// authorSelf is the internal author index which is used in BurndownAnalysis.Finalize() to
	// format the author overwrites matrix.
	authorSelf = (1 << (32 - burndown.TreeMaxBinPower)) - 2
)

type sparseHistory = map[int]map[int]int64

// DenseHistory is the matrix [number of samples][number of bands] -> number of lines.
type DenseHistory = [][]int64

// Name of this PipelineItem. Uniquely identifies the type, used for mapping keys, etc.
func (analyser *BurndownAnalysis) Name() string {
	return "Burndown"
}

// Provides returns the list of names of entities which are produced by this PipelineItem.
// Each produced entity will be inserted into `deps` of dependent Consume()-s according
// to this list. Also used by core.Registry to build the global map of providers.
func (analyser *BurndownAnalysis) Provides() []string {
	return []string{}
}

// Requires returns the list of names of entities which are needed by this PipelineItem.
// Each requested entity will be inserted into `deps` of Consume(). In turn, those
// entities are Provides() upstream.
func (analyser *BurndownAnalysis) Requires() []string {
	arr := [...]string{
		items.DependencyFileDiff, items.DependencyTreeChanges, items.DependencyBlobCache,
		items.DependencyDay, identity.DependencyAuthor}
	return arr[:]
}

// ListConfigurationOptions returns the list of changeable public properties of this PipelineItem.
func (analyser *BurndownAnalysis) ListConfigurationOptions() []core.ConfigurationOption {
	options := [...]core.ConfigurationOption{{
		Name:        ConfigBurndownGranularity,
		Description: "How many days there are in a single band.",
		Flag:        "granularity",
		Type:        core.IntConfigurationOption,
		Default:     DefaultBurndownGranularity}, {
		Name:        ConfigBurndownSampling,
		Description: "How frequently to record the state in days.",
		Flag:        "sampling",
		Type:        core.IntConfigurationOption,
		Default:     DefaultBurndownGranularity}, {
		Name:        ConfigBurndownTrackFiles,
		Description: "Record detailed statistics per each file.",
		Flag:        "burndown-files",
		Type:        core.BoolConfigurationOption,
		Default:     false}, {
		Name:        ConfigBurndownTrackPeople,
		Description: "Record detailed statistics per each developer.",
		Flag:        "burndown-people",
		Type:        core.BoolConfigurationOption,
		Default:     false}, {
		Name: ConfigBurndownHibernationThreshold,
		Description: "The minimum size for the allocated memory in each branch to be compressed. " +
			"0 disables this optimization. Lower values trade more CPU time. Sane examples: Nx1000.",
		Flag:    "burndown-hibernation-threshold",
		Type:    core.IntConfigurationOption,
		Default: 0}, {
		Name: ConfigBurndownHibernationToDisk,
		Description: "Save hibernated RBTree allocators to disk rather than keep them in memory; " +
			"requires --burndown-hibernation-threshold to be greater than zero.",
		Flag:    "burndown-hibernation-disk",
		Type:    core.BoolConfigurationOption,
		Default: false}, {
		Name: ConfigBurndownHibernationDirectory,
		Description: "Temporary directory where to save the hibernated RBTree allocators; " +
			"requires --burndown-hibernation-disk.",
		Flag:    "burndown-hibernation-dir",
		Type:    core.PathConfigurationOption,
		Default: ""}, {
		Name:        ConfigBurndownDebug,
		Description: "Validate the trees on each step.",
		Flag:        "burndown-debug",
		Type:        core.BoolConfigurationOption,
		Default:     false},
	}
	return options[:]
}

// Configure sets the properties previously published by ListConfigurationOptions().
func (analyser *BurndownAnalysis) Configure(facts map[string]interface{}) error {
	if val, exists := facts[ConfigBurndownGranularity].(int); exists {
		analyser.Granularity = val
	}
	if val, exists := facts[ConfigBurndownSampling].(int); exists {
		analyser.Sampling = val
	}
	if val, exists := facts[ConfigBurndownTrackFiles].(bool); exists {
		analyser.TrackFiles = val
	}
	if people, exists := facts[ConfigBurndownTrackPeople].(bool); people {
		if val, exists := facts[identity.FactIdentityDetectorPeopleCount].(int); exists {
			if val < 0 {
				return fmt.Errorf("PeopleNumber is negative: %d", val)
			}
			analyser.PeopleNumber = val
			analyser.reversedPeopleDict = facts[identity.FactIdentityDetectorReversedPeopleDict].([]string)
		}
	} else if exists {
		analyser.PeopleNumber = 0
	}
	if val, exists := facts[ConfigBurndownHibernationThreshold].(int); exists {
		analyser.HibernationThreshold = val
	}
	if val, exists := facts[ConfigBurndownHibernationToDisk].(bool); exists {
		analyser.HibernationToDisk = val
	}
	if val, exists := facts[ConfigBurndownHibernationDirectory].(string); exists {
		analyser.HibernationDirectory = val
	}
	if val, exists := facts[ConfigBurndownDebug].(bool); exists {
		analyser.Debug = val
	}
	return nil
}

// Flag for the command line switch which enables this analysis.
func (analyser *BurndownAnalysis) Flag() string {
	return "burndown"
}

// Description returns the text which explains what the analysis is doing.
func (analyser *BurndownAnalysis) Description() string {
	return "Line burndown stats indicate the numbers of lines which were last edited within " +
		"specific time intervals through time. Search for \"git-of-theseus\" on the internet."
}
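// A hypothetical command line sketch (the flag names come from
// ListConfigurationOptions above; the exact `hercules` invocation shape is an
// assumption, not prescribed by this file):
//
//	hercules --burndown --granularity 30 --sampling 15 --burndown-files <repository>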
// Initialize resets the temporary caches and prepares this PipelineItem for a series of Consume()
// calls. The repository which is going to be analysed is supplied as an argument.
func (analyser *BurndownAnalysis) Initialize(repository *git.Repository) error {
	if analyser.Granularity <= 0 {
		log.Printf("Warning: adjusted the granularity to %d days\n",
			DefaultBurndownGranularity)
		analyser.Granularity = DefaultBurndownGranularity
	}
	if analyser.Sampling <= 0 {
		log.Printf("Warning: adjusted the sampling to %d days\n",
			DefaultBurndownGranularity)
		analyser.Sampling = DefaultBurndownGranularity
	}
	if analyser.Sampling > analyser.Granularity {
		log.Printf("Warning: granularity may not be less than sampling, adjusted to %d\n",
			analyser.Granularity)
		analyser.Sampling = analyser.Granularity
	}
	analyser.repository = repository
	analyser.globalHistory = sparseHistory{}
	analyser.fileHistories = map[string]sparseHistory{}
	if analyser.PeopleNumber < 0 {
		return fmt.Errorf("PeopleNumber is negative: %d", analyser.PeopleNumber)
	}
	analyser.peopleHistories = make([]sparseHistory, analyser.PeopleNumber)
	analyser.files = map[string]*burndown.File{}
	analyser.fileAllocator = rbtree.NewAllocator()
	analyser.fileAllocator.HibernationThreshold = analyser.HibernationThreshold
	analyser.mergedFiles = map[string]bool{}
	analyser.mergedAuthor = identity.AuthorMissing
	analyser.renames = map[string]string{}
	analyser.matrix = make([]map[int]int64, analyser.PeopleNumber)
	analyser.day = 0
	analyser.previousDay = 0
	return nil
}

// Consume runs this PipelineItem on the next commit's data.
// `deps` contains all the results from upstream PipelineItem-s as requested by Requires().
// Additionally, DependencyCommit is always present there and represents the analysed *object.Commit.
// This function returns the mapping with analysis results. The keys must be the same as
// in Provides(). If there was an error, nil is returned.
func (analyser *BurndownAnalysis) Consume(deps map[string]interface{}) (map[string]interface{}, error) {
	if analyser.fileAllocator.Size() == 0 && len(analyser.files) > 0 {
		panic("BurndownAnalysis.Consume() was called on a hibernated instance")
	}
	author := deps[identity.DependencyAuthor].(int)
	day := deps[items.DependencyDay].(int)
	if !deps[core.DependencyIsMerge].(bool) {
		analyser.day = day
		analyser.onNewDay()
	} else {
		// effectively disables the status updates if the commit is a merge
		// we will analyse the conflicts resolution in Merge()
		analyser.day = burndown.TreeMergeMark
		analyser.mergedFiles = map[string]bool{}
		analyser.mergedAuthor = author
	}
	cache := deps[items.DependencyBlobCache].(map[plumbing.Hash]*items.CachedBlob)
	treeDiffs := deps[items.DependencyTreeChanges].(object.Changes)
	fileDiffs := deps[items.DependencyFileDiff].(map[string]items.FileDiffData)
	for _, change := range treeDiffs {
		action, _ := change.Action()
		var err error
		switch action {
		case merkletrie.Insert:
			err = analyser.handleInsertion(change, author, cache)
		case merkletrie.Delete:
			err = analyser.handleDeletion(change, author, cache)
		case merkletrie.Modify:
			err = analyser.handleModification(change, author, cache, fileDiffs)
		}
		if err != nil {
			return nil, err
		}
	}
	// in case of a merge, analyser.day equals TreeMergeMark up to this point; restore it
	analyser.day = day
	return nil, nil
}
// Fork clones this item. Everything is copied by reference except the files
// which are copied by value.
func (analyser *BurndownAnalysis) Fork(n int) []core.PipelineItem {
	result := make([]core.PipelineItem, n)
	for i := range result {
		clone := *analyser
		clone.files = map[string]*burndown.File{}
		clone.fileAllocator = clone.fileAllocator.Clone()
		for key, file := range analyser.files {
			clone.files[key] = file.CloneShallow(clone.fileAllocator)
		}
		result[i] = &clone
	}
	return result
}

// Merge combines several items together. We apply the special file merging logic here.
func (analyser *BurndownAnalysis) Merge(branches []core.PipelineItem) {
	all := make([]*BurndownAnalysis, len(branches)+1)
	all[0] = analyser
	for i, branch := range branches {
		all[i+1] = branch.(*BurndownAnalysis)
	}
	keys := map[string]bool{}
	for _, burn := range all {
		for key, val := range burn.mergedFiles {
			// (*)
			// there can be contradicting flags,
			// e.g. the item was renamed and a new item was written in its place
			// this may not be exactly accurate
			keys[key] = keys[key] || val
		}
	}
	for key, val := range keys {
		if !val {
			for _, burn := range all {
				if f, exists := burn.files[key]; exists {
					f.Delete()
				}
				delete(burn.files, key)
			}
			continue
		}
		files := make([]*burndown.File, 0, len(all))
		for _, burn := range all {
			file := burn.files[key]
			if file != nil {
				// file can be nil if it is considered binary in this branch
				files = append(files, file)
			}
		}
		if len(files) == 0 {
			// so we could be wrong in (*) and there is no such file eventually
			// it could also have been removed in the merge commit itself
			continue
		}
		files[0].Merge(analyser.packPersonWithDay(analyser.mergedAuthor, analyser.day), files[1:]...)
		for _, burn := range all {
			if burn.files[key] != files[0] {
				if burn.files[key] != nil {
					burn.files[key].Delete()
				}
				burn.files[key] = files[0].CloneDeep(burn.fileAllocator)
			}
		}
	}
	analyser.onNewDay()
}

// Hibernate compresses the bound RBTree memory with the files.
func (analyser *BurndownAnalysis) Hibernate() error {
	analyser.fileAllocator.Hibernate()
	if analyser.HibernationToDisk {
		file, err := ioutil.TempFile(analyser.HibernationDirectory, "*-hercules.bin")
		if err != nil {
			return err
		}
		analyser.hibernatedFileName = file.Name()
		err = file.Close()
		if err != nil {
			analyser.hibernatedFileName = ""
			return err
		}
		err = analyser.fileAllocator.Serialize(analyser.hibernatedFileName)
		if err != nil {
			analyser.hibernatedFileName = ""
			return err
		}
	}
	return nil
}

// Boot decompresses the bound RBTree memory with the files.
func (analyser *BurndownAnalysis) Boot() error {
	if analyser.hibernatedFileName != "" {
		err := analyser.fileAllocator.Deserialize(analyser.hibernatedFileName)
		if err != nil {
			return err
		}
		err = os.Remove(analyser.hibernatedFileName)
		if err != nil {
			return err
		}
		analyser.hibernatedFileName = ""
	}
	analyser.fileAllocator.Boot()
	return nil
}
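// A hypothetical usage sketch (not called anywhere in this file): the pipeline
// is expected to pair Hibernate() with Boot() around the period while a branch
// is parked, e.g.
//
//	if err := analyser.Hibernate(); err != nil {
//		return err
//	}
//	// ... other branches are being processed; peak memory stays lower ...
//	if err := analyser.Boot(); err != nil {
//		return err
//	}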
// Finalize returns the result of the analysis. Further Consume() calls are not expected.
func (analyser *BurndownAnalysis) Finalize() interface{} {
	globalHistory, lastDay := analyser.groupSparseHistory(analyser.globalHistory, -1)
	fileHistories := map[string]DenseHistory{}
	for key, history := range analyser.fileHistories {
		if len(history) > 0 {
			fileHistories[key], _ = analyser.groupSparseHistory(history, lastDay)
		}
	}
	peopleHistories := make([]DenseHistory, analyser.PeopleNumber)
	for i, history := range analyser.peopleHistories {
		if len(history) > 0 {
			// there can be people with only trivial merge commits and without lines of their own
			peopleHistories[i], _ = analyser.groupSparseHistory(history, lastDay)
		} else {
			peopleHistories[i] = make(DenseHistory, len(globalHistory))
			for j, gh := range globalHistory {
				peopleHistories[i][j] = make([]int64, len(gh))
			}
		}
	}
	peopleMatrix := make(DenseHistory, analyser.PeopleNumber)
	for i, row := range analyser.matrix {
		mrow := make([]int64, analyser.PeopleNumber+2)
		peopleMatrix[i] = mrow
		for key, val := range row {
			if key == identity.AuthorMissing {
				key = -1
			} else if key == authorSelf {
				key = -2
			}
			mrow[key+2] = val
		}
	}
	return BurndownResult{
		GlobalHistory:      globalHistory,
		FileHistories:      fileHistories,
		PeopleHistories:    peopleHistories,
		PeopleMatrix:       peopleMatrix,
		reversedPeopleDict: analyser.reversedPeopleDict,
		sampling:           analyser.Sampling,
		granularity:        analyser.Granularity,
	}
}

// Serialize converts the analysis result as returned by Finalize() to text or bytes.
// The text format is YAML and the bytes format is Protocol Buffers.
func (analyser *BurndownAnalysis) Serialize(result interface{}, binary bool, writer io.Writer) error {
	burndownResult := result.(BurndownResult)
	if binary {
		return analyser.serializeBinary(&burndownResult, writer)
	}
	analyser.serializeText(&burndownResult, writer)
	return nil
}

// Deserialize converts the specified protobuf bytes to BurndownResult.
func (analyser *BurndownAnalysis) Deserialize(pbmessage []byte) (interface{}, error) {
	msg := pb.BurndownAnalysisResults{}
	err := proto.Unmarshal(pbmessage, &msg)
	if err != nil {
		return nil, err
	}
	result := BurndownResult{}
	convertCSR := func(mat *pb.BurndownSparseMatrix) DenseHistory {
		res := make(DenseHistory, mat.NumberOfRows)
		for i := 0; i < int(mat.NumberOfRows); i++ {
			res[i] = make([]int64, mat.NumberOfColumns)
			for j := 0; j < len(mat.Rows[i].Columns); j++ {
				res[i][j] = int64(mat.Rows[i].Columns[j])
			}
		}
		return res
	}
	result.GlobalHistory = convertCSR(msg.Project)
	result.FileHistories = map[string]DenseHistory{}
	for _, mat := range msg.Files {
		result.FileHistories[mat.Name] = convertCSR(mat)
	}
	result.reversedPeopleDict = make([]string, len(msg.People))
	result.PeopleHistories = make([]DenseHistory, len(msg.People))
	for i, mat := range msg.People {
		result.PeopleHistories[i] = convertCSR(mat)
		result.reversedPeopleDict[i] = mat.Name
	}
	if msg.PeopleInteraction != nil {
		result.PeopleMatrix = make(DenseHistory, msg.PeopleInteraction.NumberOfRows)
	}
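	// The PeopleInteraction matrix is stored in compressed sparse row (CSR)
	// form. An illustrative decoding example (made-up numbers): with
	//	Data    = [5, 7, 2]
	//	Indices = [0, 2, 1]
	//	Indptr  = [0, 2, 3]
	// row 0 holds (column 0 -> 5) and (column 2 -> 7), row 1 holds
	// (column 1 -> 2), which is exactly what the loop below reconstructs.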
	for i := 0; i < len(result.PeopleMatrix); i++ {
		result.PeopleMatrix[i] = make([]int64, msg.PeopleInteraction.NumberOfColumns)
		for j := int(msg.PeopleInteraction.Indptr[i]); j < int(msg.PeopleInteraction.Indptr[i+1]); j++ {
			result.PeopleMatrix[i][msg.PeopleInteraction.Indices[j]] = msg.PeopleInteraction.Data[j]
		}
	}
	result.sampling = int(msg.Sampling)
	result.granularity = int(msg.Granularity)
	return result, nil
}

// MergeResults combines two BurndownResult-s together.
func (analyser *BurndownAnalysis) MergeResults(
	r1, r2 interface{}, c1, c2 *core.CommonAnalysisResult) interface{} {
	bar1 := r1.(BurndownResult)
	bar2 := r2.(BurndownResult)
	merged := BurndownResult{}
	if bar1.sampling < bar2.sampling {
		merged.sampling = bar1.sampling
	} else {
		merged.sampling = bar2.sampling
	}
	if bar1.granularity < bar2.granularity {
		merged.granularity = bar1.granularity
	} else {
		merged.granularity = bar2.granularity
	}
	var people map[string][3]int
	people, merged.reversedPeopleDict = identity.Detector{}.MergeReversedDicts(
		bar1.reversedPeopleDict, bar2.reversedPeopleDict)
	var wg sync.WaitGroup
	if len(bar1.GlobalHistory) > 0 || len(bar2.GlobalHistory) > 0 {
		wg.Add(1)
		go func() {
			defer wg.Done()
			merged.GlobalHistory = mergeMatrices(
				bar1.GlobalHistory, bar2.GlobalHistory,
				bar1.granularity, bar1.sampling,
				bar2.granularity, bar2.sampling,
				c1, c2)
		}()
	}
	if len(bar1.FileHistories) > 0 || len(bar2.FileHistories) > 0 {
		merged.FileHistories = map[string]DenseHistory{}
		historyMutex := sync.Mutex{}
		for key, fh1 := range bar1.FileHistories {
			if fh2, exists := bar2.FileHistories[key]; exists {
				wg.Add(1)
				go func(fh1, fh2 DenseHistory, key string) {
					defer wg.Done()
					historyMutex.Lock()
					defer historyMutex.Unlock()
					merged.FileHistories[key] = mergeMatrices(
						fh1, fh2, bar1.granularity, bar1.sampling, bar2.granularity, bar2.sampling, c1, c2)
				}(fh1, fh2, key)
			} else {
				historyMutex.Lock()
				merged.FileHistories[key] = fh1
				historyMutex.Unlock()
			}
		}
		for key, fh2 := range bar2.FileHistories {
			if _, exists := bar1.FileHistories[key]; !exists {
				historyMutex.Lock()
				merged.FileHistories[key] = fh2
				historyMutex.Unlock()
			}
		}
	}
	if len(merged.reversedPeopleDict) > 0 {
		merged.PeopleHistories = make([]DenseHistory, len(merged.reversedPeopleDict))
		for i, key := range merged.reversedPeopleDict {
			ptrs := people[key]
			if ptrs[1] < 0 {
				if len(bar2.PeopleHistories) > 0 {
					merged.PeopleHistories[i] = bar2.PeopleHistories[ptrs[2]]
				}
			} else if ptrs[2] < 0 {
				if len(bar1.PeopleHistories) > 0 {
					merged.PeopleHistories[i] = bar1.PeopleHistories[ptrs[1]]
				}
			} else {
				wg.Add(1)
				go func(i int) {
					defer wg.Done()
					var m1, m2 DenseHistory
					if len(bar1.PeopleHistories) > 0 {
						m1 = bar1.PeopleHistories[ptrs[1]]
					}
					if len(bar2.PeopleHistories) > 0 {
						m2 = bar2.PeopleHistories[ptrs[2]]
					}
					merged.PeopleHistories[i] = mergeMatrices(
						m1, m2,
						bar1.granularity, bar1.sampling,
						bar2.granularity, bar2.sampling,
						c1, c2,
					)
				}(i)
			}
		}
		wg.Add(1)
		go func() {
			defer wg.Done()
			if len(bar2.PeopleMatrix) == 0 {
				merged.PeopleMatrix = bar1.PeopleMatrix
				// extend the matrix in both directions
				for i := 0; i < len(merged.PeopleMatrix); i++ {
					for j := len(bar1.reversedPeopleDict); j < len(merged.reversedPeopleDict); j++ {
						merged.PeopleMatrix[i] = append(merged.PeopleMatrix[i], 0)
					}
				}
				for i := len(bar1.reversedPeopleDict); i < len(merged.reversedPeopleDict); i++ {
					merged.PeopleMatrix = append(
						merged.PeopleMatrix, make([]int64, len(merged.reversedPeopleDict)+2))
				}
			} else {
				merged.PeopleMatrix = make(DenseHistory, len(merged.reversedPeopleDict))
				for i := range merged.PeopleMatrix {
					merged.PeopleMatrix[i] = make([]int64, len(merged.reversedPeopleDict)+2)
				}
				for i, key := range bar1.reversedPeopleDict {
					mi := people[key][0] // index in merged.reversedPeopleDict
					copy(merged.PeopleMatrix[mi][:2], bar1.PeopleMatrix[i][:2])
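					// The per-author columns (index 2 and above) must be
					// remapped: column j in bar1's numbering corresponds to
					// bar1.reversedPeopleDict[j], whose merged index is
					// people[...][0]; the two leading special columns stay put.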
					for j, val := range bar1.PeopleMatrix[i][2:] {
						merged.PeopleMatrix[mi][2+people[bar1.reversedPeopleDict[j]][0]] = val
					}
				}
				for i, key := range bar2.reversedPeopleDict {
					mi := people[key][0] // index in merged.reversedPeopleDict
					merged.PeopleMatrix[mi][0] += bar2.PeopleMatrix[i][0]
					merged.PeopleMatrix[mi][1] += bar2.PeopleMatrix[i][1]
					for j, val := range bar2.PeopleMatrix[i][2:] {
						merged.PeopleMatrix[mi][2+people[bar2.reversedPeopleDict[j]][0]] += val
					}
				}
			}
		}()
	}
	wg.Wait()
	return merged
}

// mergeMatrices takes two [number of samples][number of bands] matrices,
// resamples them to days so that they become square, sums them and resamples back to the
// least of (sampling1, sampling2) and (granularity1, granularity2).
func mergeMatrices(m1, m2 DenseHistory, granularity1, sampling1, granularity2, sampling2 int,
	c1, c2 *core.CommonAnalysisResult) DenseHistory {
	commonMerged := *c1
	commonMerged.Merge(c2)
	var granularity, sampling int
	if sampling1 < sampling2 {
		sampling = sampling1
	} else {
		sampling = sampling2
	}
	if granularity1 < granularity2 {
		granularity = granularity1
	} else {
		granularity = granularity2
	}
	size := int((commonMerged.EndTime - commonMerged.BeginTime) / (3600 * 24))
	daily := make([][]float32, size+granularity+1)
	for i := range daily {
		daily[i] = make([]float32, size+sampling+1)
	}
	if len(m1) > 0 {
		addBurndownMatrix(m1, granularity1, sampling1, daily,
			int(c1.BeginTime-commonMerged.BeginTime)/(3600*24))
	}
	if len(m2) > 0 {
		addBurndownMatrix(m2, granularity2, sampling2, daily,
			int(c2.BeginTime-commonMerged.BeginTime)/(3600*24))
	}
	// convert daily to [][]int64
	result := make(DenseHistory, (size+sampling-1)/sampling)
	for i := range result {
		result[i] = make([]int64, (size+granularity-1)/granularity)
		sampledIndex := i * sampling
		if i == len(result)-1 {
			sampledIndex = size - 1
		}
		for j := 0; j < len(result[i]); j++ {
			accum := float32(0)
			for k := j * granularity; k < (j+1)*granularity && k < size; k++ {
				accum += daily[sampledIndex][k]
			}
			result[i][j] = int64(accum)
		}
	}
	return result
}
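// A worked size example for mergeMatrices (illustrative, assumed timestamps):
// if the merged interval covers 90 days (size = 90) and the smaller sampling
// and granularity are 15 and 30 respectively, the result has
//
//	(90 + 15 - 1) / 15 = 6 rows (samples)
//	(90 + 30 - 1) / 30 = 3 columns (bands)
//
// with the last row taken from day 89, the final day of the interval.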
// Explode `matrix` so that it is daily sampled and has daily bands, shift by `offset` days
// and add to the accumulator. `daily` is square and is guaranteed to fit `matrix` by
// the caller.
// Rows: *at least* len(matrix) * sampling + offset
// Columns: *at least* len(matrix[...]) * granularity + offset
// `matrix` can be sparse, so that the last columns which are equal to 0 are truncated.
func addBurndownMatrix(matrix DenseHistory, granularity, sampling int, daily [][]float32, offset int) {
	// Determine the maximum number of bands; the actual one may be larger but we do not care
	maxCols := 0
	for _, row := range matrix {
		if maxCols < len(row) {
			maxCols = len(row)
		}
	}
	neededRows := len(matrix)*sampling + offset
	if len(daily) < neededRows {
		log.Panicf("merge bug: too few daily rows: required %d, have %d",
			neededRows, len(daily))
	}
	if len(daily[0]) < maxCols {
		log.Panicf("merge bug: too few daily cols: required %d, have %d",
			maxCols, len(daily[0]))
	}
	for x := 0; x < maxCols; x++ {
		for y := 0; y < len(matrix); y++ {
			if x*granularity > (y+1)*sampling {
				// the future is zeros
				continue
			}
			decay := func(startIndex int, startVal float32) {
				if startVal == 0 {
					return
				}
				k := float32(matrix[y][x]) / startVal // <= 1
				scale := float32((y+1)*sampling - startIndex)
				for i := x * granularity; i < (x+1)*granularity; i++ {
					initial := daily[startIndex-1+offset][i+offset]
					for j := startIndex; j < (y+1)*sampling; j++ {
						daily[j+offset][i+offset] = initial * (1 + (k-1)*float32(j-startIndex+1)/scale)
					}
				}
			}
			raise := func(finishIndex int, finishVal float32) {
				var initial float32
				if y > 0 {
					initial = float32(matrix[y-1][x])
				}
				startIndex := y * sampling
				if startIndex < x*granularity {
					startIndex = x * granularity
				}
				if startIndex == finishIndex {
					return
				}
				avg := (finishVal - initial) / float32(finishIndex-startIndex)
				for j := y * sampling; j < finishIndex; j++ {
					for i := startIndex; i <= j; i++ {
						daily[j+offset][i+offset] = avg
					}
				}
				// copy [x*g..y*s)
				for j := y * sampling; j < finishIndex; j++ {
					for i := x * granularity; i < y*sampling; i++ {
						daily[j+offset][i+offset] = daily[j-1+offset][i+offset]
					}
				}
			}
			if (x+1)*granularity >= (y+1)*sampling {
				// x*granularity <= (y+1)*sampling
				// 1. x*granularity <= y*sampling
				//    y*sampling..(y+1)sampling
				//
				//       x+1
				//        /
				//       /
				//      / y+1  -|
				//     /        |
				//    / y      -|
				//   /
				//  / x
				//
				// 2. x*granularity > y*sampling
				//    x*granularity..(y+1)sampling
				//
				//       x+1
				//        /
				//       /
				//      / y+1  -|
				//     /        |
				//    / x      -|
				//   /
				//  / y
				if x*granularity <= y*sampling {
					raise((y+1)*sampling, float32(matrix[y][x]))
				} else if (y+1)*sampling > x*granularity {
					raise((y+1)*sampling, float32(matrix[y][x]))
					avg := float32(matrix[y][x]) / float32((y+1)*sampling-x*granularity)
					for j := x * granularity; j < (y+1)*sampling; j++ {
						for i := x * granularity; i <= j; i++ {
							daily[j+offset][i+offset] = avg
						}
					}
				}
			} else if (x+1)*granularity >= y*sampling {
				// y*sampling <= (x+1)*granularity < (y+1)sampling
				// y*sampling..(x+1)*granularity
				// (x+1)*granularity..(y+1)sampling
				//        x+1
				//         /\
				//        /  \
				//       /    \
				//      /    y+1
				//     /
				//    y
				v1 := float32(matrix[y-1][x])
				v2 := float32(matrix[y][x])
				var peak float32
				delta := float32((x+1)*granularity - y*sampling)
				var scale float32
				var previous float32
				if y > 0 && (y-1)*sampling >= x*granularity {
					// x*g <= (y-1)*s <= y*s <= (x+1)*g <= (y+1)*s
					//           |________|.......^
					if y > 1 {
						previous = float32(matrix[y-2][x])
					}
					scale = float32(sampling)
				} else {
					// (y-1)*s < x*g <= y*s <= (x+1)*g <= (y+1)*s
					//            |______|.......^
					if y == 0 {
						scale = float32(sampling)
					} else {
						scale = float32(y*sampling - x*granularity)
					}
				}
				peak = v1 + (v1-previous)/scale*delta
				if v2 > peak {
					// we need to adjust the peak, it may not be less than the decayed value
					if y < len(matrix)-1 {
						// y*s <= (x+1)*g <= (y+1)*s < (y+2)*s
						//           ^.........|_________|
						k := (v2 - float32(matrix[y+1][x])) / float32(sampling) // > 0
						peak = float32(matrix[y][x]) + k*float32((y+1)*sampling-(x+1)*granularity)
						// peak > v2 > v1
					} else {
						peak = v2
						// not enough data to interpolate; this is at least not restricted
					}
				}
				raise((x+1)*granularity, peak)
				decay((x+1)*granularity, peak)
			} else {
				// (x+1)*granularity < y*sampling
				// y*sampling..(y+1)sampling
				decay(y*sampling, float32(matrix[y-1][x]))
			}
		}
	}
}

func (analyser *BurndownAnalysis) serializeText(result *BurndownResult, writer io.Writer) {
	fmt.Fprintln(writer, "  granularity:", result.granularity)
	fmt.Fprintln(writer, "  sampling:", result.sampling)
	yaml.PrintMatrix(writer, result.GlobalHistory, 2, "project", true)
	if len(result.FileHistories) > 0 {
		fmt.Fprintln(writer, "  files:")
		keys := sortedKeys(result.FileHistories)
		for _, key := range keys {
			yaml.PrintMatrix(writer, result.FileHistories[key], 4, key, true)
		}
	}
	if len(result.PeopleHistories) > 0 {
		fmt.Fprintln(writer, "  people_sequence:")
		for key := range result.PeopleHistories {
			fmt.Fprintln(writer, "    - "+yaml.SafeString(result.reversedPeopleDict[key]))
		}
		fmt.Fprintln(writer, "  people:")
		for key, val := range result.PeopleHistories {
			yaml.PrintMatrix(writer, val, 4, result.reversedPeopleDict[key], true)
		}
		fmt.Fprintln(writer, "  people_interaction: |-")
		yaml.PrintMatrix(writer, result.PeopleMatrix, 4, "", false)
	}
}
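// serializeText emits YAML of roughly this shape (a sketch with placeholder
// values; the two-space indent implies the keys are nested under a parent key
// written by the framework, not by this file, and the matrix formatting is
// delegated to yaml.PrintMatrix):
//
//	  granularity: 30
//	  sampling: 30
//	  project: |-
//	    ...
//	  files:
//	    "path/to/file.go": |-
//	      ...
//	  people_sequence:
//	    - "Dev Name <dev@example.com>"
//	  people:
//	    ...
//	  people_interaction: |-
//	    ...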
func (analyser *BurndownAnalysis) serializeBinary(result *BurndownResult, writer io.Writer) error {
	message := pb.BurndownAnalysisResults{
		Granularity: int32(result.granularity),
		Sampling:    int32(result.sampling),
	}
	if len(result.GlobalHistory) > 0 {
		message.Project = pb.ToBurndownSparseMatrix(result.GlobalHistory, "project")
	}
	if len(result.FileHistories) > 0 {
		message.Files = make([]*pb.BurndownSparseMatrix, len(result.FileHistories))
		keys := sortedKeys(result.FileHistories)
		i := 0
		for _, key := range keys {
			message.Files[i] = pb.ToBurndownSparseMatrix(
				result.FileHistories[key], key)
			i++
		}
	}
	if len(result.PeopleHistories) > 0 {
		message.People = make(
			[]*pb.BurndownSparseMatrix, len(result.PeopleHistories))
		for key, val := range result.PeopleHistories {
			if len(val) > 0 {
				message.People[key] = pb.ToBurndownSparseMatrix(val, result.reversedPeopleDict[key])
			}
		}
		message.PeopleInteraction = pb.DenseToCompressedSparseRowMatrix(result.PeopleMatrix)
	}
	serialized, err := proto.Marshal(&message)
	if err != nil {
		return err
	}
	_, err = writer.Write(serialized)
	return err
}

func sortedKeys(m map[string]DenseHistory) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}

func checkClose(c io.Closer) {
	if err := c.Close(); err != nil {
		panic(err)
	}
}

// We do a hack and store the day in the lowest 14 bits and the author index in the higher 18.
// Strictly speaking, int can be 64-bit and then the author index occupies 32+18 bits.
// This hack is needed to simplify the values storage inside File-s. We can compare
// different values together and they are compared as days for the same author.
func (analyser *BurndownAnalysis) packPersonWithDay(person int, day int) int {
	if analyser.PeopleNumber == 0 {
		return day
	}
	result := day & burndown.TreeMergeMark
	result |= person << burndown.TreeMaxBinPower
	// This effectively means max (16383 - 1) days (>44 years) and (262144 - 2) devs.
	// One day less because burndown.TreeMergeMark = ((1 << 14) - 1) is a special day.
	return result
}

func (analyser *BurndownAnalysis) unpackPersonWithDay(value int) (int, int) {
	if analyser.PeopleNumber == 0 {
		return identity.AuthorMissing, value
	}
	return value >> burndown.TreeMaxBinPower, value & burndown.TreeMergeMark
}
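// A worked example of the packing above (illustrative; assumes
// analyser.PeopleNumber > 0 and burndown.TreeMaxBinPower = 14):
//
//	packed := analyser.packPersonWithDay(3, 100) // 3<<14 | 100 = 49252
//	person, day := analyser.unpackPersonWithDay(packed)
//	// person == 3, day == 100
//
// Because the day occupies the lowest bits, packed values of the same author
// order chronologically.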
func (analyser *BurndownAnalysis) onNewDay() {
	if analyser.day > analyser.previousDay {
		analyser.previousDay = analyser.day
	}
	analyser.mergedAuthor = identity.AuthorMissing
}

func (analyser *BurndownAnalysis) updateGlobal(currentTime, previousTime, delta int) {
	_, currentDay := analyser.unpackPersonWithDay(currentTime)
	_, previousDay := analyser.unpackPersonWithDay(previousTime)
	currentHistory := analyser.globalHistory[currentDay]
	if currentHistory == nil {
		currentHistory = map[int]int64{}
		analyser.globalHistory[currentDay] = currentHistory
	}
	currentHistory[previousDay] += int64(delta)
}

// updateFile is bound to the specific `history` in the closure.
func (analyser *BurndownAnalysis) updateFile(
	history sparseHistory, currentTime, previousTime, delta int) {
	_, currentDay := analyser.unpackPersonWithDay(currentTime)
	_, previousDay := analyser.unpackPersonWithDay(previousTime)
	currentHistory := history[currentDay]
	if currentHistory == nil {
		currentHistory = map[int]int64{}
		history[currentDay] = currentHistory
	}
	currentHistory[previousDay] += int64(delta)
}

func (analyser *BurndownAnalysis) updateAuthor(currentTime, previousTime, delta int) {
	previousAuthor, previousDay := analyser.unpackPersonWithDay(previousTime)
	if previousAuthor == identity.AuthorMissing {
		return
	}
	_, currentDay := analyser.unpackPersonWithDay(currentTime)
	history := analyser.peopleHistories[previousAuthor]
	if history == nil {
		history = sparseHistory{}
		analyser.peopleHistories[previousAuthor] = history
	}
	currentHistory := history[currentDay]
	if currentHistory == nil {
		currentHistory = map[int]int64{}
		history[currentDay] = currentHistory
	}
	currentHistory[previousDay] += int64(delta)
}

func (analyser *BurndownAnalysis) updateMatrix(currentTime, previousTime, delta int) {
	newAuthor, _ := analyser.unpackPersonWithDay(currentTime)
	oldAuthor, _ := analyser.unpackPersonWithDay(previousTime)
	if oldAuthor == identity.AuthorMissing {
		return
	}
	if newAuthor == oldAuthor && delta > 0 {
		newAuthor = authorSelf
	}
	row := analyser.matrix[oldAuthor]
	if row == nil {
		row = map[int]int64{}
		analyser.matrix[oldAuthor] = row
	}
	cell, exists := row[newAuthor]
	if !exists {
		row[newAuthor] = 0
		cell = 0
	}
	row[newAuthor] = cell + int64(delta)
}

func (analyser *BurndownAnalysis) newFile(
	hash plumbing.Hash, name string, author int, day int, size int) (*burndown.File, error) {
	updaters := make([]burndown.Updater, 1)
	updaters[0] = analyser.updateGlobal
	if analyser.TrackFiles {
		history := analyser.fileHistories[name]
		if history == nil {
			// can be non-nil if the file was created in a future branch
			history = sparseHistory{}
		}
		analyser.fileHistories[name] = history
		updaters = append(updaters, func(currentTime, previousTime, delta int) {
			analyser.updateFile(history, currentTime, previousTime, delta)
		})
	}
	if analyser.PeopleNumber > 0 {
		updaters = append(updaters, analyser.updateAuthor)
		updaters = append(updaters, analyser.updateMatrix)
		day = analyser.packPersonWithDay(author, day)
	}
	return burndown.NewFile(day, size, analyser.fileAllocator, updaters...), nil
}

func (analyser *BurndownAnalysis) handleInsertion(
	change *object.Change, author int, cache map[plumbing.Hash]*items.CachedBlob) error {
	blob := cache[change.To.TreeEntry.Hash]
	lines, err := blob.CountLines()
	if err != nil {
		// binary
		return nil
	}
	name := change.To.Name
	file, exists := analyser.files[name]
	if exists {
		log.Println("\n", analyser, "error")
		return fmt.Errorf("file %s already exists", name)
	}
	var hash plumbing.Hash
	if analyser.day != burndown.TreeMergeMark {
		hash = blob.Hash
	}
	file, err = analyser.newFile(hash, name, author, analyser.day, lines)
	analyser.files[name] = file
	if analyser.day == burndown.TreeMergeMark {
		analyser.mergedFiles[name] = true
	}
	return err
}

func (analyser *BurndownAnalysis) handleDeletion(
	change *object.Change, author int, cache map[plumbing.Hash]*items.CachedBlob) error {
	name := change.From.Name
	file, exists := analyser.files[name]
	blob := cache[change.From.TreeEntry.Hash]
	lines, err := blob.CountLines()
	if exists && err != nil {
		return fmt.Errorf("file %s unexpectedly became binary", name)
	}
	if !exists {
		return nil
	}
	file.Update(analyser.packPersonWithDay(author, analyser.day), 0, 0, lines)
	file.Delete()
	delete(analyser.files, name)
	delete(analyser.fileHistories, name)
	analyser.renames[name] = ""
	if analyser.day == burndown.TreeMergeMark {
		analyser.mergedFiles[name] = false
	}
	return nil
}

func (analyser *BurndownAnalysis) handleModification(
	change *object.Change, author int, cache map[plumbing.Hash]*items.CachedBlob,
	diffs map[string]items.FileDiffData) error {
	if analyser.day == burndown.TreeMergeMark {
		analyser.mergedFiles[change.To.Name] = true
	}
	file, exists := analyser.files[change.From.Name]
	if !exists {
		// this indeed may happen
		return analyser.handleInsertion(change, author, cache)
	}
	// possible rename
	if change.To.Name != change.From.Name {
		err := analyser.handleRename(change.From.Name, change.To.Name)
		if err != nil {
			return err
		}
	}
	// Check for binary changes
	blobFrom := cache[change.From.TreeEntry.Hash]
	_, errFrom := blobFrom.CountLines()
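	// CountLines fails only on binary content, so errFrom/errTo classify the
	// transition into four cases: text->text (diff below), text->binary
	// (handled as a deletion), binary->text (handled as an insertion), and
	// binary->binary (nothing to track).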
	blobTo := cache[change.To.TreeEntry.Hash]
	_, errTo := blobTo.CountLines()
	if errFrom != errTo {
		if errFrom != nil {
			// the file is no longer binary
			return analyser.handleInsertion(change, author, cache)
		}
		// the file became binary
		return analyser.handleDeletion(change, author, cache)
	} else if errFrom != nil {
		// what are we doing here?!
		return nil
	}
	thisDiffs := diffs[change.To.Name]
	if file.Len() != thisDiffs.OldLinesOfCode {
		log.Printf("====TREE====\n%s", file.Dump())
		return fmt.Errorf("%s: internal integrity error src %d != %d %s -> %s",
			change.To.Name, thisDiffs.OldLinesOfCode, file.Len(),
			change.From.TreeEntry.Hash.String(), change.To.TreeEntry.Hash.String())
	}
	// we do not call RunesToDiffLines so the number of lines equals
	// the rune count
	position := 0
	pending := diffmatchpatch.Diff{Text: ""}
	apply := func(edit diffmatchpatch.Diff) {
		length := utf8.RuneCountInString(edit.Text)
		if edit.Type == diffmatchpatch.DiffInsert {
			file.Update(analyser.packPersonWithDay(author, analyser.day), position, length, 0)
			position += length
		} else {
			file.Update(analyser.packPersonWithDay(author, analyser.day), position, 0, length)
		}
		if analyser.Debug {
			file.Validate()
		}
	}
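	// An illustrative walk-through of the loop below (made-up diff; rune
	// counts in parentheses): for Equal("ab"), Delete("cd"), Insert("xyz"),
	// Equal("e") the code
	//  1. advances position by 2 on Equal("ab");
	//  2. holds Delete("cd") in `pending`;
	//  3. on Insert("xyz") collapses the pending deletion and the insertion
	//     into a single file.Update(..., position, 3, 2) call;
	//  4. on Equal("e") finds nothing pending and advances position again.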
	for _, edit := range thisDiffs.Diffs {
		dumpBefore := ""
		if analyser.Debug {
			dumpBefore = file.Dump()
		}
		length := utf8.RuneCountInString(edit.Text)
		debugError := func() {
			log.Printf("%s: internal diff error\n", change.To.Name)
			log.Printf("Update(%d, %d, %d (0), %d (0))\n", analyser.day, position,
				length, utf8.RuneCountInString(pending.Text))
			if dumpBefore != "" {
				log.Printf("====TREE BEFORE====\n%s====END====\n", dumpBefore)
			}
			log.Printf("====TREE AFTER====\n%s====END====\n", file.Dump())
		}
		switch edit.Type {
		case diffmatchpatch.DiffEqual:
			if pending.Text != "" {
				apply(pending)
				pending.Text = ""
			}
			position += length
		case diffmatchpatch.DiffInsert:
			if pending.Text != "" {
				if pending.Type == diffmatchpatch.DiffInsert {
					debugError()
					return errors.New("DiffInsert may not appear after DiffInsert")
				}
				file.Update(analyser.packPersonWithDay(author, analyser.day), position, length,
					utf8.RuneCountInString(pending.Text))
				if analyser.Debug {
					file.Validate()
				}
				position += length
				pending.Text = ""
			} else {
				pending = edit
			}
		case diffmatchpatch.DiffDelete:
			if pending.Text != "" {
				debugError()
				return errors.New("DiffDelete may not appear after DiffInsert/DiffDelete")
			}
			pending = edit
		default:
			debugError()
			return fmt.Errorf("diff operation is not supported: %d", edit.Type)
		}
	}
	if pending.Text != "" {
		apply(pending)
		pending.Text = ""
	}
	if file.Len() != thisDiffs.NewLinesOfCode {
		return fmt.Errorf("%s: internal integrity error dst %d != %d %s -> %s",
			change.To.Name, thisDiffs.NewLinesOfCode, file.Len(),
			change.From.TreeEntry.Hash.String(), change.To.TreeEntry.Hash.String())
	}
	return nil
}

func (analyser *BurndownAnalysis) handleRename(from, to string) error {
	if from == to {
		return nil
	}
	file, exists := analyser.files[from]
	if !exists {
		return fmt.Errorf("file %s > %s does not exist (files)", from, to)
	}
	analyser.files[to] = file
	delete(analyser.files, from)
	if analyser.day == burndown.TreeMergeMark {
		analyser.mergedFiles[from] = false
	}
	if analyser.TrackFiles {
		history := analyser.fileHistories[from]
		if history == nil {
			// a future branch could have already renamed it and we are one step behind
			futureRename, exists := analyser.renames[from]
			if futureRename == "" && exists {
				// the file will be deleted in the future, so the history does not matter
				history = sparseHistory{}
			} else {
				history = analyser.fileHistories[futureRename]
				if history == nil {
					return fmt.Errorf("file %s > %s does not exist (histories)", from, to)
				}
			}
		}
		analyser.fileHistories[to] = history
		delete(analyser.fileHistories, from)
	}
	analyser.renames[from] = to
	return nil
}

func (analyser *BurndownAnalysis) groupSparseHistory(
	history sparseHistory, lastDay int) (DenseHistory, int) {
	if len(history) == 0 {
		panic("empty history")
	}
	var days []int
	for day := range history {
		days = append(days, day)
	}
	sort.Ints(days)
	if lastDay >= 0 {
		if days[len(days)-1] < lastDay {
			days = append(days, lastDay)
		} else if days[len(days)-1] > lastDay {
			panic("days corruption")
		}
	} else {
		lastDay = days[len(days)-1]
	}
	// [y][x]
	// y - sampling
	// x - granularity
	samples := lastDay/analyser.Sampling + 1
	bands := lastDay/analyser.Granularity + 1
	result := make(DenseHistory, samples)
	for i := 0; i < samples; i++ {
		result[i] = make([]int64, bands)
	}
	prevsi := 0
	for _, day := range days {
		si := day / analyser.Sampling
		if si > prevsi {
			state := result[prevsi]
			for i := prevsi + 1; i <= si; i++ {
				copy(result[i], state)
			}
			prevsi = si
		}
		sample := result[si]
		for bday, value := range history[day] {
			sample[bday/analyser.Granularity] += value
		}
	}
	return result, lastDay
}
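// A worked example for groupSparseHistory (illustrative, made-up history):
// with Sampling = 2 and Granularity = 2, the sparse history
//
//	{0: {0: +4}, 2: {0: -1, 2: +3}}
//
// has lastDay = 2, hence 2 samples and 2 bands. Sample 0 becomes [4 0];
// sample 1 starts as a copy of sample 0 and applies day 2's deltas,
// yielding [3 3].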
func init() {
	core.Registry.Register(&BurndownAnalysis{})
}