package leaves

import (
	"errors"
	"fmt"
	"io"
	"log"
	"sort"
	"sync"
	"unicode/utf8"

	"github.com/gogo/protobuf/proto"
	"github.com/sergi/go-diff/diffmatchpatch"
	"gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
	"gopkg.in/src-d/go-git.v4/utils/merkletrie"
	"gopkg.in/src-d/hercules.v4/internal/burndown"
	"gopkg.in/src-d/hercules.v4/internal/core"
	"gopkg.in/src-d/hercules.v4/internal/pb"
	items "gopkg.in/src-d/hercules.v4/internal/plumbing"
	"gopkg.in/src-d/hercules.v4/internal/plumbing/identity"
	"gopkg.in/src-d/hercules.v4/yaml"
)

// BurndownAnalysis allows gathering the line burndown statistics for a Git repository.
// It is a LeafPipelineItem.
// Reference: https://erikbern.com/2016/12/05/the-half-life-of-code.html
type BurndownAnalysis struct {
	// Granularity sets the size of each band - the number of days it spans.
	// Smaller values provide better resolution but require more work and eat more
	// memory. 30 days is usually enough.
	Granularity int
	// Sampling sets how detailed the statistics are - the size of the interval in
	// days between consecutive measurements. It may not be greater than Granularity. Try 15 or 30.
	Sampling int
	// TrackFiles enables or disables the fine-grained per-file burndown analysis.
	// It does not change the project-level burndown results.
	TrackFiles bool
	// PeopleNumber is the number of developers for which to collect the burndown stats. 0 disables it.
	PeopleNumber int
	// Debug activates the debugging mode. Analyse() runs slower in this mode
	// but it accurately checks all the intermediate states for invariant
	// violations.
	Debug bool
	// repository points to the analysed Git repository struct from go-git.
	repository *git.Repository
	// globalHistory is the daily deltas of daily line counts.
	// E.g. day 0:  day 0 +50 lines
	//      day 10: day 0 -10 lines; day 10 +20 lines
	//      day 12: day 0 -5 lines; day 10 -3 lines; day 12 +10 lines
	// map[0][0] = 50
	// map[10][0] = -10
	// map[10][10] = 20
	// map[12][0] = -5
	// map[12][10] = -3
	// map[12][12] = 10
	globalHistory sparseHistory
	// fileHistories is the daily deltas of each file's daily line counts.
	fileHistories map[string]sparseHistory
	// peopleHistories is the daily deltas of each person's daily line counts.
	peopleHistories []sparseHistory
	// files is the mapping <file path> -> *burndown.File.
	files map[string]*burndown.File
	// renames is a quick and dirty solution for the "future branch renames" problem.
	renames map[string]string
	// matrix is the mutual deletions and self insertions.
	matrix []map[int]int64
	// day is the most recent day index processed.
	day int
	// previousDay is the day from the previous sample period -
	// different from DaysSinceStart.previousDay.
	previousDay int
	// reversedPeopleDict references IdentityDetector.ReversedPeopleDict.
	reversedPeopleDict []string
}

// BurndownResult carries the result of running BurndownAnalysis - it is returned by
// BurndownAnalysis.Finalize().
type BurndownResult struct {
	// GlobalHistory is the burndown of the entire repository:
	// [number of samples][number of bands].
	// The number of samples depends on Sampling: the smaller the Sampling, the bigger the number.
	// The number of bands depends on Granularity: the smaller the Granularity, the bigger the number.
	GlobalHistory DenseHistory
	// FileHistories is the burndown of each tracked file. The key is the path inside
	// the Git repository. The value's dimensions are the same as in GlobalHistory.
	FileHistories map[string]DenseHistory
	// PeopleHistories is the burndown of each developer:
	// [number of people][number of samples][number of bands].
	PeopleHistories []DenseHistory
	// PeopleMatrix is the overwrites matrix: [number of people][number of people + 2].
	// The first element is the total number of lines added by the author.
	// The second element is the number of removals by unidentified authors (outside reversedPeopleDict).
	// The rest of the elements are equal to the number of line removals by the corresponding
	// authors in reversedPeopleDict: 2 -> 0, 3 -> 1, etc.
	PeopleMatrix DenseHistory

	// The following members are private.

	// reversedPeopleDict is borrowed from IdentityDetector and becomes available after
	// Pipeline.Initialize(facts map[string]interface{}). Thus it can be obtained via
	// facts[FactIdentityDetectorReversedPeopleDict].
	reversedPeopleDict []string
	// sampling and granularity are copied from BurndownAnalysis and stored for service purposes
	// such as merging several results together.
	sampling    int
	granularity int
}

const (
	// ConfigBurndownGranularity is the name of the option to set BurndownAnalysis.Granularity.
	ConfigBurndownGranularity = "Burndown.Granularity"
	// ConfigBurndownSampling is the name of the option to set BurndownAnalysis.Sampling.
	ConfigBurndownSampling = "Burndown.Sampling"
	// ConfigBurndownTrackFiles enables burndown collection for files.
	ConfigBurndownTrackFiles = "Burndown.TrackFiles"
	// ConfigBurndownTrackPeople enables burndown collection for authors.
	ConfigBurndownTrackPeople = "Burndown.TrackPeople"
	// ConfigBurndownDebug enables some extra debug assertions.
	ConfigBurndownDebug = "Burndown.Debug"
	// DefaultBurndownGranularity is the default number of days for BurndownAnalysis.Granularity
	// and BurndownAnalysis.Sampling.
	DefaultBurndownGranularity = 30
	// authorSelf is the internal author index which is used in BurndownAnalysis.Finalize() to
	// format the author overwrites matrix.
	authorSelf = (1 << (32 - burndown.TreeMaxBinPower)) - 2
)
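
// For scale (illustrative only): the comment inside packPersonWithDay() below fixes
// burndown.TreeMaxBinPower at 14, so authorSelf evaluates to (1<<18) - 2 = 262142;
// Finalize() later remaps it, together with identity.AuthorMissing, into the first
// two columns of the people matrix.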

type sparseHistory = map[int]map[int]int64

// DenseHistory is the matrix [number of samples][number of bands] -> number of lines.
type DenseHistory = [][]int64
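
// To make the two shapes concrete: sparseHistory{12: {0: -5, 10: -3, 12: 10}} records
// that, on day 12, five lines written on day 0 and three lines written on day 10 were
// removed while ten new lines appeared; groupSparseHistory() below flattens such maps
// into a DenseHistory.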

// Name of this PipelineItem. Uniquely identifies the type, used for mapping keys, etc.
func (analyser *BurndownAnalysis) Name() string {
	return "Burndown"
}

// Provides returns the list of names of entities which are produced by this PipelineItem.
// Each produced entity will be inserted into `deps` of dependent Consume()-s according
// to this list. Also used by core.Registry to build the global map of providers.
func (analyser *BurndownAnalysis) Provides() []string {
	return []string{}
}

// Requires returns the list of names of entities which are needed by this PipelineItem.
// Each requested entity will be inserted into `deps` of Consume(). In turn, those
// entities are Provides() upstream.
func (analyser *BurndownAnalysis) Requires() []string {
	arr := [...]string{
		items.DependencyFileDiff, items.DependencyTreeChanges, items.DependencyBlobCache,
		items.DependencyDay, identity.DependencyAuthor}
	return arr[:]
}

// ListConfigurationOptions returns the list of changeable public properties of this PipelineItem.
func (analyser *BurndownAnalysis) ListConfigurationOptions() []core.ConfigurationOption {
	options := [...]core.ConfigurationOption{{
		Name:        ConfigBurndownGranularity,
		Description: "How many days there are in a single band.",
		Flag:        "granularity",
		Type:        core.IntConfigurationOption,
		Default:     DefaultBurndownGranularity}, {
		Name:        ConfigBurndownSampling,
		Description: "How frequently to record the state in days.",
		Flag:        "sampling",
		Type:        core.IntConfigurationOption,
		Default:     DefaultBurndownGranularity}, {
		Name:        ConfigBurndownTrackFiles,
		Description: "Record detailed statistics per each file.",
		Flag:        "burndown-files",
		Type:        core.BoolConfigurationOption,
		Default:     false}, {
		Name:        ConfigBurndownTrackPeople,
		Description: "Record detailed statistics per each developer.",
		Flag:        "burndown-people",
		Type:        core.BoolConfigurationOption,
		Default:     false}, {
		Name:        ConfigBurndownDebug,
		Description: "Validate the trees on each step.",
		Flag:        "burndown-debug",
		Type:        core.BoolConfigurationOption,
		Default:     false},
	}
	return options[:]
}

// Configure sets the properties previously published by ListConfigurationOptions().
func (analyser *BurndownAnalysis) Configure(facts map[string]interface{}) {
	if val, exists := facts[ConfigBurndownGranularity].(int); exists {
		analyser.Granularity = val
	}
	if val, exists := facts[ConfigBurndownSampling].(int); exists {
		analyser.Sampling = val
	}
	if val, exists := facts[ConfigBurndownTrackFiles].(bool); exists {
		analyser.TrackFiles = val
	}
	if people, exists := facts[ConfigBurndownTrackPeople].(bool); people {
		if val, exists := facts[identity.FactIdentityDetectorPeopleCount].(int); exists {
			analyser.PeopleNumber = val
			analyser.reversedPeopleDict = facts[identity.FactIdentityDetectorReversedPeopleDict].([]string)
		}
	} else if exists {
		analyser.PeopleNumber = 0
	}
	if val, exists := facts[ConfigBurndownDebug].(bool); exists {
		analyser.Debug = val
	}
}

// Flag for the command line switch which enables this analysis.
func (analyser *BurndownAnalysis) Flag() string {
	return "burndown"
}
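
// Putting the flag above together with the option flags from ListConfigurationOptions(),
// a typical invocation might look like (illustrative, not an exhaustive reference):
//   hercules --burndown --burndown-files --granularity 30 --sampling 30 <repository>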

// Initialize resets the temporary caches and prepares this PipelineItem for a series of Consume()
// calls. The repository which is going to be analysed is supplied as an argument.
func (analyser *BurndownAnalysis) Initialize(repository *git.Repository) {
	if analyser.Granularity <= 0 {
		log.Printf("Warning: adjusted the granularity to %d days\n",
			DefaultBurndownGranularity)
		analyser.Granularity = DefaultBurndownGranularity
	}
	if analyser.Sampling <= 0 {
		log.Printf("Warning: adjusted the sampling to %d days\n",
			DefaultBurndownGranularity)
		analyser.Sampling = DefaultBurndownGranularity
	}
	if analyser.Sampling > analyser.Granularity {
		log.Printf("Warning: granularity may not be less than sampling, adjusted to %d\n",
			analyser.Granularity)
		analyser.Sampling = analyser.Granularity
	}
	analyser.repository = repository
	analyser.globalHistory = sparseHistory{}
	analyser.fileHistories = map[string]sparseHistory{}
	analyser.peopleHistories = make([]sparseHistory, analyser.PeopleNumber)
	analyser.files = map[string]*burndown.File{}
	analyser.renames = map[string]string{}
	analyser.matrix = make([]map[int]int64, analyser.PeopleNumber)
	analyser.day = 0
	analyser.previousDay = 0
}

// Consume runs this PipelineItem on the next commit's data.
// `deps` contain all the results from upstream PipelineItem-s as requested by Requires().
// Additionally, DependencyCommit is always present there and represents the analysed *object.Commit.
// This function returns the mapping with analysis results. The keys must be the same as
// in Provides(). If there was an error, nil is returned.
func (analyser *BurndownAnalysis) Consume(deps map[string]interface{}) (map[string]interface{}, error) {
	author := deps[identity.DependencyAuthor].(int)
	day := deps[items.DependencyDay].(int)
	if !core.IsMergeCommit(deps) {
		analyser.day = day
		analyser.onNewDay()
	} else {
		// effectively disables the status updates if the commit is a merge;
		// we will analyse the conflict resolution in Merge()
		analyser.day = burndown.TreeMergeMark
	}
	cache := deps[items.DependencyBlobCache].(map[plumbing.Hash]*object.Blob)
	treeDiffs := deps[items.DependencyTreeChanges].(object.Changes)
	fileDiffs := deps[items.DependencyFileDiff].(map[string]items.FileDiffData)
	for _, change := range treeDiffs {
		action, _ := change.Action()
		var err error
		switch action {
		case merkletrie.Insert:
			err = analyser.handleInsertion(change, author, cache)
		case merkletrie.Delete:
			err = analyser.handleDeletion(change, author, cache)
		case merkletrie.Modify:
			err = analyser.handleModification(change, author, cache, fileDiffs)
		}
		if err != nil {
			return nil, err
		}
	}
	// in case of a merge, analyser.day equals TreeMergeMark here, so restore it
	analyser.day = day
	return nil, nil
}

// Fork clones this item. Everything is copied by reference except the files
// which are copied by value.
func (analyser *BurndownAnalysis) Fork(n int) []core.PipelineItem {
	result := make([]core.PipelineItem, n)
	for i := range result {
		clone := *analyser
		clone.files = map[string]*burndown.File{}
		for key, file := range analyser.files {
			clone.files[key] = file.Clone(false)
		}
		result[i] = &clone
	}
	return result
}

// Merge combines several items together. We apply the special file merging logic here.
func (analyser *BurndownAnalysis) Merge(branches []core.PipelineItem) {
	for key, file := range analyser.files {
		others := make([]*burndown.File, 0, len(branches))
		for _, branch := range branches {
			file := branch.(*BurndownAnalysis).files[key]
			if file != nil {
				// file can be nil if it is considered binary in the other branch
				others = append(others, file)
			}
		}
		// don't worry, we compare the hashes first before heavy-lifting
		if file.Merge(analyser.day, others...) {
			for _, branch := range branches {
				branch.(*BurndownAnalysis).files[key] = file.Clone(false)
			}
		}
	}
	analyser.onNewDay()
}

// Finalize returns the result of the analysis. Further Consume() calls are not expected.
func (analyser *BurndownAnalysis) Finalize() interface{} {
	globalHistory, lastDay := analyser.groupSparseHistory(analyser.globalHistory, -1)
	fileHistories := map[string]DenseHistory{}
	for key, history := range analyser.fileHistories {
		fileHistories[key], _ = analyser.groupSparseHistory(history, lastDay)
	}
	peopleHistories := make([]DenseHistory, analyser.PeopleNumber)
	for i, history := range analyser.peopleHistories {
		if len(history) > 0 {
			// there can be people with only trivial merge commits and without own lines
			peopleHistories[i], _ = analyser.groupSparseHistory(history, lastDay)
		} else {
			peopleHistories[i] = make(DenseHistory, len(globalHistory))
			for j, gh := range globalHistory {
				peopleHistories[i][j] = make([]int64, len(gh))
			}
		}
	}
	peopleMatrix := make(DenseHistory, analyser.PeopleNumber)
	for i, row := range analyser.matrix {
		mrow := make([]int64, analyser.PeopleNumber+2)
		peopleMatrix[i] = mrow
		for key, val := range row {
			if key == identity.AuthorMissing {
				key = -1
			} else if key == authorSelf {
				key = -2
			}
			mrow[key+2] = val
		}
	}
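	// Column layout after the remapping above: column 0 receives the authorSelf
	// counter (the author's own insertions), column 1 receives identity.AuthorMissing,
	// and column i+2 corresponds to author i in reversedPeopleDict.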
	return BurndownResult{
		GlobalHistory:      globalHistory,
		FileHistories:      fileHistories,
		PeopleHistories:    peopleHistories,
		PeopleMatrix:       peopleMatrix,
		reversedPeopleDict: analyser.reversedPeopleDict,
		sampling:           analyser.Sampling,
		granularity:        analyser.Granularity,
	}
}

// Serialize converts the analysis result as returned by Finalize() to text or bytes.
// The text format is YAML and the bytes format is Protocol Buffers.
func (analyser *BurndownAnalysis) Serialize(result interface{}, binary bool, writer io.Writer) error {
	burndownResult := result.(BurndownResult)
	if binary {
		return analyser.serializeBinary(&burndownResult, writer)
	}
	analyser.serializeText(&burndownResult, writer)
	return nil
}

// Deserialize converts the specified protobuf bytes to BurndownResult.
func (analyser *BurndownAnalysis) Deserialize(pbmessage []byte) (interface{}, error) {
	msg := pb.BurndownAnalysisResults{}
	err := proto.Unmarshal(pbmessage, &msg)
	if err != nil {
		return nil, err
	}
	result := BurndownResult{}
	convertCSR := func(mat *pb.BurndownSparseMatrix) DenseHistory {
		res := make(DenseHistory, mat.NumberOfRows)
		for i := 0; i < int(mat.NumberOfRows); i++ {
			res[i] = make([]int64, mat.NumberOfColumns)
			for j := 0; j < len(mat.Rows[i].Columns); j++ {
				res[i][j] = int64(mat.Rows[i].Columns[j])
			}
		}
		return res
	}
	result.GlobalHistory = convertCSR(msg.Project)
	result.FileHistories = map[string]DenseHistory{}
	for _, mat := range msg.Files {
		result.FileHistories[mat.Name] = convertCSR(mat)
	}
	result.reversedPeopleDict = make([]string, len(msg.People))
	result.PeopleHistories = make([]DenseHistory, len(msg.People))
	for i, mat := range msg.People {
		result.PeopleHistories[i] = convertCSR(mat)
		result.reversedPeopleDict[i] = mat.Name
	}
	if msg.PeopleInteraction != nil {
		result.PeopleMatrix = make(DenseHistory, msg.PeopleInteraction.NumberOfRows)
	}
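	// PeopleInteraction is stored as a standard compressed sparse row (CSR) matrix:
	// the nonzero columns of row i are Indices[Indptr[i]:Indptr[i+1]], with the
	// matching values at the same positions in Data.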
	for i := 0; i < len(result.PeopleMatrix); i++ {
		result.PeopleMatrix[i] = make([]int64, msg.PeopleInteraction.NumberOfColumns)
		for j := int(msg.PeopleInteraction.Indptr[i]); j < int(msg.PeopleInteraction.Indptr[i+1]); j++ {
			result.PeopleMatrix[i][msg.PeopleInteraction.Indices[j]] = msg.PeopleInteraction.Data[j]
		}
	}
	result.sampling = int(msg.Sampling)
	result.granularity = int(msg.Granularity)
	return result, nil
}

// MergeResults combines two BurndownResult-s together.
func (analyser *BurndownAnalysis) MergeResults(
	r1, r2 interface{}, c1, c2 *core.CommonAnalysisResult) interface{} {
	bar1 := r1.(BurndownResult)
	bar2 := r2.(BurndownResult)
	merged := BurndownResult{}
	if bar1.sampling < bar2.sampling {
		merged.sampling = bar1.sampling
	} else {
		merged.sampling = bar2.sampling
	}
	if bar1.granularity < bar2.granularity {
		merged.granularity = bar1.granularity
	} else {
		merged.granularity = bar2.granularity
	}
	var people map[string][3]int
	people, merged.reversedPeopleDict = identity.Detector{}.MergeReversedDicts(
		bar1.reversedPeopleDict, bar2.reversedPeopleDict)
	var wg sync.WaitGroup
	if len(bar1.GlobalHistory) > 0 || len(bar2.GlobalHistory) > 0 {
		wg.Add(1)
		go func() {
			defer wg.Done()
			merged.GlobalHistory = mergeMatrices(
				bar1.GlobalHistory, bar2.GlobalHistory,
				bar1.granularity, bar1.sampling,
				bar2.granularity, bar2.sampling,
				c1, c2)
		}()
	}
	if len(bar1.FileHistories) > 0 || len(bar2.FileHistories) > 0 {
		merged.FileHistories = map[string]DenseHistory{}
		historyMutex := sync.Mutex{}
		for key, fh1 := range bar1.FileHistories {
			if fh2, exists := bar2.FileHistories[key]; exists {
				wg.Add(1)
				go func(fh1, fh2 DenseHistory, key string) {
					defer wg.Done()
					historyMutex.Lock()
					defer historyMutex.Unlock()
					merged.FileHistories[key] = mergeMatrices(
						fh1, fh2, bar1.granularity, bar1.sampling, bar2.granularity, bar2.sampling, c1, c2)
				}(fh1, fh2, key)
			} else {
				historyMutex.Lock()
				merged.FileHistories[key] = fh1
				historyMutex.Unlock()
			}
		}
		for key, fh2 := range bar2.FileHistories {
			if _, exists := bar1.FileHistories[key]; !exists {
				historyMutex.Lock()
				merged.FileHistories[key] = fh2
				historyMutex.Unlock()
			}
		}
	}
	if len(merged.reversedPeopleDict) > 0 {
		merged.PeopleHistories = make([]DenseHistory, len(merged.reversedPeopleDict))
		for i, key := range merged.reversedPeopleDict {
			ptrs := people[key]
			if ptrs[1] < 0 {
				if len(bar2.PeopleHistories) > 0 {
					merged.PeopleHistories[i] = bar2.PeopleHistories[ptrs[2]]
				}
			} else if ptrs[2] < 0 {
				if len(bar1.PeopleHistories) > 0 {
					merged.PeopleHistories[i] = bar1.PeopleHistories[ptrs[1]]
				}
			} else {
				wg.Add(1)
				go func(i int) {
					defer wg.Done()
					var m1, m2 DenseHistory
					if len(bar1.PeopleHistories) > 0 {
						m1 = bar1.PeopleHistories[ptrs[1]]
					}
					if len(bar2.PeopleHistories) > 0 {
						m2 = bar2.PeopleHistories[ptrs[2]]
					}
					merged.PeopleHistories[i] = mergeMatrices(
						m1, m2,
						bar1.granularity, bar1.sampling,
						bar2.granularity, bar2.sampling,
						c1, c2,
					)
				}(i)
			}
		}
		wg.Add(1)
		go func() {
			defer wg.Done()
			if len(bar2.PeopleMatrix) == 0 {
				merged.PeopleMatrix = bar1.PeopleMatrix
				// extend the matrix in both directions
				for i := 0; i < len(merged.PeopleMatrix); i++ {
					for j := len(bar1.reversedPeopleDict); j < len(merged.reversedPeopleDict); j++ {
						merged.PeopleMatrix[i] = append(merged.PeopleMatrix[i], 0)
					}
				}
				for i := len(bar1.reversedPeopleDict); i < len(merged.reversedPeopleDict); i++ {
					merged.PeopleMatrix = append(
						merged.PeopleMatrix, make([]int64, len(merged.reversedPeopleDict)+2))
				}
			} else {
				merged.PeopleMatrix = make(DenseHistory, len(merged.reversedPeopleDict))
				for i := range merged.PeopleMatrix {
					merged.PeopleMatrix[i] = make([]int64, len(merged.reversedPeopleDict)+2)
				}
				for i, key := range bar1.reversedPeopleDict {
					mi := people[key][0] // index in merged.reversedPeopleDict
					copy(merged.PeopleMatrix[mi][:2], bar1.PeopleMatrix[i][:2])
					for j, val := range bar1.PeopleMatrix[i][2:] {
						merged.PeopleMatrix[mi][2+people[bar1.reversedPeopleDict[j]][0]] = val
					}
				}
				for i, key := range bar2.reversedPeopleDict {
					mi := people[key][0] // index in merged.reversedPeopleDict
					merged.PeopleMatrix[mi][0] += bar2.PeopleMatrix[i][0]
					merged.PeopleMatrix[mi][1] += bar2.PeopleMatrix[i][1]
					for j, val := range bar2.PeopleMatrix[i][2:] {
						merged.PeopleMatrix[mi][2+people[bar2.reversedPeopleDict[j]][0]] += val
					}
				}
			}
		}()
	}
	wg.Wait()
	return merged
}

// mergeMatrices takes two [number of samples][number of bands] matrices,
// resamples them to days so that they become square, sums and resamples back to the
// least of (sampling1, sampling2) and (granularity1, granularity2).
func mergeMatrices(m1, m2 DenseHistory, granularity1, sampling1, granularity2, sampling2 int,
	c1, c2 *core.CommonAnalysisResult) DenseHistory {
	commonMerged := *c1
	commonMerged.Merge(c2)
	var granularity, sampling int
	if sampling1 < sampling2 {
		sampling = sampling1
	} else {
		sampling = sampling2
	}
	if granularity1 < granularity2 {
		granularity = granularity1
	} else {
		granularity = granularity2
	}
	size := int((commonMerged.EndTime - commonMerged.BeginTime) / (3600 * 24))
	daily := make([][]float32, size+granularity)
	for i := range daily {
		daily[i] = make([]float32, size+sampling)
	}
	if len(m1) > 0 {
		addBurndownMatrix(m1, granularity1, sampling1, daily,
			int(c1.BeginTime-commonMerged.BeginTime)/(3600*24))
	}
	if len(m2) > 0 {
		addBurndownMatrix(m2, granularity2, sampling2, daily,
			int(c2.BeginTime-commonMerged.BeginTime)/(3600*24))
	}
	// convert daily to [][]int64
	result := make(DenseHistory, (size+sampling-1)/sampling)
	for i := range result {
		result[i] = make([]int64, (size+granularity-1)/granularity)
		sampledIndex := i * sampling
		if i == len(result)-1 {
			sampledIndex = size - 1
		}
		for j := 0; j < len(result[i]); j++ {
			accum := float32(0)
			for k := j * granularity; k < (j+1)*granularity && k < size; k++ {
				accum += daily[sampledIndex][k]
			}
			result[i][j] = int64(accum)
		}
	}
	return result
}
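
// A worked size example for mergeMatrices() (hypothetical timestamps): if c1 covers
// days 0..89 and c2 covers days 30..119, the merged interval spans size = 120 days;
// with sampling = min(15, 30) = 15 and granularity = min(30, 30) = 30 the result has
// (120+15-1)/15 = 8 sample rows and (120+30-1)/30 = 4 bands.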

// addBurndownMatrix explodes `matrix` so that it is daily sampled and has daily bands,
// shifts by `offset` days and adds to the accumulator. `daily` size is square and is
// guaranteed to fit `matrix` by the caller.
// Rows: *at least* len(matrix) * sampling + offset
// Columns: *at least* len(matrix[...]) * granularity + offset
// `matrix` can be sparse, so that the last columns which are equal to 0 are truncated.
func addBurndownMatrix(matrix DenseHistory, granularity, sampling int, daily [][]float32, offset int) {
	// determine the maximum number of bands; the actual one may be larger but we do not care
	maxCols := 0
	for _, row := range matrix {
		if maxCols < len(row) {
			maxCols = len(row)
		}
	}
	neededRows := len(matrix)*sampling + offset
	if len(daily) < neededRows {
		panic(fmt.Sprintf("merge bug: too few daily rows: required %d, have %d",
			neededRows, len(daily)))
	}
	if len(daily[0]) < maxCols {
		panic(fmt.Sprintf("merge bug: too few daily cols: required %d, have %d",
			maxCols, len(daily[0])))
	}
	for x := 0; x < maxCols; x++ {
		for y := 0; y < len(matrix); y++ {
			if x*granularity > (y+1)*sampling {
				// the future is zeros
				continue
			}
			decay := func(startIndex int, startVal float32) {
				if startVal == 0 {
					return
				}
				k := float32(matrix[y][x]) / startVal // <= 1
				scale := float32((y+1)*sampling - startIndex)
				for i := x * granularity; i < (x+1)*granularity; i++ {
					initial := daily[startIndex-1+offset][i+offset]
					for j := startIndex; j < (y+1)*sampling; j++ {
						daily[j+offset][i+offset] = initial * (1 + (k-1)*float32(j-startIndex+1)/scale)
					}
				}
			}
			raise := func(finishIndex int, finishVal float32) {
				var initial float32
				if y > 0 {
					initial = float32(matrix[y-1][x])
				}
				startIndex := y * sampling
				if startIndex < x*granularity {
					startIndex = x * granularity
				}
				if startIndex == finishIndex {
					return
				}
				avg := (finishVal - initial) / float32(finishIndex-startIndex)
				for j := y * sampling; j < finishIndex; j++ {
					for i := startIndex; i <= j; i++ {
						daily[j+offset][i+offset] = avg
					}
				}
				// copy [x*g..y*s)
				for j := y * sampling; j < finishIndex; j++ {
					for i := x * granularity; i < y*sampling; i++ {
						daily[j+offset][i+offset] = daily[j-1+offset][i+offset]
					}
				}
			}
			if (x+1)*granularity >= (y+1)*sampling {
				// x*granularity <= (y+1)*sampling
				// 1. x*granularity <= y*sampling
				//    y*sampling..(y+1)*sampling
				//
				//       x+1
				//        /
				//       /
				//      / y+1  -|
				//     /        |
				//    / y      -|
				//   /
				//  / x
				//
				// 2. x*granularity > y*sampling
				//    x*granularity..(y+1)*sampling
				//
				//       x+1
				//        /
				//       /
				//      / y+1  -|
				//     /        |
				//    / x      -|
				//   /
				//  / y
				if x*granularity <= y*sampling {
					raise((y+1)*sampling, float32(matrix[y][x]))
				} else if (y+1)*sampling > x*granularity {
					raise((y+1)*sampling, float32(matrix[y][x]))
					avg := float32(matrix[y][x]) / float32((y+1)*sampling-x*granularity)
					for j := x * granularity; j < (y+1)*sampling; j++ {
						for i := x * granularity; i <= j; i++ {
							daily[j+offset][i+offset] = avg
						}
					}
				}
			} else if (x+1)*granularity >= y*sampling {
				// y*sampling <= (x+1)*granularity < (y+1)*sampling
				// y*sampling..(x+1)*granularity
				// (x+1)*granularity..(y+1)*sampling
				//
				//        x+1
				//         /\
				//        /  \
				//       /    \
				//      /    y+1
				//     /
				//    y
				v1 := float32(matrix[y-1][x])
				v2 := float32(matrix[y][x])
				var peak float32
				delta := float32((x+1)*granularity - y*sampling)
				var scale float32
				var previous float32
				if y > 0 && (y-1)*sampling >= x*granularity {
					// x*g <= (y-1)*s <= y*s <= (x+1)*g <= (y+1)*s
					//           |________|.......^
					if y > 1 {
						previous = float32(matrix[y-2][x])
					}
					scale = float32(sampling)
				} else {
					// (y-1)*s < x*g <= y*s <= (x+1)*g <= (y+1)*s
					//            |______|.......^
					if y == 0 {
						scale = float32(sampling)
					} else {
						scale = float32(y*sampling - x*granularity)
					}
				}
				peak = v1 + (v1-previous)/scale*delta
				if v2 > peak {
					// we need to adjust the peak, it may not be less than the decayed value
					if y < len(matrix)-1 {
						// y*s <= (x+1)*g <= (y+1)*s < (y+2)*s
						//           ^.........|_________|
						k := (v2 - float32(matrix[y+1][x])) / float32(sampling) // > 0
						peak = float32(matrix[y][x]) + k*float32((y+1)*sampling-(x+1)*granularity)
						// peak > v2 > v1
					} else {
						peak = v2
						// not enough data to interpolate; this is at least not restricted
					}
				}
				raise((x+1)*granularity, peak)
				decay((x+1)*granularity, peak)
			} else {
				// (x+1)*granularity < y*sampling
				// y*sampling..(y+1)*sampling
				decay(y*sampling, float32(matrix[y-1][x]))
			}
		}
	}
}
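
// A small intuition pump for addBurndownMatrix() (hypothetical numbers): with
// granularity = sampling = 30 and matrix = [[30], [20]], band 0 holds 30 lines at
// the first sample and 20 at the second; the function writes a linear ramp into
// `daily` for days 0..29 (the raise) and a linear decline from 30 towards 20 for
// days 30..59 (the decay), so that resampling `daily` approximately reproduces the
// input.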

func (analyser *BurndownAnalysis) serializeText(result *BurndownResult, writer io.Writer) {
	fmt.Fprintln(writer, "  granularity:", result.granularity)
	fmt.Fprintln(writer, "  sampling:", result.sampling)
	yaml.PrintMatrix(writer, result.GlobalHistory, 2, "project", true)
	if len(result.FileHistories) > 0 {
		fmt.Fprintln(writer, "  files:")
		keys := sortedKeys(result.FileHistories)
		for _, key := range keys {
			yaml.PrintMatrix(writer, result.FileHistories[key], 4, key, true)
		}
	}
	if len(result.PeopleHistories) > 0 {
		fmt.Fprintln(writer, "  people_sequence:")
		for key := range result.PeopleHistories {
			fmt.Fprintln(writer, "    - "+yaml.SafeString(result.reversedPeopleDict[key]))
		}
		fmt.Fprintln(writer, "  people:")
		for key, val := range result.PeopleHistories {
			yaml.PrintMatrix(writer, val, 4, result.reversedPeopleDict[key], true)
		}
		fmt.Fprintln(writer, "  people_interaction: |-")
		yaml.PrintMatrix(writer, result.PeopleMatrix, 4, "", false)
	}
}

func (analyser *BurndownAnalysis) serializeBinary(result *BurndownResult, writer io.Writer) error {
	message := pb.BurndownAnalysisResults{
		Granularity: int32(result.granularity),
		Sampling:    int32(result.sampling),
	}
	if len(result.GlobalHistory) > 0 {
		message.Project = pb.ToBurndownSparseMatrix(result.GlobalHistory, "project")
	}
	if len(result.FileHistories) > 0 {
		message.Files = make([]*pb.BurndownSparseMatrix, len(result.FileHistories))
		keys := sortedKeys(result.FileHistories)
		i := 0
		for _, key := range keys {
			message.Files[i] = pb.ToBurndownSparseMatrix(
				result.FileHistories[key], key)
			i++
		}
	}
	if len(result.PeopleHistories) > 0 {
		message.People = make(
			[]*pb.BurndownSparseMatrix, len(result.PeopleHistories))
		for key, val := range result.PeopleHistories {
			if len(val) > 0 {
				message.People[key] = pb.ToBurndownSparseMatrix(val, result.reversedPeopleDict[key])
			}
		}
		message.PeopleInteraction = pb.DenseToCompressedSparseRowMatrix(result.PeopleMatrix)
	}
	serialized, err := proto.Marshal(&message)
	if err != nil {
		return err
	}
	writer.Write(serialized)
	return nil
}

func sortedKeys(m map[string]DenseHistory) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}

func checkClose(c io.Closer) {
	if err := c.Close(); err != nil {
		panic(err)
	}
}

// packPersonWithDay is a hack: it stores the day in the lowest 14 bits and the author
// index in the next 18. Strictly speaking, int can be 64-bit and then the author index
// occupies 32+18 bits. This hack is needed to simplify the values storage inside File-s.
// We can compare different values together and they are compared as days for the same author.
func (analyser *BurndownAnalysis) packPersonWithDay(person int, day int) int {
	if analyser.PeopleNumber == 0 {
		return day
	}
	result := day & burndown.TreeMergeMark
	result |= person << burndown.TreeMaxBinPower
	// This effectively means max (16383 - 1) days (>44 years) and (131072 - 2) devs.
	// One day less because burndown.TreeMergeMark = ((1 << 14) - 1) is a special day.
	return result
}

func (analyser *BurndownAnalysis) unpackPersonWithDay(value int) (int, int) {
	if analyser.PeopleNumber == 0 {
		return identity.AuthorMissing, value
	}
	return value >> burndown.TreeMaxBinPower, value & burndown.TreeMergeMark
}
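
// Round-trip illustration (assuming burndown.TreeMaxBinPower = 14, per the comment above):
// packPersonWithDay(3, 100) == 3<<14 | 100 == 49252, and unpackPersonWithDay(49252)
// returns (3, 100) again.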

func (analyser *BurndownAnalysis) onNewDay() {
	if analyser.day > analyser.previousDay {
		analyser.previousDay = analyser.day
	}
}

func (analyser *BurndownAnalysis) updateGlobal(currentTime, previousTime, delta int) {
	_, currentDay := analyser.unpackPersonWithDay(currentTime)
	_, previousDay := analyser.unpackPersonWithDay(previousTime)
	currentHistory := analyser.globalHistory[currentDay]
	if currentHistory == nil {
		currentHistory = map[int]int64{}
		analyser.globalHistory[currentDay] = currentHistory
	}
	currentHistory[previousDay] += int64(delta)
}

// updateFile is bound to the specific `history` in the closure.
func (analyser *BurndownAnalysis) updateFile(
	history sparseHistory, currentTime, previousTime, delta int) {
	_, currentDay := analyser.unpackPersonWithDay(currentTime)
	_, previousDay := analyser.unpackPersonWithDay(previousTime)
	currentHistory := history[currentDay]
	if currentHistory == nil {
		currentHistory = map[int]int64{}
		history[currentDay] = currentHistory
	}
	currentHistory[previousDay] += int64(delta)
}

func (analyser *BurndownAnalysis) updateAuthor(currentTime, previousTime, delta int) {
	previousAuthor, previousDay := analyser.unpackPersonWithDay(previousTime)
	if previousAuthor == identity.AuthorMissing {
		return
	}
	_, currentDay := analyser.unpackPersonWithDay(currentTime)
	history := analyser.peopleHistories[previousAuthor]
	if history == nil {
		history = sparseHistory{}
		analyser.peopleHistories[previousAuthor] = history
	}
	currentHistory := history[currentDay]
	if currentHistory == nil {
		currentHistory = map[int]int64{}
		history[currentDay] = currentHistory
	}
	currentHistory[previousDay] += int64(delta)
}

func (analyser *BurndownAnalysis) updateMatrix(currentTime, previousTime, delta int) {
	newAuthor, _ := analyser.unpackPersonWithDay(currentTime)
	oldAuthor, _ := analyser.unpackPersonWithDay(previousTime)
	if oldAuthor == identity.AuthorMissing {
		return
	}
	if newAuthor == oldAuthor && delta > 0 {
		newAuthor = authorSelf
	}
	row := analyser.matrix[oldAuthor]
	if row == nil {
		row = map[int]int64{}
		analyser.matrix[oldAuthor] = row
	}
	// a missing key reads as 0, so the increment also works for new cells
	row[newAuthor] += int64(delta)
}
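
// In other words: when lines last touched by author A are overwritten or removed
// while author B's commit is being consumed, the delta accumulates in matrix[A][B];
// insertions on top of one's own lines land in the authorSelf column instead.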

func (analyser *BurndownAnalysis) newFile(
	hash plumbing.Hash, name string, author int, day int, size int) (*burndown.File, error) {
	updaters := make([]burndown.Updater, 1)
	updaters[0] = analyser.updateGlobal
	if analyser.TrackFiles {
		// the history may already exist if the file was created in a future branch
		history := analyser.fileHistories[name]
		if history == nil {
			history = sparseHistory{}
		}
		analyser.fileHistories[name] = history
		updaters = append(updaters, func(currentTime, previousTime, delta int) {
			analyser.updateFile(history, currentTime, previousTime, delta)
		})
	}
	if analyser.PeopleNumber > 0 {
		updaters = append(updaters, analyser.updateAuthor)
		updaters = append(updaters, analyser.updateMatrix)
		day = analyser.packPersonWithDay(author, day)
	}
	return burndown.NewFile(hash, day, size, updaters...), nil
}

func (analyser *BurndownAnalysis) handleInsertion(
	change *object.Change, author int, cache map[plumbing.Hash]*object.Blob) error {
	blob := cache[change.To.TreeEntry.Hash]
	lines, err := items.CountLines(blob)
	if err != nil {
		if err.Error() == "binary" {
			return nil
		}
		return err
	}
	name := change.To.Name
	file, exists := analyser.files[name]
	if exists {
		return fmt.Errorf("file %s already exists", name)
	}
	file, err = analyser.newFile(blob.Hash, name, author, analyser.day, lines)
	analyser.files[name] = file
	return err
}

func (analyser *BurndownAnalysis) handleDeletion(
	change *object.Change, author int, cache map[plumbing.Hash]*object.Blob) error {
	blob := cache[change.From.TreeEntry.Hash]
	lines, err := items.CountLines(blob)
	if err != nil {
		if err.Error() == "binary" {
			return nil
		}
		return err
	}
	name := change.From.Name
	file := analyser.files[name]
	file.Update(analyser.packPersonWithDay(author, analyser.day), 0, 0, lines)
	file.Hash = plumbing.ZeroHash
	delete(analyser.files, name)
	delete(analyser.fileHistories, name)
	analyser.renames[name] = ""
	return nil
}

func (analyser *BurndownAnalysis) handleModification(
	change *object.Change, author int, cache map[plumbing.Hash]*object.Blob,
	diffs map[string]items.FileDiffData) error {
	file, exists := analyser.files[change.From.Name]
	if !exists {
		// this indeed may happen
		return analyser.handleInsertion(change, author, cache)
	}
	file.Hash = change.To.TreeEntry.Hash
	// possible rename
	if change.To.Name != change.From.Name {
		err := analyser.handleRename(change.From.Name, change.To.Name)
		if err != nil {
			return err
		}
	}
	thisDiffs := diffs[change.To.Name]
	if file.Len() != thisDiffs.OldLinesOfCode {
		log.Printf("====TREE====\n%s", file.Dump())
		return fmt.Errorf("%s: internal integrity error src %d != %d %s -> %s",
			change.To.Name, thisDiffs.OldLinesOfCode, file.Len(),
			change.From.TreeEntry.Hash.String(), change.To.TreeEntry.Hash.String())
	}
	// we do not call RunesToDiffLines, so the number of lines equals
	// the rune count
	position := 0
	pending := diffmatchpatch.Diff{Text: ""}
	apply := func(edit diffmatchpatch.Diff) {
		length := utf8.RuneCountInString(edit.Text)
		if edit.Type == diffmatchpatch.DiffInsert {
			file.Update(analyser.packPersonWithDay(author, analyser.day), position, length, 0)
			position += length
		} else {
			file.Update(analyser.packPersonWithDay(author, analyser.day), position, 0, length)
		}
		if analyser.Debug {
			file.Validate()
		}
	}
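	// The loop below coalesces a DiffDelete immediately followed by a DiffInsert into
	// a single File.Update call (a replacement); `pending` holds the deferred edit
	// until the next hunk decides how to apply it.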
	for _, edit := range thisDiffs.Diffs {
		dumpBefore := ""
		if analyser.Debug {
			dumpBefore = file.Dump()
		}
		length := utf8.RuneCountInString(edit.Text)
		debugError := func() {
			log.Printf("%s: internal diff error\n", change.To.Name)
			log.Printf("Update(%d, %d, %d (0), %d (0))\n", analyser.day, position,
				length, utf8.RuneCountInString(pending.Text))
			if dumpBefore != "" {
				log.Printf("====TREE BEFORE====\n%s====END====\n", dumpBefore)
			}
			log.Printf("====TREE AFTER====\n%s====END====\n", file.Dump())
		}
		switch edit.Type {
		case diffmatchpatch.DiffEqual:
			if pending.Text != "" {
				apply(pending)
				pending.Text = ""
			}
			position += length
		case diffmatchpatch.DiffInsert:
			if pending.Text != "" {
				if pending.Type == diffmatchpatch.DiffInsert {
					debugError()
					return errors.New("DiffInsert may not appear after DiffInsert")
				}
				file.Update(analyser.packPersonWithDay(author, analyser.day), position, length,
					utf8.RuneCountInString(pending.Text))
				if analyser.Debug {
					file.Validate()
				}
				position += length
				pending.Text = ""
			} else {
				pending = edit
			}
		case diffmatchpatch.DiffDelete:
			if pending.Text != "" {
				debugError()
				return errors.New("DiffDelete may not appear after DiffInsert/DiffDelete")
			}
			pending = edit
		default:
			debugError()
			return fmt.Errorf("diff operation is not supported: %d", edit.Type)
		}
	}
	if pending.Text != "" {
		apply(pending)
		pending.Text = ""
	}
	if file.Len() != thisDiffs.NewLinesOfCode {
		return fmt.Errorf("%s: internal integrity error dst %d != %d",
			change.To.Name, thisDiffs.NewLinesOfCode, file.Len())
	}
	return nil
}

func (analyser *BurndownAnalysis) handleRename(from, to string) error {
	if from == to {
		return nil
	}
	file, exists := analyser.files[from]
	if !exists {
		return fmt.Errorf("file %s > %s does not exist (files)", from, to)
	}
	analyser.files[to] = file
	delete(analyser.files, from)
	if analyser.TrackFiles {
		history := analyser.fileHistories[from]
		if history == nil {
			// a future branch could have already renamed the file, so consult the record
			futureRename, exists := analyser.renames[from]
			if futureRename == "" && exists {
				// the file will be deleted in the future, whatever
				history = sparseHistory{}
			} else {
				history = analyser.fileHistories[futureRename]
				if history == nil {
					return fmt.Errorf("file %s > %s does not exist (histories)", from, to)
				}
			}
		}
		analyser.fileHistories[to] = history
		delete(analyser.fileHistories, from)
	}
	analyser.renames[from] = to
	return nil
}
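
// A worked example for groupSparseHistory() below (hypothetical history): with
// Sampling = 2, Granularity = 2 and history = sparseHistory{0: {0: 50}, 3: {0: -10, 3: 20}},
// lastDay resolves to 3, giving 2 samples and 2 bands; sample 0 is [50, 0] and sample 1
// carries the accumulated state [40, 20].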

func (analyser *BurndownAnalysis) groupSparseHistory(
	history sparseHistory, lastDay int) (DenseHistory, int) {
	if len(history) == 0 {
		panic("empty history")
	}
	var days []int
	for day := range history {
		days = append(days, day)
	}
	sort.Ints(days)
	if lastDay >= 0 {
		if days[len(days)-1] < lastDay {
			days = append(days, lastDay)
		} else if days[len(days)-1] > lastDay {
			panic("days corruption")
		}
	} else {
		lastDay = days[len(days)-1]
	}
	// [y][x]
	// y - sampling
	// x - granularity
	samples := lastDay/analyser.Sampling + 1
	bands := lastDay/analyser.Granularity + 1
	result := make(DenseHistory, samples)
	// allocate every sample row; Sampling <= Granularity guarantees samples >= bands,
	// so iterating only over the bands here would leave the trailing rows nil
	for i := 0; i < samples; i++ {
		result[i] = make([]int64, bands)
	}
	prevsi := 0
	for _, day := range days {
		si := day / analyser.Sampling
		if si > prevsi {
			state := result[prevsi]
			for i := prevsi + 1; i <= si; i++ {
				copy(result[i], state)
			}
			prevsi = si
		}
		sample := result[si]
		for bday, value := range history[day] {
			sample[bday/analyser.Granularity] += value
		}
	}
	return result, lastDay
}

func init() {
	core.Registry.Register(&BurndownAnalysis{})
}