package leaves

import (
	"errors"
	"fmt"
	"io"
	"log"
	"sort"
	"sync"
	"unicode/utf8"

	"github.com/gogo/protobuf/proto"
	"github.com/sergi/go-diff/diffmatchpatch"
	"gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
	"gopkg.in/src-d/go-git.v4/utils/merkletrie"
	"gopkg.in/src-d/hercules.v4/internal/burndown"
	"gopkg.in/src-d/hercules.v4/internal/core"
	"gopkg.in/src-d/hercules.v4/internal/pb"
	items "gopkg.in/src-d/hercules.v4/internal/plumbing"
	"gopkg.in/src-d/hercules.v4/internal/plumbing/identity"
	"gopkg.in/src-d/hercules.v4/yaml"
)

// BurndownAnalysis gathers the line burndown statistics for a Git repository.
// It is a LeafPipelineItem.
// Reference: https://erikbern.com/2016/12/05/the-half-life-of-code.html
type BurndownAnalysis struct {
	// Granularity sets the size of each band - the number of days it spans.
	// Smaller values provide better resolution but require more work and eat more
	// memory. 30 days is usually enough.
	Granularity int
	// Sampling sets how detailed the statistics are - the size of the interval in
	// days between consecutive measurements. It may not be greater than Granularity.
	// Try 15 or 30.
	Sampling int
	// TrackFiles enables or disables the fine-grained per-file burndown analysis.
	// It does not change the project level burndown results.
	TrackFiles bool
	// PeopleNumber is the number of developers for which to collect the burndown
	// stats. 0 disables it.
	PeopleNumber int
	// Debug activates the debugging mode. Analyse() runs slower in this mode
	// but it accurately checks all the intermediate states for invariant
	// violations.
	Debug bool
	// repository points to the analysed Git repository struct from go-git.
	repository *git.Repository
	// globalStatus is the current daily alive number of lines; the key is the
	// number of days from the beginning of the history.
	globalStatus map[int]int64
	// globalHistory is the periodic snapshots of globalStatus.
	globalHistory [][]int64
	// fileHistories is the periodic snapshots of each file's status.
	fileHistories map[string][][]int64
	// peopleHistories is the periodic snapshots of each person's status.
	peopleHistories [][][]int64
	// files is the mapping <file path> -> *File.
	files map[string]*burndown.File
	// matrix is the mutual deletions and self insertions.
	matrix []map[int]int64
	// people is the people's individual time stats.
	people []map[int]int64
	// day is the most recent day index processed.
	day int
	// previousDay is the day from the previous sample period -
	// different from DaysSinceStart.previousDay.
	previousDay int
	// reversedPeopleDict references IdentityDetector.ReversedPeopleDict.
	reversedPeopleDict []string
}

// BurndownResult carries the result of running BurndownAnalysis - it is returned by
// BurndownAnalysis.Finalize().
type BurndownResult struct {
	// [number of samples][number of bands]
	// The number of samples depends on Sampling: the smaller the Sampling, the bigger the number.
	// The number of bands depends on Granularity: the smaller the Granularity, the bigger the number.
	GlobalHistory [][]int64
	// The key is the path inside the Git repository. The value's dimensions are the same as
	// in GlobalHistory.
	FileHistories map[string][][]int64
	// [number of people][number of samples][number of bands]
	PeopleHistories [][][]int64
	// [number of people][number of people + 2]
	// The first element is the total number of lines added by the author.
	// The second element is the number of removals by unidentified authors (outside reversedPeopleDict).
	// The rest of the elements are equal to the number of line removals by the corresponding
	// authors in reversedPeopleDict: 2 -> 0, 3 -> 1, etc.
	PeopleMatrix [][]int64
	// The following members are private.
	// reversedPeopleDict is borrowed from IdentityDetector and becomes available after
	// Pipeline.Initialize(facts map[string]interface{}). Thus it can be obtained via
	// facts[FactIdentityDetectorReversedPeopleDict].
	reversedPeopleDict []string
	// sampling and granularity are copied from BurndownAnalysis and stored for service purposes
	// such as merging several results together.
	sampling    int
	granularity int
}
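
// A quick sanity check of the dimensions above (illustrative numbers, not taken
// from any particular repository): with sampling = 15 and granularity = 30, a
// history spanning 90 days yields 90/15 = 6 samples and 90/30 = 3 bands, so
// GlobalHistory is 6x3 and every FileHistories value shares the same shape.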

const (
	// ConfigBurndownGranularity is the name of the option to set BurndownAnalysis.Granularity.
	ConfigBurndownGranularity = "Burndown.Granularity"
	// ConfigBurndownSampling is the name of the option to set BurndownAnalysis.Sampling.
	ConfigBurndownSampling = "Burndown.Sampling"
	// ConfigBurndownTrackFiles enables burndown collection for files.
	ConfigBurndownTrackFiles = "Burndown.TrackFiles"
	// ConfigBurndownTrackPeople enables burndown collection for authors.
	ConfigBurndownTrackPeople = "Burndown.TrackPeople"
	// ConfigBurndownDebug enables some extra debug assertions.
	ConfigBurndownDebug = "Burndown.Debug"
	// DefaultBurndownGranularity is the default number of days for BurndownAnalysis.Granularity
	// and BurndownAnalysis.Sampling.
	DefaultBurndownGranularity = 30
	// authorSelf is the internal author index which is used in BurndownAnalysis.Finalize() to
	// format the author overwrites matrix.
	authorSelf = (1 << (32 - burndown.TreeMaxBinPower)) - 2
)

// Name of this PipelineItem. Uniquely identifies the type, used for mapping keys, etc.
func (analyser *BurndownAnalysis) Name() string {
	return "Burndown"
}

// Provides returns the list of names of entities which are produced by this PipelineItem.
// Each produced entity will be inserted into `deps` of dependent Consume()-s according
// to this list. Also used by core.Registry to build the global map of providers.
func (analyser *BurndownAnalysis) Provides() []string {
	return []string{}
}

// Requires returns the list of names of entities which are needed by this PipelineItem.
// Each requested entity will be inserted into `deps` of Consume(). In turn, those
// entities are Provides() upstream.
func (analyser *BurndownAnalysis) Requires() []string {
	arr := [...]string{
		items.DependencyFileDiff, items.DependencyTreeChanges, items.DependencyBlobCache,
		items.DependencyDay, identity.DependencyAuthor}
	return arr[:]
}

// ListConfigurationOptions returns the list of changeable public properties of this PipelineItem.
func (analyser *BurndownAnalysis) ListConfigurationOptions() []core.ConfigurationOption {
	options := [...]core.ConfigurationOption{{
		Name:        ConfigBurndownGranularity,
		Description: "How many days there are in a single band.",
		Flag:        "granularity",
		Type:        core.IntConfigurationOption,
		Default:     DefaultBurndownGranularity}, {
		Name:        ConfigBurndownSampling,
		Description: "How frequently to record the state in days.",
		Flag:        "sampling",
		Type:        core.IntConfigurationOption,
		Default:     DefaultBurndownGranularity}, {
		Name:        ConfigBurndownTrackFiles,
		Description: "Record detailed statistics per each file.",
		Flag:        "burndown-files",
		Type:        core.BoolConfigurationOption,
		Default:     false}, {
		Name:        ConfigBurndownTrackPeople,
		Description: "Record detailed statistics per each developer.",
		Flag:        "burndown-people",
		Type:        core.BoolConfigurationOption,
		Default:     false}, {
		Name:        ConfigBurndownDebug,
		Description: "Validate the trees on each step.",
		Flag:        "burndown-debug",
		Type:        core.BoolConfigurationOption,
		Default:     false},
	}
	return options[:]
}
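
// These options surface as command line flags; a sketch of a typical invocation,
// assuming the standard hercules CLI wires up the flags declared above:
//
//	hercules --burndown --granularity 30 --sampling 15 \
//	    --burndown-files --burndown-people <repository>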

// Configure sets the properties previously published by ListConfigurationOptions().
func (analyser *BurndownAnalysis) Configure(facts map[string]interface{}) {
	if val, exists := facts[ConfigBurndownGranularity].(int); exists {
		analyser.Granularity = val
	}
	if val, exists := facts[ConfigBurndownSampling].(int); exists {
		analyser.Sampling = val
	}
	if val, exists := facts[ConfigBurndownTrackFiles].(bool); exists {
		analyser.TrackFiles = val
	}
	if people, exists := facts[ConfigBurndownTrackPeople].(bool); people {
		if val, exists := facts[identity.FactIdentityDetectorPeopleCount].(int); exists {
			analyser.PeopleNumber = val
			analyser.reversedPeopleDict = facts[identity.FactIdentityDetectorReversedPeopleDict].([]string)
		}
	} else if exists {
		analyser.PeopleNumber = 0
	}
	if val, exists := facts[ConfigBurndownDebug].(bool); exists {
		analyser.Debug = val
	}
}
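
// A minimal sketch of driving Configure directly, without the pipeline machinery
// (the fact values are illustrative):
//
//	analyser := &BurndownAnalysis{}
//	analyser.Configure(map[string]interface{}{
//		ConfigBurndownGranularity: 30,
//		ConfigBurndownSampling:    15,
//		ConfigBurndownTrackFiles:  true,
//	})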

// Flag for the command line switch which enables this analysis.
func (analyser *BurndownAnalysis) Flag() string {
	return "burndown"
}

// Initialize resets the temporary caches and prepares this PipelineItem for a series of Consume()
// calls. The repository which is going to be analysed is supplied as an argument.
func (analyser *BurndownAnalysis) Initialize(repository *git.Repository) {
	if analyser.Granularity <= 0 {
		log.Printf("Warning: adjusted the granularity to %d days\n",
			DefaultBurndownGranularity)
		analyser.Granularity = DefaultBurndownGranularity
	}
	if analyser.Sampling <= 0 {
		log.Printf("Warning: adjusted the sampling to %d days\n",
			DefaultBurndownGranularity)
		analyser.Sampling = DefaultBurndownGranularity
	}
	if analyser.Sampling > analyser.Granularity {
		log.Printf("Warning: granularity may not be less than sampling, adjusted to %d\n",
			analyser.Granularity)
		analyser.Sampling = analyser.Granularity
	}
	analyser.repository = repository
	analyser.globalStatus = map[int]int64{}
	analyser.globalHistory = [][]int64{}
	analyser.fileHistories = map[string][][]int64{}
	analyser.peopleHistories = make([][][]int64, analyser.PeopleNumber)
	analyser.files = map[string]*burndown.File{}
	analyser.matrix = make([]map[int]int64, analyser.PeopleNumber)
	analyser.people = make([]map[int]int64, analyser.PeopleNumber)
	analyser.day = 0
	analyser.previousDay = 0
}

// Consume runs this PipelineItem on the next commit data.
// `deps` contain all the results from upstream PipelineItem-s as requested by Requires().
// Additionally, DependencyCommit is always present there and represents the analysed *object.Commit.
// This function returns the mapping with analysis results. The keys must be the same as
// in Provides(). If there was an error, nil is returned.
func (analyser *BurndownAnalysis) Consume(deps map[string]interface{}) (map[string]interface{}, error) {
	author := deps[identity.DependencyAuthor].(int)
	day := deps[items.DependencyDay].(int)
	if !core.IsMergeCommit(deps) {
		analyser.day = day
		analyser.onNewDay()
	} else {
		// effectively disables the status updates if the commit is a merge
		// we will analyse the conflicts resolution in Merge()
		analyser.day = burndown.TreeMergeMark
	}
	cache := deps[items.DependencyBlobCache].(map[plumbing.Hash]*object.Blob)
	treeDiffs := deps[items.DependencyTreeChanges].(object.Changes)
	fileDiffs := deps[items.DependencyFileDiff].(map[string]items.FileDiffData)
	for _, change := range treeDiffs {
		action, _ := change.Action()
		var err error
		switch action {
		case merkletrie.Insert:
			err = analyser.handleInsertion(change, author, cache)
		case merkletrie.Delete:
			err = analyser.handleDeletion(change, author, cache)
		case merkletrie.Modify:
			err = analyser.handleModification(change, author, cache, fileDiffs)
		}
		if err != nil {
			return nil, err
		}
	}
	// in case there is a merge, analyser.day equals TreeMergeMark
	analyser.day = day
	return nil, nil
}

// Fork clones this item. Everything is copied by reference except the files
// which are copied by value.
func (analyser *BurndownAnalysis) Fork(n int) []core.PipelineItem {
	result := make([]core.PipelineItem, n)
	for i := range result {
		clone := *analyser
		clone.files = map[string]*burndown.File{}
		for key, file := range analyser.files {
			clone.files[key] = file.Clone(false)
		}
		result[i] = &clone
	}
	return result
}

// Merge combines several items together. We apply the special file merging logic here.
func (analyser *BurndownAnalysis) Merge(branches []core.PipelineItem) {
	for key, file := range analyser.files {
		others := make([]*burndown.File, len(branches))
		for i, branch := range branches {
			others[i] = branch.(*BurndownAnalysis).files[key]
		}
		// don't worry, we compare the hashes first before heavy-lifting
		if file.Merge(analyser.day, others...) {
			for _, branch := range branches {
				branch.(*BurndownAnalysis).files[key] = file.Clone(false)
			}
		}
	}
	analyser.onNewDay()
}

// Finalize returns the result of the analysis. Further Consume() calls are not expected.
func (analyser *BurndownAnalysis) Finalize() interface{} {
	gs, fss, pss := analyser.groupStatus()
	analyser.updateHistories(1, gs, fss, pss)
	for key, statuses := range analyser.fileHistories {
		if len(statuses) == len(analyser.globalHistory) {
			continue
		}
		padding := make([][]int64, len(analyser.globalHistory)-len(statuses))
		for i := range padding {
			padding[i] = make([]int64, len(analyser.globalStatus))
		}
		analyser.fileHistories[key] = append(padding, statuses...)
	}
	peopleMatrix := make([][]int64, analyser.PeopleNumber)
	for i, row := range analyser.matrix {
		mrow := make([]int64, analyser.PeopleNumber+2)
		peopleMatrix[i] = mrow
		for key, val := range row {
			if key == identity.AuthorMissing {
				key = -1
			} else if key == authorSelf {
				key = -2
			}
			mrow[key+2] = val
		}
	}
	return BurndownResult{
		GlobalHistory:      analyser.globalHistory,
		FileHistories:      analyser.fileHistories,
		PeopleHistories:    analyser.peopleHistories,
		PeopleMatrix:       peopleMatrix,
		reversedPeopleDict: analyser.reversedPeopleDict,
		sampling:           analyser.Sampling,
		granularity:        analyser.Granularity,
	}
}
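
// Note on the padding above: a file which first appears later in the history has
// fewer snapshots than globalHistory, so Finalize() prepends all-zero rows. E.g.
// if globalHistory holds 6 samples and a file only 4, two zero rows are added in
// front, letting consumers index every matrix along one shared sample axis.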

// Serialize converts the analysis result as returned by Finalize() to text or bytes.
// The text format is YAML and the bytes format is Protocol Buffers.
func (analyser *BurndownAnalysis) Serialize(result interface{}, binary bool, writer io.Writer) error {
	burndownResult := result.(BurndownResult)
	if binary {
		return analyser.serializeBinary(&burndownResult, writer)
	}
	analyser.serializeText(&burndownResult, writer)
	return nil
}

// Deserialize converts the specified protobuf bytes to BurndownResult.
func (analyser *BurndownAnalysis) Deserialize(pbmessage []byte) (interface{}, error) {
	msg := pb.BurndownAnalysisResults{}
	err := proto.Unmarshal(pbmessage, &msg)
	if err != nil {
		return nil, err
	}
	result := BurndownResult{}
	convertCSR := func(mat *pb.BurndownSparseMatrix) [][]int64 {
		res := make([][]int64, mat.NumberOfRows)
		for i := 0; i < int(mat.NumberOfRows); i++ {
			res[i] = make([]int64, mat.NumberOfColumns)
			for j := 0; j < len(mat.Rows[i].Columns); j++ {
				res[i][j] = int64(mat.Rows[i].Columns[j])
			}
		}
		return res
	}
	result.GlobalHistory = convertCSR(msg.Project)
	result.FileHistories = map[string][][]int64{}
	for _, mat := range msg.Files {
		result.FileHistories[mat.Name] = convertCSR(mat)
	}
	result.reversedPeopleDict = make([]string, len(msg.People))
	result.PeopleHistories = make([][][]int64, len(msg.People))
	for i, mat := range msg.People {
		result.PeopleHistories[i] = convertCSR(mat)
		result.reversedPeopleDict[i] = mat.Name
	}
	if msg.PeopleInteraction != nil {
		result.PeopleMatrix = make([][]int64, msg.PeopleInteraction.NumberOfRows)
	}
	for i := 0; i < len(result.PeopleMatrix); i++ {
		result.PeopleMatrix[i] = make([]int64, msg.PeopleInteraction.NumberOfColumns)
		for j := int(msg.PeopleInteraction.Indptr[i]); j < int(msg.PeopleInteraction.Indptr[i+1]); j++ {
			result.PeopleMatrix[i][msg.PeopleInteraction.Indices[j]] = msg.PeopleInteraction.Data[j]
		}
	}
	result.sampling = int(msg.Sampling)
	result.granularity = int(msg.Granularity)
	return result, nil
}
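
// CSR decoding sketch (illustrative values): for Indptr = [0, 2, 3],
// Indices = [0, 2, 1] and Data = [5, 7, 9], row 0 receives columns 0 and 2
// (values 5 and 7) and row 1 receives column 1 (value 9) - the standard
// compressed sparse row layout produced by DenseToCompressedSparseRowMatrix.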

// MergeResults combines two BurndownResult-s together.
func (analyser *BurndownAnalysis) MergeResults(
	r1, r2 interface{}, c1, c2 *core.CommonAnalysisResult) interface{} {
	bar1 := r1.(BurndownResult)
	bar2 := r2.(BurndownResult)
	merged := BurndownResult{}
	if bar1.sampling < bar2.sampling {
		merged.sampling = bar1.sampling
	} else {
		merged.sampling = bar2.sampling
	}
	if bar1.granularity < bar2.granularity {
		merged.granularity = bar1.granularity
	} else {
		merged.granularity = bar2.granularity
	}
	var people map[string][3]int
	people, merged.reversedPeopleDict = identity.Detector{}.MergeReversedDicts(
		bar1.reversedPeopleDict, bar2.reversedPeopleDict)
	var wg sync.WaitGroup
	if len(bar1.GlobalHistory) > 0 || len(bar2.GlobalHistory) > 0 {
		wg.Add(1)
		go func() {
			defer wg.Done()
			merged.GlobalHistory = mergeMatrices(
				bar1.GlobalHistory, bar2.GlobalHistory,
				bar1.granularity, bar1.sampling,
				bar2.granularity, bar2.sampling,
				c1, c2)
		}()
	}
	if len(bar1.FileHistories) > 0 || len(bar2.FileHistories) > 0 {
		merged.FileHistories = map[string][][]int64{}
		historyMutex := sync.Mutex{}
		for key, fh1 := range bar1.FileHistories {
			if fh2, exists := bar2.FileHistories[key]; exists {
				wg.Add(1)
				go func(fh1, fh2 [][]int64, key string) {
					defer wg.Done()
					historyMutex.Lock()
					defer historyMutex.Unlock()
					merged.FileHistories[key] = mergeMatrices(
						fh1, fh2, bar1.granularity, bar1.sampling, bar2.granularity, bar2.sampling, c1, c2)
				}(fh1, fh2, key)
			} else {
				historyMutex.Lock()
				merged.FileHistories[key] = fh1
				historyMutex.Unlock()
			}
		}
		for key, fh2 := range bar2.FileHistories {
			if _, exists := bar1.FileHistories[key]; !exists {
				historyMutex.Lock()
				merged.FileHistories[key] = fh2
				historyMutex.Unlock()
			}
		}
	}
	if len(merged.reversedPeopleDict) > 0 {
		merged.PeopleHistories = make([][][]int64, len(merged.reversedPeopleDict))
		for i, key := range merged.reversedPeopleDict {
			ptrs := people[key]
			if ptrs[1] < 0 {
				if len(bar2.PeopleHistories) > 0 {
					merged.PeopleHistories[i] = bar2.PeopleHistories[ptrs[2]]
				}
			} else if ptrs[2] < 0 {
				if len(bar1.PeopleHistories) > 0 {
					merged.PeopleHistories[i] = bar1.PeopleHistories[ptrs[1]]
				}
			} else {
				wg.Add(1)
				go func(i int) {
					defer wg.Done()
					var m1, m2 [][]int64
					if len(bar1.PeopleHistories) > 0 {
						m1 = bar1.PeopleHistories[ptrs[1]]
					}
					if len(bar2.PeopleHistories) > 0 {
						m2 = bar2.PeopleHistories[ptrs[2]]
					}
					merged.PeopleHistories[i] = mergeMatrices(
						m1, m2,
						bar1.granularity, bar1.sampling,
						bar2.granularity, bar2.sampling,
						c1, c2,
					)
				}(i)
			}
		}
		wg.Add(1)
		go func() {
			defer wg.Done()
			if len(bar2.PeopleMatrix) == 0 {
				merged.PeopleMatrix = bar1.PeopleMatrix
				// extend the matrix in both directions
				for i := 0; i < len(merged.PeopleMatrix); i++ {
					for j := len(bar1.reversedPeopleDict); j < len(merged.reversedPeopleDict); j++ {
						merged.PeopleMatrix[i] = append(merged.PeopleMatrix[i], 0)
					}
				}
				for i := len(bar1.reversedPeopleDict); i < len(merged.reversedPeopleDict); i++ {
					merged.PeopleMatrix = append(
						merged.PeopleMatrix, make([]int64, len(merged.reversedPeopleDict)+2))
				}
			} else {
				merged.PeopleMatrix = make([][]int64, len(merged.reversedPeopleDict))
				for i := range merged.PeopleMatrix {
					merged.PeopleMatrix[i] = make([]int64, len(merged.reversedPeopleDict)+2)
				}
				for i, key := range bar1.reversedPeopleDict {
					mi := people[key][0] // index in merged.reversedPeopleDict
					copy(merged.PeopleMatrix[mi][:2], bar1.PeopleMatrix[i][:2])
					for j, val := range bar1.PeopleMatrix[i][2:] {
						merged.PeopleMatrix[mi][2+people[bar1.reversedPeopleDict[j]][0]] = val
					}
				}
				for i, key := range bar2.reversedPeopleDict {
					mi := people[key][0] // index in merged.reversedPeopleDict
					merged.PeopleMatrix[mi][0] += bar2.PeopleMatrix[i][0]
					merged.PeopleMatrix[mi][1] += bar2.PeopleMatrix[i][1]
					for j, val := range bar2.PeopleMatrix[i][2:] {
						merged.PeopleMatrix[mi][2+people[bar2.reversedPeopleDict[j]][0]] += val
					}
				}
			}
		}()
	}
	wg.Wait()
	return merged
}

// mergeMatrices takes two [number of samples][number of bands] matrices,
// resamples them to days so that they become square, sums and resamples back to the
// least of (sampling1, sampling2) and (granularity1, granularity2).
func mergeMatrices(m1, m2 [][]int64, granularity1, sampling1, granularity2, sampling2 int,
	c1, c2 *core.CommonAnalysisResult) [][]int64 {
	commonMerged := *c1
	commonMerged.Merge(c2)
	var granularity, sampling int
	if sampling1 < sampling2 {
		sampling = sampling1
	} else {
		sampling = sampling2
	}
	if granularity1 < granularity2 {
		granularity = granularity1
	} else {
		granularity = granularity2
	}
	size := int((commonMerged.EndTime - commonMerged.BeginTime) / (3600 * 24))
	daily := make([][]float32, size+granularity)
	for i := range daily {
		daily[i] = make([]float32, size+sampling)
	}
	if len(m1) > 0 {
		addBurndownMatrix(m1, granularity1, sampling1, daily,
			int(c1.BeginTime-commonMerged.BeginTime)/(3600*24))
	}
	if len(m2) > 0 {
		addBurndownMatrix(m2, granularity2, sampling2, daily,
			int(c2.BeginTime-commonMerged.BeginTime)/(3600*24))
	}
	// convert daily to [][]int64
	result := make([][]int64, (size+sampling-1)/sampling)
	for i := range result {
		result[i] = make([]int64, (size+granularity-1)/granularity)
		sampledIndex := i * sampling
		if i == len(result)-1 {
			sampledIndex = size - 1
		}
		for j := 0; j < len(result[i]); j++ {
			accum := float32(0)
			for k := j * granularity; k < (j+1)*granularity && k < size; k++ {
				accum += daily[sampledIndex][k]
			}
			result[i][j] = int64(accum)
		}
	}
	return result
}
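
// Worked example of the resampling (illustrative): merging a matrix recorded with
// sampling = 30 into one recorded with sampling = 15 selects sampling = 15 and the
// smaller granularity; both inputs are exploded to one row and column per day,
// shifted by their offsets from the common BeginTime, summed, and re-aggregated.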

// Explode `matrix` so that it is daily sampled and has daily bands, shift by `offset` days
// and add to the accumulator. `daily` size is square and is guaranteed to fit `matrix` by
// the caller.
// Rows: *at least* len(matrix) * sampling + offset
// Columns: *at least* len(matrix[...]) * granularity + offset
// `matrix` can be sparse, so that the last columns which are equal to 0 are truncated.
func addBurndownMatrix(matrix [][]int64, granularity, sampling int, daily [][]float32, offset int) {
	// Determine the maximum number of bands; the actual one may be larger but we do not care
	maxCols := 0
	for _, row := range matrix {
		if maxCols < len(row) {
			maxCols = len(row)
		}
	}
	neededRows := len(matrix)*sampling + offset
	if len(daily) < neededRows {
		panic(fmt.Sprintf("merge bug: too few daily rows: required %d, have %d",
			neededRows, len(daily)))
	}
	if len(daily[0]) < maxCols {
		panic(fmt.Sprintf("merge bug: too few daily cols: required %d, have %d",
			maxCols, len(daily[0])))
	}
	for x := 0; x < maxCols; x++ {
		for y := 0; y < len(matrix); y++ {
			if x*granularity > (y+1)*sampling {
				// the future is zeros
				continue
			}
			decay := func(startIndex int, startVal float32) {
				if startVal == 0 {
					return
				}
				k := float32(matrix[y][x]) / startVal // <= 1
				scale := float32((y+1)*sampling - startIndex)
				for i := x * granularity; i < (x+1)*granularity; i++ {
					initial := daily[startIndex-1+offset][i+offset]
					for j := startIndex; j < (y+1)*sampling; j++ {
						daily[j+offset][i+offset] = initial * (1 + (k-1)*float32(j-startIndex+1)/scale)
					}
				}
			}
			raise := func(finishIndex int, finishVal float32) {
				var initial float32
				if y > 0 {
					initial = float32(matrix[y-1][x])
				}
				startIndex := y * sampling
				if startIndex < x*granularity {
					startIndex = x * granularity
				}
				if startIndex == finishIndex {
					return
				}
				avg := (finishVal - initial) / float32(finishIndex-startIndex)
				for j := y * sampling; j < finishIndex; j++ {
					for i := startIndex; i <= j; i++ {
						daily[j+offset][i+offset] = avg
					}
				}
				// copy [x*g..y*s)
				for j := y * sampling; j < finishIndex; j++ {
					for i := x * granularity; i < y*sampling; i++ {
						daily[j+offset][i+offset] = daily[j-1+offset][i+offset]
					}
				}
			}
			if (x+1)*granularity >= (y+1)*sampling {
				// x*granularity <= (y+1)*sampling
				// 1. x*granularity <= y*sampling
				//    y*sampling..(y+1)sampling
				//
				//        x+1
				//         /
				//        /
				//       / y+1  -|
				//      /        |
				//     / y      -|
				//    /
				//   / x
				//
				// 2. x*granularity > y*sampling
				//    x*granularity..(y+1)sampling
				//
				//        x+1
				//         /
				//        /
				//       / y+1  -|
				//      /        |
				//     / x      -|
				//    /
				//   / y
				if x*granularity <= y*sampling {
					raise((y+1)*sampling, float32(matrix[y][x]))
				} else if (y+1)*sampling > x*granularity {
					raise((y+1)*sampling, float32(matrix[y][x]))
					avg := float32(matrix[y][x]) / float32((y+1)*sampling-x*granularity)
					for j := x * granularity; j < (y+1)*sampling; j++ {
						for i := x * granularity; i <= j; i++ {
							daily[j+offset][i+offset] = avg
						}
					}
				}
			} else if (x+1)*granularity >= y*sampling {
				// y*sampling <= (x+1)*granularity < (y+1)sampling
				// y*sampling..(x+1)*granularity
				// (x+1)*granularity..(y+1)sampling
				//        x+1
				//         /\
				//        /  \
				//       /    \
				//      /    y+1
				//     /
				//    y
				v1 := float32(matrix[y-1][x])
				v2 := float32(matrix[y][x])
				var peak float32
				delta := float32((x+1)*granularity - y*sampling)
				var scale float32
				var previous float32
				if y > 0 && (y-1)*sampling >= x*granularity {
					// x*g <= (y-1)*s <= y*s <= (x+1)*g <= (y+1)*s
					//           |________|.......^
					if y > 1 {
						previous = float32(matrix[y-2][x])
					}
					scale = float32(sampling)
				} else {
					// (y-1)*s < x*g <= y*s <= (x+1)*g <= (y+1)*s
					//            |______|.......^
					if y == 0 {
						scale = float32(sampling)
					} else {
						scale = float32(y*sampling - x*granularity)
					}
				}
				peak = v1 + (v1-previous)/scale*delta
				if v2 > peak {
					// we need to adjust the peak, it may not be less than the decayed value
					if y < len(matrix)-1 {
						// y*s <= (x+1)*g <= (y+1)*s < (y+2)*s
						//           ^.........|_________|
						k := (v2 - float32(matrix[y+1][x])) / float32(sampling) // > 0
						peak = float32(matrix[y][x]) + k*float32((y+1)*sampling-(x+1)*granularity)
						// peak > v2 > v1
					} else {
						peak = v2
						// not enough data to interpolate; this is at least not restricted
					}
				}
				raise((x+1)*granularity, peak)
				decay((x+1)*granularity, peak)
			} else {
				// (x+1)*granularity < y*sampling
				// y*sampling..(y+1)sampling
				decay(y*sampling, float32(matrix[y-1][x]))
			}
		}
	}
}

func (analyser *BurndownAnalysis) serializeText(result *BurndownResult, writer io.Writer) {
	fmt.Fprintln(writer, "  granularity:", result.granularity)
	fmt.Fprintln(writer, "  sampling:", result.sampling)
	yaml.PrintMatrix(writer, result.GlobalHistory, 2, "project", true)
	if len(result.FileHistories) > 0 {
		fmt.Fprintln(writer, "  files:")
		keys := sortedKeys(result.FileHistories)
		for _, key := range keys {
			yaml.PrintMatrix(writer, result.FileHistories[key], 4, key, true)
		}
	}
	if len(result.PeopleHistories) > 0 {
		fmt.Fprintln(writer, "  people_sequence:")
		for key := range result.PeopleHistories {
			fmt.Fprintln(writer, "    - "+yaml.SafeString(result.reversedPeopleDict[key]))
		}
		fmt.Fprintln(writer, "  people:")
		for key, val := range result.PeopleHistories {
			yaml.PrintMatrix(writer, val, 4, result.reversedPeopleDict[key], true)
		}
		fmt.Fprintln(writer, "  people_interaction: |-")
		yaml.PrintMatrix(writer, result.PeopleMatrix, 4, "", false)
	}
}

func (analyser *BurndownAnalysis) serializeBinary(result *BurndownResult, writer io.Writer) error {
	message := pb.BurndownAnalysisResults{
		Granularity: int32(result.granularity),
		Sampling:    int32(result.sampling),
	}
	if len(result.GlobalHistory) > 0 {
		message.Project = pb.ToBurndownSparseMatrix(result.GlobalHistory, "project")
	}
	if len(result.FileHistories) > 0 {
		message.Files = make([]*pb.BurndownSparseMatrix, len(result.FileHistories))
		keys := sortedKeys(result.FileHistories)
		i := 0
		for _, key := range keys {
			message.Files[i] = pb.ToBurndownSparseMatrix(
				result.FileHistories[key], key)
			i++
		}
	}
	if len(result.PeopleHistories) > 0 {
		message.People = make(
			[]*pb.BurndownSparseMatrix, len(result.PeopleHistories))
		for key, val := range result.PeopleHistories {
			if len(val) > 0 {
				message.People[key] = pb.ToBurndownSparseMatrix(val, result.reversedPeopleDict[key])
			}
		}
		message.PeopleInteraction = pb.DenseToCompressedSparseRowMatrix(result.PeopleMatrix)
	}
	serialized, err := proto.Marshal(&message)
	if err != nil {
		return err
	}
	writer.Write(serialized)
	return nil
}

func sortedKeys(m map[string][][]int64) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}

func checkClose(c io.Closer) {
	if err := c.Close(); err != nil {
		panic(err)
	}
}

// We do a hack and store the day in the first 14 bits and the author index in the last 18.
// Strictly speaking, int can be 64-bit and then the author index occupies 32+18 bits.
// This hack is needed to simplify the values storage inside File-s. We can compare
// different values together and they are compared as days for the same author.
func (analyser *BurndownAnalysis) packPersonWithDay(person int, day int) int {
	if analyser.PeopleNumber == 0 {
		return day
	}
	result := day & burndown.TreeMergeMark
	result |= person << burndown.TreeMaxBinPower
	// This effectively means max (16383 - 1) days (>44 years) and (131072 - 2) devs.
	// One day less because burndown.TreeMergeMark = ((1 << 14) - 1) is a special day.
	return result
}

func (analyser *BurndownAnalysis) unpackPersonWithDay(value int) (int, int) {
	if analyser.PeopleNumber == 0 {
		return identity.AuthorMissing, value
	}
	return value >> burndown.TreeMaxBinPower, value & burndown.TreeMergeMark
}
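
// Round-trip sketch of the packing (burndown.TreeMergeMark == (1 << 14) - 1 per
// the comment above, so the low 14 bits carry the day):
//
//	packed := analyser.packPersonWithDay(3, 100) // 3<<14 | 100
//	person, day := analyser.unpackPersonWithDay(packed)
//	// person == 3, day == 100, assuming analyser.PeopleNumber > 0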

func (analyser *BurndownAnalysis) onNewDay() {
	day := analyser.day
	sampling := analyser.Sampling
	delta := (day / sampling) - (analyser.previousDay / sampling)
	if delta > 0 {
		analyser.previousDay = day
		gs, fss, pss := analyser.groupStatus()
		analyser.updateHistories(delta, gs, fss, pss)
	}
}
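
// Example of the delta arithmetic (illustrative): with Sampling = 15, moving from
// previousDay = 14 to day = 31 gives delta = 31/15 - 14/15 = 2 - 0 = 2, so the
// current status is recorded twice - once per crossed sampling boundary.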

func (analyser *BurndownAnalysis) updateStatus(
	status interface{}, _ int, previousValue int, delta int) {
	_, previousTime := analyser.unpackPersonWithDay(previousValue)
	status.(map[int]int64)[previousTime] += int64(delta)
}

func (analyser *BurndownAnalysis) updatePeople(
	peopleUncasted interface{}, _ int, previousValue int, delta int) {
	previousAuthor, previousTime := analyser.unpackPersonWithDay(previousValue)
	if previousAuthor == identity.AuthorMissing {
		return
	}
	people := peopleUncasted.([]map[int]int64)
	stats := people[previousAuthor]
	if stats == nil {
		stats = map[int]int64{}
		people[previousAuthor] = stats
	}
	stats[previousTime] += int64(delta)
}

func (analyser *BurndownAnalysis) updateMatrix(
	matrixUncasted interface{}, currentTime int, previousTime int, delta int) {
	matrix := matrixUncasted.([]map[int]int64)
	newAuthor, _ := analyser.unpackPersonWithDay(currentTime)
	oldAuthor, _ := analyser.unpackPersonWithDay(previousTime)
	if oldAuthor == identity.AuthorMissing {
		return
	}
	if newAuthor == oldAuthor && delta > 0 {
		newAuthor = authorSelf
	}
	row := matrix[oldAuthor]
	if row == nil {
		row = map[int]int64{}
		matrix[oldAuthor] = row
	}
	cell, exists := row[newAuthor]
	if !exists {
		row[newAuthor] = 0
		cell = 0
	}
	row[newAuthor] = cell + int64(delta)
}

func (analyser *BurndownAnalysis) newFile(
	hash plumbing.Hash, author int, day int, size int, global map[int]int64,
	people []map[int]int64, matrix []map[int]int64) *burndown.File {
	statuses := make([]burndown.Status, 1)
	statuses[0] = burndown.NewStatus(global, analyser.updateStatus)
	if analyser.TrackFiles {
		statuses = append(statuses, burndown.NewStatus(map[int]int64{}, analyser.updateStatus))
	}
	if analyser.PeopleNumber > 0 {
		statuses = append(statuses, burndown.NewStatus(people, analyser.updatePeople))
		statuses = append(statuses, burndown.NewStatus(matrix, analyser.updateMatrix))
		day = analyser.packPersonWithDay(author, day)
	}
	return burndown.NewFile(hash, day, size, statuses...)
}

func (analyser *BurndownAnalysis) handleInsertion(
	change *object.Change, author int, cache map[plumbing.Hash]*object.Blob) error {
	blob := cache[change.To.TreeEntry.Hash]
	lines, err := items.CountLines(blob)
	if err != nil {
		if err.Error() == "binary" {
			return nil
		}
		return err
	}
	name := change.To.Name
	file, exists := analyser.files[name]
	if exists {
		return fmt.Errorf("file %s already exists", name)
	}
	file = analyser.newFile(
		blob.Hash, author, analyser.day, lines,
		analyser.globalStatus, analyser.people, analyser.matrix)
	analyser.files[name] = file
	return nil
}

func (analyser *BurndownAnalysis) handleDeletion(
	change *object.Change, author int, cache map[plumbing.Hash]*object.Blob) error {
	blob := cache[change.From.TreeEntry.Hash]
	lines, err := items.CountLines(blob)
	if err != nil {
		if err.Error() == "binary" {
			return nil
		}
		return err
	}
	name := change.From.Name
	file := analyser.files[name]
	file.Update(analyser.packPersonWithDay(author, analyser.day), 0, 0, lines)
	file.Hash = plumbing.ZeroHash
	delete(analyser.files, name)
	return nil
}

func (analyser *BurndownAnalysis) handleModification(
	change *object.Change, author int, cache map[plumbing.Hash]*object.Blob,
	diffs map[string]items.FileDiffData) error {
	file, exists := analyser.files[change.From.Name]
	if !exists {
		// this indeed may happen
		return analyser.handleInsertion(change, author, cache)
	}
	file.Hash = change.To.TreeEntry.Hash
	// possible rename
	if change.To.Name != change.From.Name {
		err := analyser.handleRename(change.From.Name, change.To.Name)
		if err != nil {
			return err
		}
	}
	thisDiffs := diffs[change.To.Name]
	if file.Len() != thisDiffs.OldLinesOfCode {
		log.Printf("====TREE====\n%s", file.Dump())
		return fmt.Errorf("%s: internal integrity error src %d != %d %s -> %s",
			change.To.Name, thisDiffs.OldLinesOfCode, file.Len(),
			change.From.TreeEntry.Hash.String(), change.To.TreeEntry.Hash.String())
	}
	// we do not call RunesToDiffLines so the number of lines equals
	// the rune count
	position := 0
	pending := diffmatchpatch.Diff{Text: ""}
	apply := func(edit diffmatchpatch.Diff) {
		length := utf8.RuneCountInString(edit.Text)
		if edit.Type == diffmatchpatch.DiffInsert {
			file.Update(analyser.packPersonWithDay(author, analyser.day), position, length, 0)
			position += length
		} else {
			file.Update(analyser.packPersonWithDay(author, analyser.day), position, 0, length)
		}
		if analyser.Debug {
			file.Validate()
		}
	}
	for _, edit := range thisDiffs.Diffs {
		dumpBefore := ""
		if analyser.Debug {
			dumpBefore = file.Dump()
		}
		length := utf8.RuneCountInString(edit.Text)
		debugError := func() {
			log.Printf("%s: internal diff error\n", change.To.Name)
			log.Printf("Update(%d, %d, %d (0), %d (0))\n", analyser.day, position,
				length, utf8.RuneCountInString(pending.Text))
			if dumpBefore != "" {
				log.Printf("====TREE BEFORE====\n%s====END====\n", dumpBefore)
			}
			log.Printf("====TREE AFTER====\n%s====END====\n", file.Dump())
		}
		switch edit.Type {
		case diffmatchpatch.DiffEqual:
			if pending.Text != "" {
				apply(pending)
				pending.Text = ""
			}
			position += length
		case diffmatchpatch.DiffInsert:
			if pending.Text != "" {
				if pending.Type == diffmatchpatch.DiffInsert {
					debugError()
					return errors.New("DiffInsert may not appear after DiffInsert")
				}
				file.Update(analyser.packPersonWithDay(author, analyser.day), position, length,
					utf8.RuneCountInString(pending.Text))
				if analyser.Debug {
					file.Validate()
				}
				position += length
				pending.Text = ""
			} else {
				pending = edit
			}
		case diffmatchpatch.DiffDelete:
			if pending.Text != "" {
				debugError()
				return errors.New("DiffDelete may not appear after DiffInsert/DiffDelete")
			}
			pending = edit
		default:
			debugError()
			return fmt.Errorf("diff operation is not supported: %d", edit.Type)
		}
	}
	if pending.Text != "" {
		apply(pending)
		pending.Text = ""
	}
	if file.Len() != thisDiffs.NewLinesOfCode {
		return fmt.Errorf("%s: internal integrity error dst %d != %d",
			change.To.Name, thisDiffs.NewLinesOfCode, file.Len())
	}
	return nil
}

func (analyser *BurndownAnalysis) handleRename(from, to string) error {
	file, exists := analyser.files[from]
	if !exists {
		return fmt.Errorf("file %s does not exist", from)
	}
	analyser.files[to] = file
	delete(analyser.files, from)
	return nil
}

func (analyser *BurndownAnalysis) groupStatus() ([]int64, map[string][]int64, [][]int64) {
	granularity := analyser.Granularity
	if granularity == 0 {
		granularity = 1
	}
	day := analyser.day
	day++
	adjust := 0
	if day%granularity != 0 {
		adjust = 1
	}
	global := make([]int64, day/granularity+adjust)
	var group int64
	for i := 0; i < day; i++ {
		group += analyser.globalStatus[i]
		if (i % granularity) == (granularity - 1) {
			global[i/granularity] = group
			group = 0
		}
	}
	if day%granularity != 0 {
		global[len(global)-1] = group
	}
	locals := make(map[string][]int64)
	if analyser.TrackFiles {
		for key, file := range analyser.files {
			status := make([]int64, day/granularity+adjust)
			var group int64
			for i := 0; i < day; i++ {
				group += file.Status(1).(map[int]int64)[i]
				if (i % granularity) == (granularity - 1) {
					status[i/granularity] = group
					group = 0
				}
			}
			if day%granularity != 0 {
				status[len(status)-1] = group
			}
			locals[key] = status
		}
	}
	peoples := make([][]int64, len(analyser.people))
	for key, person := range analyser.people {
		status := make([]int64, day/granularity+adjust)
		var group int64
		for i := 0; i < day; i++ {
			group += person[i]
			if (i % granularity) == (granularity - 1) {
				status[i/granularity] = group
				group = 0
			}
		}
		if day%granularity != 0 {
			status[len(status)-1] = group
		}
		peoples[key] = status
	}
	return global, locals, peoples
}
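
// Banding sketch (illustrative): with granularity = 30 and analyser.day = 64 the
// local day becomes 65, adjust = 1 and each status slice has 65/30 + 1 = 3 cells;
// days 0-29 and 30-59 fill the first two bands and the 5-day remainder the third.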

func (analyser *BurndownAnalysis) updateHistories(
	delta int, globalStatus []int64, fileStatuses map[string][]int64, peopleStatuses [][]int64) {
	for i := 0; i < delta; i++ {
		analyser.globalHistory = append(analyser.globalHistory, globalStatus)
	}
	toDelete := make([]string, 0)
	for key, fh := range analyser.fileHistories {
		ls, exists := fileStatuses[key]
		if !exists {
			toDelete = append(toDelete, key)
		} else {
			for i := 0; i < delta; i++ {
				fh = append(fh, ls)
			}
			analyser.fileHistories[key] = fh
		}
	}
	for _, key := range toDelete {
		delete(analyser.fileHistories, key)
	}
	for key, ls := range fileStatuses {
		fh, exists := analyser.fileHistories[key]
		if exists {
			continue
		}
		for i := 0; i < delta; i++ {
			fh = append(fh, ls)
		}
		analyser.fileHistories[key] = fh
	}
	for key, ph := range analyser.peopleHistories {
		ls := peopleStatuses[key]
		for i := 0; i < delta; i++ {
			ph = append(ph, ls)
		}
		analyser.peopleHistories[key] = ph
	}
}

func init() {
	core.Registry.Register(&BurndownAnalysis{})
}