burndown.go

package leaves

import (
	"errors"
	"fmt"
	"io"
	"log"
	"sort"
	"sync"
	"unicode/utf8"

	"github.com/gogo/protobuf/proto"
	"github.com/sergi/go-diff/diffmatchpatch"
	"gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
	"gopkg.in/src-d/go-git.v4/utils/merkletrie"
	"gopkg.in/src-d/hercules.v4/internal/burndown"
	"gopkg.in/src-d/hercules.v4/internal/core"
	"gopkg.in/src-d/hercules.v4/internal/pb"
	items "gopkg.in/src-d/hercules.v4/internal/plumbing"
	"gopkg.in/src-d/hercules.v4/internal/plumbing/identity"
	"gopkg.in/src-d/hercules.v4/yaml"
)

// BurndownAnalysis gathers the line burndown statistics for a Git repository.
// It is a LeafPipelineItem.
// Reference: https://erikbern.com/2016/12/05/the-half-life-of-code.html
type BurndownAnalysis struct {
	// Granularity sets the size of each band - the number of days it spans.
	// Smaller values provide better resolution but require more work and eat more
	// memory. 30 days is usually enough.
	Granularity int
	// Sampling sets how detailed the statistic is - the size of the interval in
	// days between consecutive measurements. It may not be greater than Granularity. Try 15 or 30.
	Sampling int
	// TrackFiles enables or disables the fine-grained per-file burndown analysis.
	// It does not change the project-level burndown results.
	TrackFiles bool
	// PeopleNumber is the number of developers for which to collect the burndown stats. 0 disables it.
	PeopleNumber int
	// Debug activates the debugging mode. Analyse() runs slower in this mode
	// but it accurately checks all the intermediate states for invariant
	// violations.
	Debug bool
	// repository points to the analysed Git repository struct from go-git.
	repository *git.Repository
	// globalStatus is the current daily number of alive lines; the key is the number
	// of days since the beginning of the history.
	globalStatus map[int]int64
	// globalHistory is the periodic snapshots of globalStatus.
	globalHistory [][]int64
	// fileHistories is the periodic snapshots of each file's status.
	fileHistories map[string][][]int64
	// peopleHistories is the periodic snapshots of each person's status.
	peopleHistories [][][]int64
	// files is the mapping <file path> -> *burndown.File.
	files map[string]*burndown.File
	// matrix is the mutual deletions and self insertions.
	matrix []map[int]int64
	// people is the people's individual time stats.
	people []map[int]int64
	// day is the most recent day index processed.
	day int
	// previousDay is the day from the previous sample period -
	// different from DaysSinceStart.previousDay.
	previousDay int
	// reversedPeopleDict references IdentityDetector.ReversedPeopleDict.
	reversedPeopleDict []string
}

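// A minimal configuration sketch (the values are illustrative; in the normal
// pipeline flow this item is created through core.Registry and configured via
// Configure() rather than constructed by hand):
//
//	analysis := BurndownAnalysis{
//		Granularity: 30,   // one band per 30 days
//		Sampling:    30,   // one snapshot per 30 days
//		TrackFiles:  true, // also collect per-file histories
//	}
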
// BurndownResult carries the result of running BurndownAnalysis - it is returned by
// BurndownAnalysis.Finalize().
type BurndownResult struct {
	// [number of samples][number of bands]
	// The number of samples depends on Sampling: the smaller Sampling, the bigger the number.
	// The number of bands depends on Granularity: the smaller Granularity, the bigger the number.
	GlobalHistory [][]int64
	// The key is the path inside the Git repository. The value's dimensions are the same as
	// in GlobalHistory.
	FileHistories map[string][][]int64
	// [number of people][number of samples][number of bands]
	PeopleHistories [][][]int64
	// [number of people][number of people + 2]
	// The first element is the total number of lines added by the author.
	// The second element is the number of removals by unidentified authors (outside reversedPeopleDict).
	// The rest of the elements equal the number of line removals by the corresponding
	// authors in reversedPeopleDict: 2 -> 0, 3 -> 1, etc.
	PeopleMatrix [][]int64
	// The following members are private.
	// reversedPeopleDict is borrowed from IdentityDetector and becomes available after
	// Pipeline.Initialize(facts map[string]interface{}). Thus it can be obtained via
	// facts[FactIdentityDetectorReversedPeopleDict].
	reversedPeopleDict []string
	// sampling and granularity are copied from BurndownAnalysis and stored for service purposes
	// such as merging several results together.
	sampling    int
	granularity int
}

const (
	// ConfigBurndownGranularity is the name of the option to set BurndownAnalysis.Granularity.
	ConfigBurndownGranularity = "Burndown.Granularity"
	// ConfigBurndownSampling is the name of the option to set BurndownAnalysis.Sampling.
	ConfigBurndownSampling = "Burndown.Sampling"
	// ConfigBurndownTrackFiles enables burndown collection for files.
	ConfigBurndownTrackFiles = "Burndown.TrackFiles"
	// ConfigBurndownTrackPeople enables burndown collection for authors.
	ConfigBurndownTrackPeople = "Burndown.TrackPeople"
	// ConfigBurndownDebug enables some extra debug assertions.
	ConfigBurndownDebug = "Burndown.Debug"
	// DefaultBurndownGranularity is the default number of days for BurndownAnalysis.Granularity
	// and BurndownAnalysis.Sampling.
	DefaultBurndownGranularity = 30
	// authorSelf is the internal author index which is used in BurndownAnalysis.Finalize() to
	// format the author overwrites matrix.
	authorSelf = (1 << (32 - burndown.TreeMaxBinPower)) - 2
)

// Name of this PipelineItem. Uniquely identifies the type, used for mapping keys, etc.
func (analyser *BurndownAnalysis) Name() string {
	return "Burndown"
}

// Provides returns the list of names of entities which are produced by this PipelineItem.
// Each produced entity will be inserted into `deps` of dependent Consume()-s according
// to this list. Also used by core.Registry to build the global map of providers.
func (analyser *BurndownAnalysis) Provides() []string {
	return []string{}
}

// Requires returns the list of names of entities which are needed by this PipelineItem.
// Each requested entity will be inserted into `deps` of Consume(). In turn, those
// entities are Provides() upstream.
func (analyser *BurndownAnalysis) Requires() []string {
	arr := [...]string{
		items.DependencyFileDiff, items.DependencyTreeChanges, items.DependencyBlobCache,
		items.DependencyDay, identity.DependencyAuthor}
	return arr[:]
}

// ListConfigurationOptions returns the list of changeable public properties of this PipelineItem.
func (analyser *BurndownAnalysis) ListConfigurationOptions() []core.ConfigurationOption {
	options := [...]core.ConfigurationOption{{
		Name:        ConfigBurndownGranularity,
		Description: "How many days there are in a single band.",
		Flag:        "granularity",
		Type:        core.IntConfigurationOption,
		Default:     DefaultBurndownGranularity}, {
		Name:        ConfigBurndownSampling,
		Description: "How frequently to record the state in days.",
		Flag:        "sampling",
		Type:        core.IntConfigurationOption,
		Default:     DefaultBurndownGranularity}, {
		Name:        ConfigBurndownTrackFiles,
		Description: "Record detailed statistics per each file.",
		Flag:        "burndown-files",
		Type:        core.BoolConfigurationOption,
		Default:     false}, {
		Name:        ConfigBurndownTrackPeople,
		Description: "Record detailed statistics per each developer.",
		Flag:        "burndown-people",
		Type:        core.BoolConfigurationOption,
		Default:     false}, {
		Name:        ConfigBurndownDebug,
		Description: "Validate the trees on each step.",
		Flag:        "burndown-debug",
		Type:        core.BoolConfigurationOption,
		Default:     false},
	}
	return options[:]
}

// Configure sets the properties previously published by ListConfigurationOptions().
func (analyser *BurndownAnalysis) Configure(facts map[string]interface{}) {
	if val, exists := facts[ConfigBurndownGranularity].(int); exists {
		analyser.Granularity = val
	}
	if val, exists := facts[ConfigBurndownSampling].(int); exists {
		analyser.Sampling = val
	}
	if val, exists := facts[ConfigBurndownTrackFiles].(bool); exists {
		analyser.TrackFiles = val
	}
	if people, exists := facts[ConfigBurndownTrackPeople].(bool); people {
		if val, exists := facts[identity.FactIdentityDetectorPeopleCount].(int); exists {
			analyser.PeopleNumber = val
			analyser.reversedPeopleDict = facts[identity.FactIdentityDetectorReversedPeopleDict].([]string)
		}
	} else if exists {
		analyser.PeopleNumber = 0
	}
	if val, exists := facts[ConfigBurndownDebug].(bool); exists {
		analyser.Debug = val
	}
}

// Flag returns the name of the command line switch which enables this analysis.
func (analyser *BurndownAnalysis) Flag() string {
	return "burndown"
}

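// For reference, the flags defined in ListConfigurationOptions() above wire up
// roughly as follows (a sketch; the exact CLI syntax depends on the hercules
// binary in use):
//
//	hercules --burndown --granularity 30 --sampling 15 \
//	    --burndown-files --burndown-people <repository>
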
// Initialize resets the temporary caches and prepares this PipelineItem for a series of Consume()
// calls. The repository which is going to be analysed is supplied as an argument.
func (analyser *BurndownAnalysis) Initialize(repository *git.Repository) {
	if analyser.Granularity <= 0 {
		log.Printf("Warning: adjusted the granularity to %d days\n",
			DefaultBurndownGranularity)
		analyser.Granularity = DefaultBurndownGranularity
	}
	if analyser.Sampling <= 0 {
		log.Printf("Warning: adjusted the sampling to %d days\n",
			DefaultBurndownGranularity)
		analyser.Sampling = DefaultBurndownGranularity
	}
	if analyser.Sampling > analyser.Granularity {
		log.Printf("Warning: granularity may not be less than sampling, adjusted to %d\n",
			analyser.Granularity)
		analyser.Sampling = analyser.Granularity
	}
	analyser.repository = repository
	analyser.globalStatus = map[int]int64{}
	analyser.globalHistory = [][]int64{}
	analyser.fileHistories = map[string][][]int64{}
	analyser.peopleHistories = make([][][]int64, analyser.PeopleNumber)
	analyser.files = map[string]*burndown.File{}
	analyser.matrix = make([]map[int]int64, analyser.PeopleNumber)
	analyser.people = make([]map[int]int64, analyser.PeopleNumber)
	analyser.day = 0
	analyser.previousDay = 0
}

// Consume runs this PipelineItem on the next commit data.
// `deps` contain all the results from upstream PipelineItem-s as requested by Requires().
// Additionally, DependencyCommit is always present there and represents the analysed *object.Commit.
// This function returns the mapping with analysis results. The keys must be the same as
// in Provides(). If there was an error, nil is returned.
func (analyser *BurndownAnalysis) Consume(deps map[string]interface{}) (map[string]interface{}, error) {
	commit := deps[core.DependencyCommit].(*object.Commit)
	author := deps[identity.DependencyAuthor].(int)
	day := deps[items.DependencyDay].(int)
	if len(commit.ParentHashes) == 1 {
		analyser.day = day
		analyser.onNewDay()
	} else {
		// effectively disables the status updates if the commit is a merge;
		// we will analyse the conflict resolution in Merge()
		analyser.day = burndown.TreeMergeMark
	}
	cache := deps[items.DependencyBlobCache].(map[plumbing.Hash]*object.Blob)
	treeDiffs := deps[items.DependencyTreeChanges].(object.Changes)
	fileDiffs := deps[items.DependencyFileDiff].(map[string]items.FileDiffData)
	for _, change := range treeDiffs {
		action, _ := change.Action()
		var err error
		switch action {
		case merkletrie.Insert:
			err = analyser.handleInsertion(change, author, cache)
		case merkletrie.Delete:
			err = analyser.handleDeletion(change, author, cache)
		case merkletrie.Modify:
			err = analyser.handleModification(change, author, cache, fileDiffs)
		}
		if err != nil {
			return nil, err
		}
	}
	// in case of a merge, analyser.day was set to TreeMergeMark above; restore it
	analyser.day = day
	return nil, nil
}

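// Fork clones this PipelineItem for every branch which is analysed in parallel.
// The caches are copied by reference except the tracked files, which are
// deep-copied so that the branches do not interfere with each other.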
func (analyser *BurndownAnalysis) Fork(n int) []core.PipelineItem {
	result := make([]core.PipelineItem, n)
	for i := range result {
		clone := *analyser
		clone.files = map[string]*burndown.File{}
		for key, file := range analyser.files {
			clone.files[key] = file.Clone(false)
		}
		result[i] = &clone
	}
	return result
}

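// Merge combines the forked branches of this PipelineItem after a merge commit:
// every tracked file is merged with its counterparts from the other branches,
// and the merged state is written back to each branch.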
func (analyser *BurndownAnalysis) Merge(branches []core.PipelineItem) {
	for key, file := range analyser.files {
		others := make([]*burndown.File, len(branches))
		for i, branch := range branches {
			others[i] = branch.(*BurndownAnalysis).files[key]
		}
		// don't worry, we compare the hashes first before heavy-lifting
		if file.Merge(analyser.day, others...) {
			for _, branch := range branches {
				branch.(*BurndownAnalysis).files[key] = file.Clone(false)
			}
		}
	}
	analyser.onNewDay()
}

// Finalize returns the result of the analysis. Further Consume() calls are not expected.
func (analyser *BurndownAnalysis) Finalize() interface{} {
	gs, fss, pss := analyser.groupStatus()
	analyser.updateHistories(1, gs, fss, pss)
	for key, statuses := range analyser.fileHistories {
		if len(statuses) == len(analyser.globalHistory) {
			continue
		}
		padding := make([][]int64, len(analyser.globalHistory)-len(statuses))
		for i := range padding {
			padding[i] = make([]int64, len(analyser.globalStatus))
		}
		analyser.fileHistories[key] = append(padding, statuses...)
	}
	peopleMatrix := make([][]int64, analyser.PeopleNumber)
	for i, row := range analyser.matrix {
		mrow := make([]int64, analyser.PeopleNumber+2)
		peopleMatrix[i] = mrow
		for key, val := range row {
			if key == identity.AuthorMissing {
				key = -1
			} else if key == authorSelf {
				key = -2
			}
			mrow[key+2] = val
		}
	}
	return BurndownResult{
		GlobalHistory:      analyser.globalHistory,
		FileHistories:      analyser.fileHistories,
		PeopleHistories:    analyser.peopleHistories,
		PeopleMatrix:       peopleMatrix,
		reversedPeopleDict: analyser.reversedPeopleDict,
		sampling:           analyser.Sampling,
		granularity:        analyser.Granularity,
	}
}

// Serialize converts the analysis result as returned by Finalize() to text or bytes.
// The text format is YAML and the bytes format is Protocol Buffers.
func (analyser *BurndownAnalysis) Serialize(result interface{}, binary bool, writer io.Writer) error {
	burndownResult := result.(BurndownResult)
	if binary {
		return analyser.serializeBinary(&burndownResult, writer)
	}
	analyser.serializeText(&burndownResult, writer)
	return nil
}

// Deserialize converts the specified protobuf bytes to BurndownResult.
func (analyser *BurndownAnalysis) Deserialize(pbmessage []byte) (interface{}, error) {
	msg := pb.BurndownAnalysisResults{}
	err := proto.Unmarshal(pbmessage, &msg)
	if err != nil {
		return nil, err
	}
	result := BurndownResult{}
	convertCSR := func(mat *pb.BurndownSparseMatrix) [][]int64 {
		res := make([][]int64, mat.NumberOfRows)
		for i := 0; i < int(mat.NumberOfRows); i++ {
			res[i] = make([]int64, mat.NumberOfColumns)
			for j := 0; j < len(mat.Rows[i].Columns); j++ {
				res[i][j] = int64(mat.Rows[i].Columns[j])
			}
		}
		return res
	}
	result.GlobalHistory = convertCSR(msg.Project)
	result.FileHistories = map[string][][]int64{}
	for _, mat := range msg.Files {
		result.FileHistories[mat.Name] = convertCSR(mat)
	}
	result.reversedPeopleDict = make([]string, len(msg.People))
	result.PeopleHistories = make([][][]int64, len(msg.People))
	for i, mat := range msg.People {
		result.PeopleHistories[i] = convertCSR(mat)
		result.reversedPeopleDict[i] = mat.Name
	}
	if msg.PeopleInteraction != nil {
		result.PeopleMatrix = make([][]int64, msg.PeopleInteraction.NumberOfRows)
	}
	for i := 0; i < len(result.PeopleMatrix); i++ {
		result.PeopleMatrix[i] = make([]int64, msg.PeopleInteraction.NumberOfColumns)
		for j := int(msg.PeopleInteraction.Indptr[i]); j < int(msg.PeopleInteraction.Indptr[i+1]); j++ {
			result.PeopleMatrix[i][msg.PeopleInteraction.Indices[j]] = msg.PeopleInteraction.Data[j]
		}
	}
	result.sampling = int(msg.Sampling)
	result.granularity = int(msg.Granularity)
	return result, nil
}

// MergeResults combines two BurndownResult-s together.
func (analyser *BurndownAnalysis) MergeResults(
	r1, r2 interface{}, c1, c2 *core.CommonAnalysisResult) interface{} {
	bar1 := r1.(BurndownResult)
	bar2 := r2.(BurndownResult)
	merged := BurndownResult{}
	if bar1.sampling < bar2.sampling {
		merged.sampling = bar1.sampling
	} else {
		merged.sampling = bar2.sampling
	}
	if bar1.granularity < bar2.granularity {
		merged.granularity = bar1.granularity
	} else {
		merged.granularity = bar2.granularity
	}
	var people map[string][3]int
	people, merged.reversedPeopleDict = identity.Detector{}.MergeReversedDicts(
		bar1.reversedPeopleDict, bar2.reversedPeopleDict)
	var wg sync.WaitGroup
	if len(bar1.GlobalHistory) > 0 || len(bar2.GlobalHistory) > 0 {
		wg.Add(1)
		go func() {
			defer wg.Done()
			merged.GlobalHistory = mergeMatrices(
				bar1.GlobalHistory, bar2.GlobalHistory,
				bar1.granularity, bar1.sampling,
				bar2.granularity, bar2.sampling,
				c1, c2)
		}()
	}
	if len(bar1.FileHistories) > 0 || len(bar2.FileHistories) > 0 {
		merged.FileHistories = map[string][][]int64{}
		historyMutex := sync.Mutex{}
		for key, fh1 := range bar1.FileHistories {
			if fh2, exists := bar2.FileHistories[key]; exists {
				wg.Add(1)
				go func(fh1, fh2 [][]int64, key string) {
					defer wg.Done()
					historyMutex.Lock()
					defer historyMutex.Unlock()
					merged.FileHistories[key] = mergeMatrices(
						fh1, fh2, bar1.granularity, bar1.sampling, bar2.granularity, bar2.sampling, c1, c2)
				}(fh1, fh2, key)
			} else {
				historyMutex.Lock()
				merged.FileHistories[key] = fh1
				historyMutex.Unlock()
			}
		}
		for key, fh2 := range bar2.FileHistories {
			if _, exists := bar1.FileHistories[key]; !exists {
				historyMutex.Lock()
				merged.FileHistories[key] = fh2
				historyMutex.Unlock()
			}
		}
	}
	if len(merged.reversedPeopleDict) > 0 {
		merged.PeopleHistories = make([][][]int64, len(merged.reversedPeopleDict))
		for i, key := range merged.reversedPeopleDict {
			ptrs := people[key]
			if ptrs[1] < 0 {
				if len(bar2.PeopleHistories) > 0 {
					merged.PeopleHistories[i] = bar2.PeopleHistories[ptrs[2]]
				}
			} else if ptrs[2] < 0 {
				if len(bar1.PeopleHistories) > 0 {
					merged.PeopleHistories[i] = bar1.PeopleHistories[ptrs[1]]
				}
			} else {
				wg.Add(1)
				go func(i int) {
					defer wg.Done()
					var m1, m2 [][]int64
					if len(bar1.PeopleHistories) > 0 {
						m1 = bar1.PeopleHistories[ptrs[1]]
					}
					if len(bar2.PeopleHistories) > 0 {
						m2 = bar2.PeopleHistories[ptrs[2]]
					}
					merged.PeopleHistories[i] = mergeMatrices(
						m1, m2,
						bar1.granularity, bar1.sampling,
						bar2.granularity, bar2.sampling,
						c1, c2,
					)
				}(i)
			}
		}
		wg.Add(1)
		go func() {
			defer wg.Done()
			if len(bar2.PeopleMatrix) == 0 {
				merged.PeopleMatrix = bar1.PeopleMatrix
				// extend the matrix in both directions
				for i := 0; i < len(merged.PeopleMatrix); i++ {
					for j := len(bar1.reversedPeopleDict); j < len(merged.reversedPeopleDict); j++ {
						merged.PeopleMatrix[i] = append(merged.PeopleMatrix[i], 0)
					}
				}
				for i := len(bar1.reversedPeopleDict); i < len(merged.reversedPeopleDict); i++ {
					merged.PeopleMatrix = append(
						merged.PeopleMatrix, make([]int64, len(merged.reversedPeopleDict)+2))
				}
			} else {
				merged.PeopleMatrix = make([][]int64, len(merged.reversedPeopleDict))
				for i := range merged.PeopleMatrix {
					merged.PeopleMatrix[i] = make([]int64, len(merged.reversedPeopleDict)+2)
				}
				for i, key := range bar1.reversedPeopleDict {
					mi := people[key][0] // index in merged.reversedPeopleDict
					copy(merged.PeopleMatrix[mi][:2], bar1.PeopleMatrix[i][:2])
					for j, val := range bar1.PeopleMatrix[i][2:] {
						merged.PeopleMatrix[mi][2+people[bar1.reversedPeopleDict[j]][0]] = val
					}
				}
				for i, key := range bar2.reversedPeopleDict {
					mi := people[key][0] // index in merged.reversedPeopleDict
					merged.PeopleMatrix[mi][0] += bar2.PeopleMatrix[i][0]
					merged.PeopleMatrix[mi][1] += bar2.PeopleMatrix[i][1]
					for j, val := range bar2.PeopleMatrix[i][2:] {
						merged.PeopleMatrix[mi][2+people[bar2.reversedPeopleDict[j]][0]] += val
					}
				}
			}
		}()
	}
	wg.Wait()
	return merged
}

// mergeMatrices takes two [number of samples][number of bands] matrices,
// resamples them to days so that they become square, sums them, and resamples back
// to the least of (sampling1, sampling2) and (granularity1, granularity2).
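//
// For example, merging a result with (granularity, sampling) = (30, 30) into
// one with (15, 15) produces a matrix with (granularity, sampling) = (15, 15):
// both inputs are exploded to daily resolution, summed, and sampled back.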
func mergeMatrices(m1, m2 [][]int64, granularity1, sampling1, granularity2, sampling2 int,
	c1, c2 *core.CommonAnalysisResult) [][]int64 {
	commonMerged := *c1
	commonMerged.Merge(c2)
	var granularity, sampling int
	if sampling1 < sampling2 {
		sampling = sampling1
	} else {
		sampling = sampling2
	}
	if granularity1 < granularity2 {
		granularity = granularity1
	} else {
		granularity = granularity2
	}
	size := int((commonMerged.EndTime - commonMerged.BeginTime) / (3600 * 24))
	daily := make([][]float32, size+granularity)
	for i := range daily {
		daily[i] = make([]float32, size+sampling)
	}
	if len(m1) > 0 {
		addBurndownMatrix(m1, granularity1, sampling1, daily,
			int(c1.BeginTime-commonMerged.BeginTime)/(3600*24))
	}
	if len(m2) > 0 {
		addBurndownMatrix(m2, granularity2, sampling2, daily,
			int(c2.BeginTime-commonMerged.BeginTime)/(3600*24))
	}
	// convert daily to [][]int64
	result := make([][]int64, (size+sampling-1)/sampling)
	for i := range result {
		result[i] = make([]int64, (size+granularity-1)/granularity)
		sampledIndex := i * sampling
		if i == len(result)-1 {
			sampledIndex = size - 1
		}
		for j := 0; j < len(result[i]); j++ {
			accum := float32(0)
			for k := j * granularity; k < (j+1)*granularity && k < size; k++ {
				accum += daily[sampledIndex][k]
			}
			result[i][j] = int64(accum)
		}
	}
	return result
}

// addBurndownMatrix explodes `matrix` so that it is daily sampled and has daily bands,
// shifts it by `offset` days and adds it to the accumulator. `daily` is square and is
// guaranteed by the caller to fit `matrix`:
// rows: *at least* len(matrix) * sampling + offset
// columns: *at least* len(matrix[...]) * granularity + offset
// `matrix` can be sparse, so the trailing columns which are equal to 0 may be truncated.
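//
// The interpolation below is linear: values between two consecutive samples of
// the same band are assumed to grow ("raise") or shrink ("decay") at a constant
// daily rate, so e.g. a band going from 100 to 70 lines across a 15-day sample
// loses 2 lines per day in the exploded daily matrix.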
func addBurndownMatrix(matrix [][]int64, granularity, sampling int, daily [][]float32, offset int) {
	// Determine the maximum number of bands; the actual one may be larger but we do not care
	maxCols := 0
	for _, row := range matrix {
		if maxCols < len(row) {
			maxCols = len(row)
		}
	}
	neededRows := len(matrix)*sampling + offset
	if len(daily) < neededRows {
		panic(fmt.Sprintf("merge bug: too few daily rows: required %d, have %d",
			neededRows, len(daily)))
	}
	if len(daily[0]) < maxCols {
		panic(fmt.Sprintf("merge bug: too few daily cols: required %d, have %d",
			maxCols, len(daily[0])))
	}
	for x := 0; x < maxCols; x++ {
		for y := 0; y < len(matrix); y++ {
			if x*granularity > (y+1)*sampling {
				// the future is zeros
				continue
			}
			decay := func(startIndex int, startVal float32) {
				if startVal == 0 {
					return
				}
				k := float32(matrix[y][x]) / startVal // <= 1
				scale := float32((y+1)*sampling - startIndex)
				for i := x * granularity; i < (x+1)*granularity; i++ {
					initial := daily[startIndex-1+offset][i+offset]
					for j := startIndex; j < (y+1)*sampling; j++ {
						daily[j+offset][i+offset] = initial * (1 + (k-1)*float32(j-startIndex+1)/scale)
					}
				}
			}
			raise := func(finishIndex int, finishVal float32) {
				var initial float32
				if y > 0 {
					initial = float32(matrix[y-1][x])
				}
				startIndex := y * sampling
				if startIndex < x*granularity {
					startIndex = x * granularity
				}
				if startIndex == finishIndex {
					return
				}
				avg := (finishVal - initial) / float32(finishIndex-startIndex)
				for j := y * sampling; j < finishIndex; j++ {
					for i := startIndex; i <= j; i++ {
						daily[j+offset][i+offset] = avg
					}
				}
				// copy [x*g..y*s)
				for j := y * sampling; j < finishIndex; j++ {
					for i := x * granularity; i < y*sampling; i++ {
						daily[j+offset][i+offset] = daily[j-1+offset][i+offset]
					}
				}
			}
			if (x+1)*granularity >= (y+1)*sampling {
				// x*granularity <= (y+1)*sampling
				// 1. x*granularity <= y*sampling
				//    y*sampling..(y+1)sampling
				//
				//         x+1
				//          /
				//         /
				//        / y+1 -|
				//       /       |
				//      / y     -|
				//     /
				//    / x
				//
				// 2. x*granularity > y*sampling
				//    x*granularity..(y+1)sampling
				//
				//         x+1
				//          /
				//         /
				//        / y+1 -|
				//       /       |
				//      / x     -|
				//     /
				//    / y
				if x*granularity <= y*sampling {
					raise((y+1)*sampling, float32(matrix[y][x]))
				} else if (y+1)*sampling > x*granularity {
					raise((y+1)*sampling, float32(matrix[y][x]))
					avg := float32(matrix[y][x]) / float32((y+1)*sampling-x*granularity)
					for j := x * granularity; j < (y+1)*sampling; j++ {
						for i := x * granularity; i <= j; i++ {
							daily[j+offset][i+offset] = avg
						}
					}
				}
			} else if (x+1)*granularity >= y*sampling {
				// y*sampling <= (x+1)*granularity < (y+1)sampling
				// y*sampling..(x+1)*granularity
				// (x+1)*granularity..(y+1)sampling
				//
				//        x+1
				//         /\
				//        /  \
				//       /    \
				//      /    y+1
				//     /
				//    y
				v1 := float32(matrix[y-1][x])
				v2 := float32(matrix[y][x])
				var peak float32
				delta := float32((x+1)*granularity - y*sampling)
				var scale float32
				var previous float32
				if y > 0 && (y-1)*sampling >= x*granularity {
					// x*g <= (y-1)*s <= y*s <= (x+1)*g <= (y+1)*s
					//           |________|.......^
					if y > 1 {
						previous = float32(matrix[y-2][x])
					}
					scale = float32(sampling)
				} else {
					// (y-1)*s < x*g <= y*s <= (x+1)*g <= (y+1)*s
					//            |______|.......^
					if y == 0 {
						scale = float32(sampling)
					} else {
						scale = float32(y*sampling - x*granularity)
					}
				}
				peak = v1 + (v1-previous)/scale*delta
				if v2 > peak {
					// we need to adjust the peak, it may not be less than the decayed value
					if y < len(matrix)-1 {
						// y*s <= (x+1)*g <= (y+1)*s < (y+2)*s
						//           ^.........|_________|
						k := (v2 - float32(matrix[y+1][x])) / float32(sampling) // > 0
						peak = float32(matrix[y][x]) + k*float32((y+1)*sampling-(x+1)*granularity)
						// peak > v2 > v1
					} else {
						peak = v2
						// not enough data to interpolate; this is at least not restricted
					}
				}
				raise((x+1)*granularity, peak)
				decay((x+1)*granularity, peak)
			} else {
				// (x+1)*granularity < y*sampling
				// y*sampling..(y+1)sampling
				decay(y*sampling, float32(matrix[y-1][x]))
			}
		}
	}
}

func (analyser *BurndownAnalysis) serializeText(result *BurndownResult, writer io.Writer) {
	fmt.Fprintln(writer, "  granularity:", result.granularity)
	fmt.Fprintln(writer, "  sampling:", result.sampling)
	yaml.PrintMatrix(writer, result.GlobalHistory, 2, "project", true)
	if len(result.FileHistories) > 0 {
		fmt.Fprintln(writer, "  files:")
		keys := sortedKeys(result.FileHistories)
		for _, key := range keys {
			yaml.PrintMatrix(writer, result.FileHistories[key], 4, key, true)
		}
	}
	if len(result.PeopleHistories) > 0 {
		fmt.Fprintln(writer, "  people_sequence:")
		for key := range result.PeopleHistories {
			fmt.Fprintln(writer, "    - "+yaml.SafeString(result.reversedPeopleDict[key]))
		}
		fmt.Fprintln(writer, "  people:")
		for key, val := range result.PeopleHistories {
			yaml.PrintMatrix(writer, val, 4, result.reversedPeopleDict[key], true)
		}
		fmt.Fprintln(writer, "  people_interaction: |-")
		yaml.PrintMatrix(writer, result.PeopleMatrix, 4, "", false)
	}
}

func (analyser *BurndownAnalysis) serializeBinary(result *BurndownResult, writer io.Writer) error {
	message := pb.BurndownAnalysisResults{
		Granularity: int32(result.granularity),
		Sampling:    int32(result.sampling),
	}
	if len(result.GlobalHistory) > 0 {
		message.Project = pb.ToBurndownSparseMatrix(result.GlobalHistory, "project")
	}
	if len(result.FileHistories) > 0 {
		message.Files = make([]*pb.BurndownSparseMatrix, len(result.FileHistories))
		keys := sortedKeys(result.FileHistories)
		i := 0
		for _, key := range keys {
			message.Files[i] = pb.ToBurndownSparseMatrix(
				result.FileHistories[key], key)
			i++
		}
	}
	if len(result.PeopleHistories) > 0 {
		message.People = make(
			[]*pb.BurndownSparseMatrix, len(result.PeopleHistories))
		for key, val := range result.PeopleHistories {
			if len(val) > 0 {
				message.People[key] = pb.ToBurndownSparseMatrix(val, result.reversedPeopleDict[key])
			}
		}
		message.PeopleInteraction = pb.DenseToCompressedSparseRowMatrix(result.PeopleMatrix)
	}
	serialized, err := proto.Marshal(&message)
	if err != nil {
		return err
	}
	writer.Write(serialized)
	return nil
}

func sortedKeys(m map[string][][]int64) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}

func checkClose(c io.Closer) {
	if err := c.Close(); err != nil {
		panic(err)
	}
}

// We do a hack and store the day in the first 14 bits and the author index in the last 18.
// Strictly speaking, int can be 64-bit and then the author index occupies 32+18 bits.
// This hack is needed to simplify the values storage inside File-s. We can compare
// different values together and they are compared as days for the same author.
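//
// For instance, with burndown.TreeMaxBinPower = 14 (so burndown.TreeMergeMark =
// (1 << 14) - 1 = 16383), packPersonWithDay(3, 100) yields 3<<14 | 100 = 49252,
// and unpackPersonWithDay(49252) returns (3, 100) again.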
func (analyser *BurndownAnalysis) packPersonWithDay(person int, day int) int {
	if analyser.PeopleNumber == 0 {
		return day
	}
	result := day & burndown.TreeMergeMark
	result |= person << burndown.TreeMaxBinPower
	// This effectively means max (16383 - 1) days (>44 years) and (131072 - 2) devs.
	// One day less because burndown.TreeMergeMark = ((1 << 14) - 1) is a special day.
	return result
}

func (analyser *BurndownAnalysis) unpackPersonWithDay(value int) (int, int) {
	if analyser.PeopleNumber == 0 {
		return identity.AuthorMissing, value
	}
	return value >> burndown.TreeMaxBinPower, value & burndown.TreeMergeMark
}

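// onNewDay takes a periodic snapshot whenever the current day crosses a sampling
// boundary. For example, with Sampling = 15, moving from day 14 to day 35 gives
// delta = 35/15 - 14/15 = 2, so two new history records are appended.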
func (analyser *BurndownAnalysis) onNewDay() {
	day := analyser.day
	sampling := analyser.Sampling
	delta := (day / sampling) - (analyser.previousDay / sampling)
	if delta > 0 {
		analyser.previousDay = day
		gs, fss, pss := analyser.groupStatus()
		analyser.updateHistories(delta, gs, fss, pss)
	}
}

func (analyser *BurndownAnalysis) updateStatus(
	status interface{}, _ int, previousValue int, delta int) {
	_, previousTime := analyser.unpackPersonWithDay(previousValue)
	status.(map[int]int64)[previousTime] += int64(delta)
}

func (analyser *BurndownAnalysis) updatePeople(
	peopleUncasted interface{}, _ int, previousValue int, delta int) {
	previousAuthor, previousTime := analyser.unpackPersonWithDay(previousValue)
	if previousAuthor == identity.AuthorMissing {
		return
	}
	people := peopleUncasted.([]map[int]int64)
	stats := people[previousAuthor]
	if stats == nil {
		stats = map[int]int64{}
		people[previousAuthor] = stats
	}
	stats[previousTime] += int64(delta)
}

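// updateMatrix records the author interactions: deletions of oldAuthor's lines
// by newAuthor, plus self insertions which are booked under the special
// authorSelf index (see the matrix field and the authorSelf constant above).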
func (analyser *BurndownAnalysis) updateMatrix(
	matrixUncasted interface{}, currentTime int, previousTime int, delta int) {
	matrix := matrixUncasted.([]map[int]int64)
	newAuthor, _ := analyser.unpackPersonWithDay(currentTime)
	oldAuthor, _ := analyser.unpackPersonWithDay(previousTime)
	if oldAuthor == identity.AuthorMissing {
		return
	}
	if newAuthor == oldAuthor && delta > 0 {
		newAuthor = authorSelf
	}
	row := matrix[oldAuthor]
	if row == nil {
		row = map[int]int64{}
		matrix[oldAuthor] = row
	}
	cell, exists := row[newAuthor]
	if !exists {
		row[newAuthor] = 0
		cell = 0
	}
	row[newAuthor] = cell + int64(delta)
}

func (analyser *BurndownAnalysis) newFile(
	hash plumbing.Hash, author int, day int, size int, global map[int]int64,
	people []map[int]int64, matrix []map[int]int64) *burndown.File {
	statuses := make([]burndown.Status, 1)
	statuses[0] = burndown.NewStatus(global, analyser.updateStatus)
	if analyser.TrackFiles {
		statuses = append(statuses, burndown.NewStatus(map[int]int64{}, analyser.updateStatus))
	}
	if analyser.PeopleNumber > 0 {
		statuses = append(statuses, burndown.NewStatus(people, analyser.updatePeople))
		statuses = append(statuses, burndown.NewStatus(matrix, analyser.updateMatrix))
		day = analyser.packPersonWithDay(author, day)
	}
	return burndown.NewFile(hash, day, size, statuses...)
}

func (analyser *BurndownAnalysis) handleInsertion(
	change *object.Change, author int, cache map[plumbing.Hash]*object.Blob) error {
	blob := cache[change.To.TreeEntry.Hash]
	lines, err := items.CountLines(blob)
	if err != nil {
		if err.Error() == "binary" {
			return nil
		}
		return err
	}
	name := change.To.Name
	file, exists := analyser.files[name]
	if exists {
		return fmt.Errorf("file %s already exists", name)
	}
	file = analyser.newFile(
		blob.Hash, author, analyser.day, lines,
		analyser.globalStatus, analyser.people, analyser.matrix)
	analyser.files[name] = file
	return nil
}

func (analyser *BurndownAnalysis) handleDeletion(
	change *object.Change, author int, cache map[plumbing.Hash]*object.Blob) error {
	blob := cache[change.From.TreeEntry.Hash]
	lines, err := items.CountLines(blob)
	if err != nil {
		if err.Error() == "binary" {
			return nil
		}
		return err
	}
	name := change.From.Name
	file := analyser.files[name]
	file.Update(analyser.packPersonWithDay(author, analyser.day), 0, 0, lines)
	file.Hash = plumbing.ZeroHash
	delete(analyser.files, name)
	return nil
}

func (analyser *BurndownAnalysis) handleModification(
	change *object.Change, author int, cache map[plumbing.Hash]*object.Blob,
	diffs map[string]items.FileDiffData) error {
	file, exists := analyser.files[change.From.Name]
	if !exists {
		// this indeed may happen
		return analyser.handleInsertion(change, author, cache)
	}
	file.Hash = change.To.TreeEntry.Hash
	// possible rename
	if change.To.Name != change.From.Name {
		err := analyser.handleRename(change.From.Name, change.To.Name)
		if err != nil {
			return err
		}
	}
	thisDiffs := diffs[change.To.Name]
	if file.Len() != thisDiffs.OldLinesOfCode {
		log.Printf("====TREE====\n%s", file.Dump())
		return fmt.Errorf("%s: internal integrity error src %d != %d %s -> %s",
			change.To.Name, thisDiffs.OldLinesOfCode, file.Len(),
			change.From.TreeEntry.Hash.String(), change.To.TreeEntry.Hash.String())
	}
	// we do not call RunesToDiffLines so the number of lines equals
	// the rune count
	position := 0
	pending := diffmatchpatch.Diff{Text: ""}
	apply := func(edit diffmatchpatch.Diff) {
		length := utf8.RuneCountInString(edit.Text)
		if edit.Type == diffmatchpatch.DiffInsert {
			file.Update(analyser.packPersonWithDay(author, analyser.day), position, length, 0)
			position += length
		} else {
			file.Update(analyser.packPersonWithDay(author, analyser.day), position, 0, length)
		}
		if analyser.Debug {
			file.Validate()
		}
	}
	for _, edit := range thisDiffs.Diffs {
		dumpBefore := ""
		if analyser.Debug {
			dumpBefore = file.Dump()
		}
		length := utf8.RuneCountInString(edit.Text)
		debugError := func() {
			log.Printf("%s: internal diff error\n", change.To.Name)
			log.Printf("Update(%d, %d, %d (0), %d (0))\n", analyser.day, position,
				length, utf8.RuneCountInString(pending.Text))
			if dumpBefore != "" {
				log.Printf("====TREE BEFORE====\n%s====END====\n", dumpBefore)
			}
			log.Printf("====TREE AFTER====\n%s====END====\n", file.Dump())
		}
		switch edit.Type {
		case diffmatchpatch.DiffEqual:
			if pending.Text != "" {
				apply(pending)
				pending.Text = ""
			}
			position += length
		case diffmatchpatch.DiffInsert:
			if pending.Text != "" {
				if pending.Type == diffmatchpatch.DiffInsert {
					debugError()
					return errors.New("DiffInsert may not appear after DiffInsert")
				}
				file.Update(analyser.packPersonWithDay(author, analyser.day), position, length,
					utf8.RuneCountInString(pending.Text))
				if analyser.Debug {
					file.Validate()
				}
				position += length
				pending.Text = ""
			} else {
				pending = edit
			}
		case diffmatchpatch.DiffDelete:
			if pending.Text != "" {
				debugError()
				return errors.New("DiffDelete may not appear after DiffInsert/DiffDelete")
			}
			pending = edit
		default:
			debugError()
			return fmt.Errorf("diff operation is not supported: %d", edit.Type)
		}
	}
	if pending.Text != "" {
		apply(pending)
		pending.Text = ""
	}
	if file.Len() != thisDiffs.NewLinesOfCode {
		return fmt.Errorf("%s: internal integrity error dst %d != %d",
			change.To.Name, thisDiffs.NewLinesOfCode, file.Len())
	}
	return nil
}

func (analyser *BurndownAnalysis) handleRename(from, to string) error {
	file, exists := analyser.files[from]
	if !exists {
		return fmt.Errorf("file %s does not exist", from)
	}
	analyser.files[to] = file
	delete(analyser.files, from)
	return nil
}

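// groupStatus aggregates the per-day alive line counts into bands of Granularity
// days, for the whole project, for each tracked file (when TrackFiles is on) and
// for each person. For example, with Granularity = 30 and day = 45 it returns
// two bands: days 0-29 and the partial band of days 30-45.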
func (analyser *BurndownAnalysis) groupStatus() ([]int64, map[string][]int64, [][]int64) {
	granularity := analyser.Granularity
	if granularity == 0 {
		granularity = 1
	}
	day := analyser.day
	day++
	adjust := 0
	if day%granularity != 0 {
		adjust = 1
	}
	global := make([]int64, day/granularity+adjust)
	var group int64
	for i := 0; i < day; i++ {
		group += analyser.globalStatus[i]
		if (i % granularity) == (granularity - 1) {
			global[i/granularity] = group
			group = 0
		}
	}
	if day%granularity != 0 {
		global[len(global)-1] = group
	}
	locals := make(map[string][]int64)
	if analyser.TrackFiles {
		for key, file := range analyser.files {
			status := make([]int64, day/granularity+adjust)
			var group int64
			for i := 0; i < day; i++ {
				group += file.Status(1).(map[int]int64)[i]
				if (i % granularity) == (granularity - 1) {
					status[i/granularity] = group
					group = 0
				}
			}
			if day%granularity != 0 {
				status[len(status)-1] = group
			}
			locals[key] = status
		}
	}
	peoples := make([][]int64, len(analyser.people))
	for key, person := range analyser.people {
		status := make([]int64, day/granularity+adjust)
		var group int64
		for i := 0; i < day; i++ {
			group += person[i]
			if (i % granularity) == (granularity - 1) {
				status[i/granularity] = group
				group = 0
			}
		}
		if day%granularity != 0 {
			status[len(status)-1] = group
		}
		peoples[key] = status
	}
	return global, locals, peoples
}

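// updateHistories appends `delta` copies of the freshly grouped statuses to the
// global, per-file and per-people history series; file histories whose files no
// longer exist are dropped.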
func (analyser *BurndownAnalysis) updateHistories(
	delta int, globalStatus []int64, fileStatuses map[string][]int64, peopleStatuses [][]int64) {
	for i := 0; i < delta; i++ {
		analyser.globalHistory = append(analyser.globalHistory, globalStatus)
	}
	toDelete := make([]string, 0)
	for key, fh := range analyser.fileHistories {
		ls, exists := fileStatuses[key]
		if !exists {
			toDelete = append(toDelete, key)
		} else {
			for i := 0; i < delta; i++ {
				fh = append(fh, ls)
			}
			analyser.fileHistories[key] = fh
		}
	}
	for _, key := range toDelete {
		delete(analyser.fileHistories, key)
	}
	for key, ls := range fileStatuses {
		fh, exists := analyser.fileHistories[key]
		if exists {
			continue
		}
		for i := 0; i < delta; i++ {
			fh = append(fh, ls)
		}
		analyser.fileHistories[key] = fh
	}
	for key, ph := range analyser.peopleHistories {
		ls := peopleStatuses[key]
		for i := 0; i < delta; i++ {
			ph = append(ph, ls)
		}
		analyser.peopleHistories[key] = ph
	}
}

func init() {
	core.Registry.Register(&BurndownAnalysis{})
}