burndown.go

package hercules

import (
	"errors"
	"fmt"
	"io"
	"os"
	"sort"
	"unicode/utf8"

	"github.com/gogo/protobuf/proto"
	"github.com/sergi/go-diff/diffmatchpatch"
	"gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
	"gopkg.in/src-d/go-git.v4/utils/merkletrie"
	"gopkg.in/src-d/hercules.v3/pb"
	"gopkg.in/src-d/hercules.v3/stdout"
)

// BurndownAnalysis gathers the line burndown statistics for a Git repository.
type BurndownAnalysis struct {
	// Granularity sets the size of each band - the number of days it spans.
	// Smaller values provide better resolution but require more work and eat more
	// memory. 30 days is usually enough.
	Granularity int
	// Sampling sets how detailed the statistics are - the size of the interval in
	// days between consecutive measurements. It is usually a good idea to set it
	// <= Granularity. Try 15 or 30.
	Sampling int
	// TrackFiles enables or disables the fine-grained per-file burndown analysis.
	// It does not change the top level burndown results.
	TrackFiles bool
	// PeopleNumber is the number of developers for which to collect the burndown
	// stats. 0 disables it.
	PeopleNumber int
	// Debug activates the debugging mode. Analyse() runs slower in this mode
	// but it accurately checks all the intermediate states for invariant
	// violations.
	Debug bool
	// repository points to the analysed Git repository struct from go-git.
	repository *git.Repository
	// globalStatus is the current daily alive number of lines; key is the number
	// of days from the beginning of the history.
	globalStatus map[int]int64
	// globalHistory is the snapshots of globalStatus, taken every Sampling days.
	globalHistory [][]int64
	// fileHistories is the periodic snapshots of each file's status.
	fileHistories map[string][][]int64
	// peopleHistories is the periodic snapshots of each person's status.
	peopleHistories [][][]int64
	// files is the mapping <file path> -> *File.
	files map[string]*File
	// matrix is the mutual deletions and self insertions.
	matrix []map[int]int64
	// people is the people's individual time stats.
	people []map[int]int64
	// day is the most recent day index processed.
	day int
	// previousDay is the day from the previous sample period -
	// different from DaysSinceStart.previousDay.
	previousDay int
	// reversedPeopleDict references IdentityDetector.ReversedPeopleDict.
	reversedPeopleDict []string
}

// BurndownResult carries the snapshot matrices produced by Finalize().
type BurndownResult struct {
	GlobalHistory   [][]int64
	FileHistories   map[string][][]int64
	PeopleHistories [][][]int64
	PeopleMatrix    [][]int64
}

const (
	ConfigBurndownGranularity = "Burndown.Granularity"
	ConfigBurndownSampling    = "Burndown.Sampling"
	ConfigBurndownTrackFiles  = "Burndown.TrackFiles"
	ConfigBurndownTrackPeople = "Burndown.TrackPeople"
	ConfigBurndownDebug       = "Burndown.Debug"
)

func (analyser *BurndownAnalysis) Name() string {
	return "Burndown"
}

func (analyser *BurndownAnalysis) Provides() []string {
	return []string{}
}

func (analyser *BurndownAnalysis) Requires() []string {
	arr := [...]string{"file_diff", "changes", "blob_cache", "day", "author"}
	return arr[:]
}
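
// The required entities are produced by the other registered pipeline items:
// the tree diff ("changes"), the blob cache ("blob_cache"), the day counter
// ("day"), the identity detector ("author") and the file diff ("file_diff").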

func (analyser *BurndownAnalysis) ListConfigurationOptions() []ConfigurationOption {
	options := [...]ConfigurationOption{{
		Name:        ConfigBurndownGranularity,
		Description: "How many days there are in a single band.",
		Flag:        "granularity",
		Type:        IntConfigurationOption,
		Default:     30}, {
		Name:        ConfigBurndownSampling,
		Description: "How frequently to record the state in days.",
		Flag:        "sampling",
		Type:        IntConfigurationOption,
		Default:     30}, {
		Name:        ConfigBurndownTrackFiles,
		Description: "Record detailed statistics per each file.",
		Flag:        "burndown-files",
		Type:        BoolConfigurationOption,
		Default:     false}, {
		Name:        ConfigBurndownTrackPeople,
		Description: "Record detailed statistics per each developer.",
		Flag:        "burndown-people",
		Type:        BoolConfigurationOption,
		Default:     false}, {
		Name:        ConfigBurndownDebug,
		Description: "Validate the trees on each step.",
		Flag:        "burndown-debug",
		Type:        BoolConfigurationOption,
		Default:     false},
	}
	return options[:]
}

func (analyser *BurndownAnalysis) Configure(facts map[string]interface{}) {
	if val, exists := facts[ConfigBurndownGranularity].(int); exists {
		analyser.Granularity = val
	}
	if val, exists := facts[ConfigBurndownSampling].(int); exists {
		analyser.Sampling = val
	}
	if val, exists := facts[ConfigBurndownTrackFiles].(bool); exists {
		analyser.TrackFiles = val
	}
	if people, exists := facts[ConfigBurndownTrackPeople].(bool); people {
		if val, exists := facts[FactIdentityDetectorPeopleCount].(int); exists {
			analyser.PeopleNumber = val
			analyser.reversedPeopleDict = facts[FactIdentityDetectorReversedPeopleDict].([]string)
		}
	} else if exists {
		analyser.PeopleNumber = 0
	}
	if val, exists := facts[ConfigBurndownDebug].(bool); exists {
		analyser.Debug = val
	}
}
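
// A minimal configuration sketch, assuming the facts map is assembled by
// hand (the keys are the constants declared above):
//
//	analyser := &BurndownAnalysis{}
//	analyser.Configure(map[string]interface{}{
//		ConfigBurndownGranularity: 30,
//		ConfigBurndownSampling:    15,
//		ConfigBurndownTrackFiles:  true,
//	})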

func (analyser *BurndownAnalysis) Flag() string {
	return "burndown"
}

func (analyser *BurndownAnalysis) Initialize(repository *git.Repository) {
	if analyser.Granularity <= 0 {
		fmt.Fprintln(os.Stderr, "Warning: adjusted the granularity to 30 days")
		analyser.Granularity = 30
	}
	if analyser.Sampling <= 0 {
		fmt.Fprintln(os.Stderr, "Warning: adjusted the sampling to 30 days")
		analyser.Sampling = 30
	}
	analyser.repository = repository
	analyser.globalStatus = map[int]int64{}
	analyser.globalHistory = [][]int64{}
	analyser.fileHistories = map[string][][]int64{}
	analyser.peopleHistories = make([][][]int64, analyser.PeopleNumber)
	analyser.files = map[string]*File{}
	analyser.matrix = make([]map[int]int64, analyser.PeopleNumber)
	analyser.people = make([]map[int]int64, analyser.PeopleNumber)
	analyser.day = 0
	analyser.previousDay = 0
}

func (analyser *BurndownAnalysis) Consume(deps map[string]interface{}) (map[string]interface{}, error) {
	sampling := analyser.Sampling
	if sampling == 0 {
		sampling = 1
	}
	author := deps["author"].(int)
	analyser.day = deps["day"].(int)
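	// Count how many sampling boundaries were crossed since the previous
	// sample. E.g. with sampling == 30, moving from day 29 to day 31 yields
	// 31/30 - 29/30 == 1, so exactly one snapshot is recorded below.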
	delta := (analyser.day / sampling) - (analyser.previousDay / sampling)
	if delta > 0 {
		analyser.previousDay = analyser.day
		gs, fss, pss := analyser.groupStatus()
		analyser.updateHistories(gs, fss, pss, delta)
	}
	cache := deps["blob_cache"].(map[plumbing.Hash]*object.Blob)
	treeDiffs := deps["changes"].(object.Changes)
	fileDiffs := deps["file_diff"].(map[string]FileDiffData)
	for _, change := range treeDiffs {
		action, err := change.Action()
		if err != nil {
			return nil, err
		}
		switch action {
		case merkletrie.Insert:
			err = analyser.handleInsertion(change, author, cache)
		case merkletrie.Delete:
			err = analyser.handleDeletion(change, author, cache)
		case merkletrie.Modify:
			err = analyser.handleModification(change, author, cache, fileDiffs)
		}
		if err != nil {
			return nil, err
		}
	}
	return nil, nil
}

// Finalize returns the list of snapshots of the cumulative line edit times
// and the similar lists for every file which is alive in HEAD.
// The number of snapshots (the first dimension of [][]int64) depends on
// Analyser.Sampling (the larger the Sampling, the fewer the snapshots);
// the length of each snapshot depends on Analyser.Granularity (the larger
// the Granularity, the shorter each snapshot).
func (analyser *BurndownAnalysis) Finalize() interface{} {
	gs, fss, pss := analyser.groupStatus()
	analyser.updateHistories(gs, fss, pss, 1)
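	// Files which appeared after the first sample have shorter histories;
	// pad them at the front with zero rows so that every matrix has the
	// same height as the global history.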
	for key, statuses := range analyser.fileHistories {
		if len(statuses) == len(analyser.globalHistory) {
			continue
		}
		padding := make([][]int64, len(analyser.globalHistory)-len(statuses))
		for i := range padding {
			padding[i] = make([]int64, len(analyser.globalStatus))
		}
		analyser.fileHistories[key] = append(padding, statuses...)
	}
	peopleMatrix := make([][]int64, analyser.PeopleNumber)
	for i, row := range analyser.matrix {
		mrow := make([]int64, analyser.PeopleNumber+2)
		peopleMatrix[i] = mrow
		for key, val := range row {
			if key == MISSING_AUTHOR {
				key = -1
			} else if key == SELF_AUTHOR {
				key = -2
			}
			mrow[key+2] = val
		}
	}
	return BurndownResult{
		GlobalHistory:   analyser.globalHistory,
		FileHistories:   analyser.fileHistories,
		PeopleHistories: analyser.peopleHistories,
		PeopleMatrix:    peopleMatrix,
	}
}

func (analyser *BurndownAnalysis) Serialize(result interface{}, binary bool, writer io.Writer) error {
	burndownResult := result.(BurndownResult)
	if binary {
		return analyser.serializeBinary(&burndownResult, writer)
	}
	analyser.serializeText(&burndownResult, writer)
	return nil
}

func (analyser *BurndownAnalysis) serializeText(result *BurndownResult, writer io.Writer) {
	fmt.Fprintln(writer, "  granularity:", analyser.Granularity)
	fmt.Fprintln(writer, "  sampling:", analyser.Sampling)
	stdout.PrintMatrix(writer, result.GlobalHistory, 2, "project", true)
	if len(result.FileHistories) > 0 {
		fmt.Fprintln(writer, "  files:")
		keys := sortedKeys(result.FileHistories)
		for _, key := range keys {
			stdout.PrintMatrix(writer, result.FileHistories[key], 4, key, true)
		}
	}
	if len(result.PeopleHistories) > 0 {
		fmt.Fprintln(writer, "  people_sequence:")
		for key := range result.PeopleHistories {
			fmt.Fprintln(writer, "  - "+stdout.SafeString(analyser.reversedPeopleDict[key]))
		}
		fmt.Fprintln(writer, "  people:")
		for key, val := range result.PeopleHistories {
			stdout.PrintMatrix(writer, val, 4, analyser.reversedPeopleDict[key], true)
		}
		fmt.Fprintln(writer, "  people_interaction: |-")
		stdout.PrintMatrix(writer, result.PeopleMatrix, 4, "", false)
	}
}

func (analyser *BurndownAnalysis) serializeBinary(result *BurndownResult, writer io.Writer) error {
	message := pb.BurndownAnalysisResults{
		Granularity: int32(analyser.Granularity),
		Sampling:    int32(analyser.Sampling),
		Project:     pb.ToBurndownSparseMatrix(result.GlobalHistory, "project"),
	}
	if len(result.FileHistories) > 0 {
		message.Files = make([]*pb.BurndownSparseMatrix, len(result.FileHistories))
		keys := sortedKeys(result.FileHistories)
		i := 0
		for _, key := range keys {
			message.Files[i] = pb.ToBurndownSparseMatrix(
				result.FileHistories[key], key)
			i++
		}
	}
	if len(result.PeopleHistories) > 0 {
		message.People = make(
			[]*pb.BurndownSparseMatrix, len(result.PeopleHistories))
		for key, val := range result.PeopleHistories {
			message.People[key] = pb.ToBurndownSparseMatrix(val, analyser.reversedPeopleDict[key])
		}
		message.PeopleInteraction = pb.DenseToCompressedSparseRowMatrix(result.PeopleMatrix)
	}
	serialized, err := proto.Marshal(&message)
	if err != nil {
		return err
	}
	_, err = writer.Write(serialized)
	return err
}

func sortedKeys(m map[string][][]int64) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}

func checkClose(c io.Closer) {
	if err := c.Close(); err != nil {
		panic(err)
	}
}

func (analyser *BurndownAnalysis) packPersonWithDay(person int, day int) int {
	if analyser.PeopleNumber == 0 {
		return day
	}
	result := day
	result |= person << 14
	// This effectively means max 16384 days (>44 years) and (131072 - 2) devs
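	// Example: author 3 on day 100 packs to (3 << 14) | 100 == 49252;
	// unpackPersonWithDay(49252) yields (3, 100).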
	return result
}

func (analyser *BurndownAnalysis) unpackPersonWithDay(value int) (int, int) {
	if analyser.PeopleNumber == 0 {
		return MISSING_AUTHOR, value
	}
	return value >> 14, value & 0x3FFF
}

func (analyser *BurndownAnalysis) updateStatus(
	status interface{}, _ int, previous_time_ int, delta int) {
	_, previous_time := analyser.unpackPersonWithDay(previous_time_)
	status.(map[int]int64)[previous_time] += int64(delta)
}

func (analyser *BurndownAnalysis) updatePeople(people interface{}, _ int, previous_time_ int, delta int) {
	old_author, previous_time := analyser.unpackPersonWithDay(previous_time_)
	if old_author == MISSING_AUTHOR {
		return
	}
	casted := people.([]map[int]int64)
	stats := casted[old_author]
	if stats == nil {
		stats = map[int]int64{}
		casted[old_author] = stats
	}
	stats[previous_time] += int64(delta)
}

func (analyser *BurndownAnalysis) updateMatrix(
	matrix_ interface{}, current_time int, previous_time int, delta int) {
	matrix := matrix_.([]map[int]int64)
	new_author, _ := analyser.unpackPersonWithDay(current_time)
	old_author, _ := analyser.unpackPersonWithDay(previous_time)
	if old_author == MISSING_AUTHOR {
		return
	}
	if new_author == old_author && delta > 0 {
		new_author = SELF_AUTHOR
	}
	row := matrix[old_author]
	if row == nil {
		row = map[int]int64{}
		matrix[old_author] = row
	}
	row[new_author] += int64(delta)
}

func (analyser *BurndownAnalysis) newFile(
	author int, day int, size int, global map[int]int64, people []map[int]int64,
	matrix []map[int]int64) *File {
	statuses := make([]Status, 1)
	statuses[0] = NewStatus(global, analyser.updateStatus)
	if analyser.TrackFiles {
		statuses = append(statuses, NewStatus(map[int]int64{}, analyser.updateStatus))
	}
	if analyser.PeopleNumber > 0 {
		statuses = append(statuses, NewStatus(people, analyser.updatePeople))
		statuses = append(statuses, NewStatus(matrix, analyser.updateMatrix))
		day = analyser.packPersonWithDay(author, day)
	}
	return NewFile(day, size, statuses...)
}
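
// Note the Status index layout produced by newFile: index 0 is the global
// line counter and, when TrackFiles is enabled, index 1 is the per-file
// counter which groupStatus later reads through file.Status(1).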

func (analyser *BurndownAnalysis) handleInsertion(
	change *object.Change, author int, cache map[plumbing.Hash]*object.Blob) error {
	blob := cache[change.To.TreeEntry.Hash]
	lines, err := CountLines(blob)
	if err != nil {
		if err.Error() == "binary" {
			return nil
		}
		return err
	}
	name := change.To.Name
	if _, exists := analyser.files[name]; exists {
		return fmt.Errorf("file %s already exists", name)
	}
	file := analyser.newFile(
		author, analyser.day, lines, analyser.globalStatus, analyser.people, analyser.matrix)
	analyser.files[name] = file
	return nil
}

func (analyser *BurndownAnalysis) handleDeletion(
	change *object.Change, author int, cache map[plumbing.Hash]*object.Blob) error {
	blob := cache[change.From.TreeEntry.Hash]
	lines, err := CountLines(blob)
	if err != nil {
		if err.Error() == "binary" {
			return nil
		}
		return err
	}
	name := change.From.Name
	file := analyser.files[name]
	file.Update(analyser.packPersonWithDay(author, analyser.day), 0, 0, lines)
	delete(analyser.files, name)
	return nil
}

func (analyser *BurndownAnalysis) handleModification(
	change *object.Change, author int, cache map[plumbing.Hash]*object.Blob,
	diffs map[string]FileDiffData) error {
	file, exists := analyser.files[change.From.Name]
	if !exists {
		// this indeed may happen
		return analyser.handleInsertion(change, author, cache)
	}
	// possible rename
	if change.To.Name != change.From.Name {
		err := analyser.handleRename(change.From.Name, change.To.Name)
		if err != nil {
			return err
		}
	}
	thisDiffs := diffs[change.To.Name]
	if file.Len() != thisDiffs.OldLinesOfCode {
		fmt.Fprintf(os.Stderr, "====TREE====\n%s", file.Dump())
		return fmt.Errorf("%s: internal integrity error src %d != %d %s -> %s",
			change.To.Name, thisDiffs.OldLinesOfCode, file.Len(),
			change.From.TreeEntry.Hash.String(), change.To.TreeEntry.Hash.String())
	}
	// we do not call RunesToDiffLines so the number of lines equals
	// the rune count
	position := 0
	pending := diffmatchpatch.Diff{Text: ""}
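	// pending buffers a single insertion or deletion: a deletion immediately
	// followed by an insertion is fused into one replacing Update() call in
	// the loop below, and whatever is still pending at the end is applied
	// as-is.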
	apply := func(edit diffmatchpatch.Diff) {
		length := utf8.RuneCountInString(edit.Text)
		if edit.Type == diffmatchpatch.DiffInsert {
			file.Update(analyser.packPersonWithDay(author, analyser.day), position, length, 0)
			position += length
		} else {
			file.Update(analyser.packPersonWithDay(author, analyser.day), position, 0, length)
		}
		if analyser.Debug {
			file.Validate()
		}
	}
	for _, edit := range thisDiffs.Diffs {
		dump_before := ""
		if analyser.Debug {
			dump_before = file.Dump()
		}
		length := utf8.RuneCountInString(edit.Text)
		debug_error := func() {
			fmt.Fprintf(os.Stderr, "%s: internal diff error\n", change.To.Name)
			fmt.Fprintf(os.Stderr, "Update(%d, %d, %d (0), %d (0))\n", analyser.day, position,
				length, utf8.RuneCountInString(pending.Text))
			if dump_before != "" {
				fmt.Fprintf(os.Stderr, "====TREE BEFORE====\n%s====END====\n", dump_before)
			}
			fmt.Fprintf(os.Stderr, "====TREE AFTER====\n%s====END====\n", file.Dump())
		}
		switch edit.Type {
		case diffmatchpatch.DiffEqual:
			if pending.Text != "" {
				apply(pending)
				pending.Text = ""
			}
			position += length
		case diffmatchpatch.DiffInsert:
			if pending.Text != "" {
				if pending.Type == diffmatchpatch.DiffInsert {
					debug_error()
					return errors.New("DiffInsert may not appear after DiffInsert")
				}
				file.Update(analyser.packPersonWithDay(author, analyser.day), position, length,
					utf8.RuneCountInString(pending.Text))
				if analyser.Debug {
					file.Validate()
				}
				position += length
				pending.Text = ""
			} else {
				pending = edit
			}
		case diffmatchpatch.DiffDelete:
			if pending.Text != "" {
				debug_error()
				return errors.New("DiffDelete may not appear after DiffInsert/DiffDelete")
			}
			pending = edit
		default:
			debug_error()
			return fmt.Errorf("diff operation is not supported: %d", edit.Type)
		}
	}
	if pending.Text != "" {
		apply(pending)
		pending.Text = ""
	}
	if file.Len() != thisDiffs.NewLinesOfCode {
		return fmt.Errorf("%s: internal integrity error dst %d != %d",
			change.To.Name, thisDiffs.NewLinesOfCode, file.Len())
	}
	return nil
}

func (analyser *BurndownAnalysis) handleRename(from, to string) error {
	file, exists := analyser.files[from]
	if !exists {
		return fmt.Errorf("file %s does not exist", from)
	}
	analyser.files[to] = file
	delete(analyser.files, from)
	return nil
}
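
// groupStatus summarizes the current line counts into age bands of
// Granularity days: element i of each returned status covers days
// [i*granularity, (i+1)*granularity). For example, with granularity 30 and
// day 45 there are two bands: days 0-29 and days 30-45.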
func (analyser *BurndownAnalysis) groupStatus() ([]int64, map[string][]int64, [][]int64) {
	granularity := analyser.Granularity
	if granularity == 0 {
		granularity = 1
	}
	day := analyser.day
	day++
	adjust := 0
	if day%granularity != 0 {
		adjust = 1
	}
	global := make([]int64, day/granularity+adjust)
	var group int64
	for i := 0; i < day; i++ {
		group += analyser.globalStatus[i]
		if (i % granularity) == (granularity - 1) {
			global[i/granularity] = group
			group = 0
		}
	}
	if day%granularity != 0 {
		global[len(global)-1] = group
	}
	locals := make(map[string][]int64)
	if analyser.TrackFiles {
		for key, file := range analyser.files {
			status := make([]int64, day/granularity+adjust)
			var group int64
			for i := 0; i < day; i++ {
				group += file.Status(1).(map[int]int64)[i]
				if (i % granularity) == (granularity - 1) {
					status[i/granularity] = group
					group = 0
				}
			}
			if day%granularity != 0 {
				status[len(status)-1] = group
			}
			locals[key] = status
		}
	}
	peoples := make([][]int64, len(analyser.people))
	for key, person := range analyser.people {
		status := make([]int64, day/granularity+adjust)
		var group int64
		for i := 0; i < day; i++ {
			group += person[i]
			if (i % granularity) == (granularity - 1) {
				status[i/granularity] = group
				group = 0
			}
		}
		if day%granularity != 0 {
			status[len(status)-1] = group
		}
		peoples[key] = status
	}
	return global, locals, peoples
}
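
// updateHistories appends the freshly grouped rows to the global, per-file
// and per-person histories, replicating each row delta times - once per
// elapsed sampling period. Histories of deleted files are dropped and
// histories for newly appeared files are started.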
func (analyser *BurndownAnalysis) updateHistories(
	globalStatus []int64, file_statuses map[string][]int64, people_statuses [][]int64, delta int) {
	for i := 0; i < delta; i++ {
		analyser.globalHistory = append(analyser.globalHistory, globalStatus)
	}
	to_delete := make([]string, 0)
	for key, fh := range analyser.fileHistories {
		ls, exists := file_statuses[key]
		if !exists {
			to_delete = append(to_delete, key)
		} else {
			for i := 0; i < delta; i++ {
				fh = append(fh, ls)
			}
			analyser.fileHistories[key] = fh
		}
	}
	for _, key := range to_delete {
		delete(analyser.fileHistories, key)
	}
	for key, ls := range file_statuses {
		fh, exists := analyser.fileHistories[key]
		if exists {
			continue
		}
		for i := 0; i < delta; i++ {
			fh = append(fh, ls)
		}
		analyser.fileHistories[key] = fh
	}
	for key, ph := range analyser.peopleHistories {
		ls := people_statuses[key]
		for i := 0; i < delta; i++ {
			ph = append(ph, ls)
		}
		analyser.peopleHistories[key] = ph
	}
}

func init() {
	Registry.Register(&BurndownAnalysis{})
}