burndown.go 20 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696
  1. package hercules
  2. import (
  3. "bufio"
  4. "errors"
  5. "fmt"
  6. "io"
  7. "os"
  8. "sort"
  9. "unicode/utf8"
  10. "github.com/gogo/protobuf/proto"
  11. "github.com/sergi/go-diff/diffmatchpatch"
  12. "gopkg.in/src-d/go-git.v4"
  13. "gopkg.in/src-d/go-git.v4/plumbing"
  14. "gopkg.in/src-d/go-git.v4/plumbing/object"
  15. "gopkg.in/src-d/go-git.v4/utils/merkletrie"
  16. "gopkg.in/src-d/hercules.v3/pb"
  17. "gopkg.in/src-d/hercules.v3/stdout"
  18. )
  19. // BurndownAnalyser allows to gather the line burndown statistics for a Git repository.
  20. type BurndownAnalysis struct {
  21. // Granularity sets the size of each band - the number of days it spans.
  22. // Smaller values provide better resolution but require more work and eat more
  23. // memory. 30 days is usually enough.
  24. Granularity int
  25. // Sampling sets how detailed is the statistic - the size of the interval in
  26. // days between consecutive measurements. It is usually a good idea to set it
  27. // <= Granularity. Try 15 or 30.
  28. Sampling int
  29. // TrackFiles enables or disables the fine-grained per-file burndown analysis.
  30. // It does not change the top level burndown results.
  31. TrackFiles bool
  32. // The number of developers for which to collect the burndown stats. 0 disables it.
  33. PeopleNumber int
  34. // Debug activates the debugging mode. Analyse() runs slower in this mode
  35. // but it accurately checks all the intermediate states for invariant
  36. // violations.
  37. Debug bool
  38. // Repository points to the analysed Git repository struct from go-git.
  39. repository *git.Repository
  40. // globalStatus is the current daily alive number of lines; key is the number
  41. // of days from the beginning of the history.
  42. globalStatus map[int]int64
  43. // globalHistory is the weekly snapshots of globalStatus.
  44. globalHistory [][]int64
  45. // fileHistories is the weekly snapshots of each file's status.
  46. fileHistories map[string][][]int64
  47. // peopleHistories is the weekly snapshots of each person's status.
  48. peopleHistories [][][]int64
  49. // files is the mapping <file path> -> *File.
  50. files map[string]*File
  51. // matrix is the mutual deletions and self insertions.
  52. matrix []map[int]int64
  53. // people is the people's individual time stats.
  54. people []map[int]int64
  55. // day is the most recent day index processed.
  56. day int
  57. // previousDay is the day from the previous sample period -
  58. // different from DaysSinceStart.previousDay.
  59. previousDay int
  60. // references IdentityDetector.ReversedPeopleDict
  61. reversedPeopleDict []string
  62. }
// BurndownResult carries the outcome of the burndown analysis as returned by
// Finalize().
type BurndownResult struct {
	// GlobalHistory is the periodic snapshots of the whole project's line status.
	GlobalHistory [][]int64
	// FileHistories maps file paths to their periodic snapshots.
	FileHistories map[string][][]int64
	// PeopleHistories is the periodic snapshots for each developer.
	PeopleHistories [][][]int64
	// PeopleMatrix is the developer interaction matrix built in Finalize():
	// rows are authors, the first two columns are the MISSING_AUTHOR and
	// SELF_AUTHOR sentinels.
	PeopleMatrix [][]int64
}
// Fact/option names recognized by BurndownAnalysis.Configure().
const (
	// ConfigBurndownGranularity sets BurndownAnalysis.Granularity.
	ConfigBurndownGranularity = "Burndown.Granularity"
	// ConfigBurndownSampling sets BurndownAnalysis.Sampling.
	ConfigBurndownSampling = "Burndown.Sampling"
	// ConfigBurndownTrackFiles sets BurndownAnalysis.TrackFiles.
	ConfigBurndownTrackFiles = "Burndown.TrackFiles"
	// ConfigBurndownTrackPeople enables the per-developer statistics.
	ConfigBurndownTrackPeople = "Burndown.TrackPeople"
	// ConfigBurndownDebug sets BurndownAnalysis.Debug.
	ConfigBurndownDebug = "Burndown.Debug"
)
  76. func (analyser *BurndownAnalysis) Name() string {
  77. return "Burndown"
  78. }
  79. func (analyser *BurndownAnalysis) Provides() []string {
  80. return []string{}
  81. }
  82. func (analyser *BurndownAnalysis) Requires() []string {
  83. arr := [...]string{"file_diff", "changes", "blob_cache", "day", "author"}
  84. return arr[:]
  85. }
  86. func (analyser *BurndownAnalysis) ListConfigurationOptions() []ConfigurationOption {
  87. options := [...]ConfigurationOption{{
  88. Name: ConfigBurndownGranularity,
  89. Description: "How many days there are in a single band.",
  90. Flag: "granularity",
  91. Type: IntConfigurationOption,
  92. Default: 30}, {
  93. Name: ConfigBurndownSampling,
  94. Description: "How frequently to record the state in days.",
  95. Flag: "sampling",
  96. Type: IntConfigurationOption,
  97. Default: 30}, {
  98. Name: ConfigBurndownTrackFiles,
  99. Description: "Record detailed statistics per each file.",
  100. Flag: "burndown-files",
  101. Type: BoolConfigurationOption,
  102. Default: false}, {
  103. Name: ConfigBurndownTrackPeople,
  104. Description: "Record detailed statistics per each developer.",
  105. Flag: "burndown-people",
  106. Type: BoolConfigurationOption,
  107. Default: false}, {
  108. Name: ConfigBurndownDebug,
  109. Description: "Validate the trees on each step.",
  110. Flag: "burndown-debug",
  111. Type: BoolConfigurationOption,
  112. Default: false},
  113. }
  114. return options[:]
  115. }
  116. func (analyser *BurndownAnalysis) Configure(facts map[string]interface{}) {
  117. if val, exists := facts[ConfigBurndownGranularity].(int); exists {
  118. analyser.Granularity = val
  119. }
  120. if val, exists := facts[ConfigBurndownSampling].(int); exists {
  121. analyser.Sampling = val
  122. }
  123. if val, exists := facts[ConfigBurndownTrackFiles].(bool); exists {
  124. analyser.TrackFiles = val
  125. }
  126. if people, exists := facts[ConfigBurndownTrackPeople].(bool); people {
  127. if val, exists := facts[FactIdentityDetectorPeopleCount].(int); exists {
  128. analyser.PeopleNumber = val
  129. analyser.reversedPeopleDict = facts[FactIdentityDetectorReversedPeopleDict].([]string)
  130. }
  131. } else if exists {
  132. analyser.PeopleNumber = 0
  133. }
  134. if val, exists := facts[ConfigBurndownDebug].(bool); exists {
  135. analyser.Debug = val
  136. }
  137. }
  138. func (analyser *BurndownAnalysis) Flag() string {
  139. return "burndown"
  140. }
  141. func (analyser *BurndownAnalysis) Initialize(repository *git.Repository) {
  142. if analyser.Granularity <= 0 {
  143. fmt.Fprintln(os.Stderr, "Warning: adjusted the granularity to 30 days")
  144. analyser.Granularity = 30
  145. }
  146. if analyser.Sampling <= 0 {
  147. fmt.Fprintln(os.Stderr, "Warning: adjusted the sampling to 30 days")
  148. analyser.Sampling = 30
  149. }
  150. analyser.repository = repository
  151. analyser.globalStatus = map[int]int64{}
  152. analyser.globalHistory = [][]int64{}
  153. analyser.fileHistories = map[string][][]int64{}
  154. analyser.peopleHistories = make([][][]int64, analyser.PeopleNumber)
  155. analyser.files = map[string]*File{}
  156. analyser.matrix = make([]map[int]int64, analyser.PeopleNumber)
  157. analyser.people = make([]map[int]int64, analyser.PeopleNumber)
  158. analyser.day = 0
  159. analyser.previousDay = 0
  160. }
  161. func (analyser *BurndownAnalysis) Consume(deps map[string]interface{}) (map[string]interface{}, error) {
  162. sampling := analyser.Sampling
  163. if sampling == 0 {
  164. sampling = 1
  165. }
  166. author := deps["author"].(int)
  167. analyser.day = deps["day"].(int)
  168. delta := (analyser.day / sampling) - (analyser.previousDay / sampling)
  169. if delta > 0 {
  170. analyser.previousDay = analyser.day
  171. gs, fss, pss := analyser.groupStatus()
  172. analyser.updateHistories(gs, fss, pss, delta)
  173. }
  174. cache := deps["blob_cache"].(map[plumbing.Hash]*object.Blob)
  175. treeDiffs := deps["changes"].(object.Changes)
  176. fileDiffs := deps["file_diff"].(map[string]FileDiffData)
  177. for _, change := range treeDiffs {
  178. action, err := change.Action()
  179. if err != nil {
  180. return nil, err
  181. }
  182. switch action {
  183. case merkletrie.Insert:
  184. err = analyser.handleInsertion(change, author, cache)
  185. case merkletrie.Delete:
  186. err = analyser.handleDeletion(change, author, cache)
  187. case merkletrie.Modify:
  188. err = analyser.handleModification(change, author, cache, fileDiffs)
  189. }
  190. if err != nil {
  191. return nil, err
  192. }
  193. }
  194. return nil, nil
  195. }
  196. // Finalize() returns the list of snapshots of the cumulative line edit times
  197. // and the similar lists for every file which is alive in HEAD.
  198. // The number of snapshots (the first dimension >[]<[]int64) depends on
  199. // Analyser.Sampling (the more Sampling, the less the value); the length of
  200. // each snapshot depends on Analyser.Granularity (the more Granularity,
  201. // the less the value).
  202. func (analyser *BurndownAnalysis) Finalize() interface{} {
  203. gs, fss, pss := analyser.groupStatus()
  204. analyser.updateHistories(gs, fss, pss, 1)
  205. for key, statuses := range analyser.fileHistories {
  206. if len(statuses) == len(analyser.globalHistory) {
  207. continue
  208. }
  209. padding := make([][]int64, len(analyser.globalHistory)-len(statuses))
  210. for i := range padding {
  211. padding[i] = make([]int64, len(analyser.globalStatus))
  212. }
  213. analyser.fileHistories[key] = append(padding, statuses...)
  214. }
  215. peopleMatrix := make([][]int64, analyser.PeopleNumber)
  216. for i, row := range analyser.matrix {
  217. mrow := make([]int64, analyser.PeopleNumber+2)
  218. peopleMatrix[i] = mrow
  219. for key, val := range row {
  220. if key == MISSING_AUTHOR {
  221. key = -1
  222. } else if key == SELF_AUTHOR {
  223. key = -2
  224. }
  225. mrow[key+2] = val
  226. }
  227. }
  228. return BurndownResult{
  229. GlobalHistory: analyser.globalHistory,
  230. FileHistories: analyser.fileHistories,
  231. PeopleHistories: analyser.peopleHistories,
  232. PeopleMatrix: peopleMatrix,
  233. }
  234. }
  235. func (analyser *BurndownAnalysis) Serialize(result interface{}, binary bool, writer io.Writer) error {
  236. burndownResult := result.(BurndownResult)
  237. if binary {
  238. return analyser.serializeBinary(&burndownResult, writer)
  239. }
  240. analyser.serializeText(&burndownResult, writer)
  241. return nil
  242. }
  243. func (analyser *BurndownAnalysis) serializeText(result *BurndownResult, writer io.Writer) {
  244. fmt.Fprintln(writer, " granularity:", analyser.Granularity)
  245. fmt.Fprintln(writer, " sampling:", analyser.Sampling)
  246. stdout.PrintMatrix(writer, result.GlobalHistory, 2, "project", true)
  247. if len(result.FileHistories) > 0 {
  248. fmt.Fprintln(writer, " files:")
  249. keys := sortedKeys(result.FileHistories)
  250. for _, key := range keys {
  251. stdout.PrintMatrix(writer, result.FileHistories[key], 4, key, true)
  252. }
  253. }
  254. if len(result.PeopleHistories) > 0 {
  255. fmt.Fprintln(writer, " people_sequence:")
  256. for key := range result.PeopleHistories {
  257. fmt.Fprintln(writer, " - "+stdout.SafeString(analyser.reversedPeopleDict[key]))
  258. }
  259. fmt.Fprintln(writer, " people:")
  260. for key, val := range result.PeopleHistories {
  261. stdout.PrintMatrix(writer, val, 4, analyser.reversedPeopleDict[key], true)
  262. }
  263. fmt.Fprintln(writer, " people_interaction: |-")
  264. stdout.PrintMatrix(writer, result.PeopleMatrix, 4, "", false)
  265. }
  266. }
  267. func (analyser *BurndownAnalysis) serializeBinary(result *BurndownResult, writer io.Writer) error {
  268. message := pb.BurndownAnalysisResults{
  269. Granularity: int32(analyser.Granularity),
  270. Sampling: int32(analyser.Sampling),
  271. Project: pb.ToBurndownSparseMatrix(result.GlobalHistory, "project"),
  272. }
  273. if len(result.FileHistories) > 0 {
  274. message.Files = make([]*pb.BurndownSparseMatrix, len(result.FileHistories))
  275. keys := sortedKeys(result.FileHistories)
  276. i := 0
  277. for _, key := range keys {
  278. message.Files[i] = pb.ToBurndownSparseMatrix(
  279. result.FileHistories[key], key)
  280. i++
  281. }
  282. }
  283. if len(result.PeopleHistories) > 0 {
  284. message.People = make(
  285. []*pb.BurndownSparseMatrix, len(result.PeopleHistories))
  286. for key, val := range result.PeopleHistories {
  287. message.People[key] = pb.ToBurndownSparseMatrix(val, analyser.reversedPeopleDict[key])
  288. }
  289. message.PeopleInteraction = pb.DenseToCompressedSparseRowMatrix(result.PeopleMatrix)
  290. }
  291. serialized, err := proto.Marshal(&message)
  292. if err != nil {
  293. return err
  294. }
  295. writer.Write(serialized)
  296. return nil
  297. }
  298. func sortedKeys(m map[string][][]int64) []string {
  299. keys := make([]string, 0, len(m))
  300. for k := range m {
  301. keys = append(keys, k)
  302. }
  303. sort.Strings(keys)
  304. return keys
  305. }
  306. func checkClose(c io.Closer) {
  307. if err := c.Close(); err != nil {
  308. panic(err)
  309. }
  310. }
// countLines returns the number of text lines in the blob.
// It returns (-1, errors.New("binary")) when the content is not valid UTF-8.
func countLines(file *object.Blob) (int, error) {
	reader, err := file.Reader()
	if err != nil {
		return 0, err
	}
	defer checkClose(reader)
	var scanner *bufio.Scanner
	buffer := make([]byte, bufio.MaxScanTokenSize)
	counter := 0
	// bufio.Scanner aborts with ErrTooLong when a single line exceeds the
	// buffer. In that case the partial token is validated as UTF-8 and a new
	// scanner continues from the same reader position, so overly long lines
	// do not fail the count.
	for scanner == nil || scanner.Err() == bufio.ErrTooLong {
		if scanner != nil && !utf8.Valid(scanner.Bytes()) {
			return -1, errors.New("binary")
		}
		scanner = bufio.NewScanner(reader)
		// max == 0 <= cap(buffer), so Scan uses cap(buffer) as the limit
		scanner.Buffer(buffer, 0)
		for scanner.Scan() {
			if !utf8.Valid(scanner.Bytes()) {
				return -1, errors.New("binary")
			}
			counter++
		}
	}
	return counter, nil
}
  335. func (analyser *BurndownAnalysis) packPersonWithDay(person int, day int) int {
  336. if analyser.PeopleNumber == 0 {
  337. return day
  338. }
  339. result := day
  340. result |= person << 14
  341. // This effectively means max 16384 days (>44 years) and (131072 - 2) devs
  342. return result
  343. }
  344. func (analyser *BurndownAnalysis) unpackPersonWithDay(value int) (int, int) {
  345. if analyser.PeopleNumber == 0 {
  346. return MISSING_AUTHOR, value
  347. }
  348. return value >> 14, value & 0x3FFF
  349. }
  350. func (analyser *BurndownAnalysis) updateStatus(
  351. status interface{}, _ int, previous_time_ int, delta int) {
  352. _, previous_time := analyser.unpackPersonWithDay(previous_time_)
  353. status.(map[int]int64)[previous_time] += int64(delta)
  354. }
  355. func (analyser *BurndownAnalysis) updatePeople(people interface{}, _ int, previous_time_ int, delta int) {
  356. old_author, previous_time := analyser.unpackPersonWithDay(previous_time_)
  357. if old_author == MISSING_AUTHOR {
  358. return
  359. }
  360. casted := people.([]map[int]int64)
  361. stats := casted[old_author]
  362. if stats == nil {
  363. stats = map[int]int64{}
  364. casted[old_author] = stats
  365. }
  366. stats[previous_time] += int64(delta)
  367. }
  368. func (analyser *BurndownAnalysis) updateMatrix(
  369. matrix_ interface{}, current_time int, previous_time int, delta int) {
  370. matrix := matrix_.([]map[int]int64)
  371. new_author, _ := analyser.unpackPersonWithDay(current_time)
  372. old_author, _ := analyser.unpackPersonWithDay(previous_time)
  373. if old_author == MISSING_AUTHOR {
  374. return
  375. }
  376. if new_author == old_author && delta > 0 {
  377. new_author = SELF_AUTHOR
  378. }
  379. row := matrix[old_author]
  380. if row == nil {
  381. row = map[int]int64{}
  382. matrix[old_author] = row
  383. }
  384. cell, exists := row[new_author]
  385. if !exists {
  386. row[new_author] = 0
  387. cell = 0
  388. }
  389. row[new_author] = cell + int64(delta)
  390. }
  391. func (analyser *BurndownAnalysis) newFile(
  392. author int, day int, size int, global map[int]int64, people []map[int]int64,
  393. matrix []map[int]int64) *File {
  394. statuses := make([]Status, 1)
  395. statuses[0] = NewStatus(global, analyser.updateStatus)
  396. if analyser.TrackFiles {
  397. statuses = append(statuses, NewStatus(map[int]int64{}, analyser.updateStatus))
  398. }
  399. if analyser.PeopleNumber > 0 {
  400. statuses = append(statuses, NewStatus(people, analyser.updatePeople))
  401. statuses = append(statuses, NewStatus(matrix, analyser.updateMatrix))
  402. day = analyser.packPersonWithDay(author, day)
  403. }
  404. return NewFile(day, size, statuses...)
  405. }
  406. func (analyser *BurndownAnalysis) handleInsertion(
  407. change *object.Change, author int, cache map[plumbing.Hash]*object.Blob) error {
  408. blob := cache[change.To.TreeEntry.Hash]
  409. lines, err := countLines(blob)
  410. if err != nil {
  411. if err.Error() == "binary" {
  412. return nil
  413. }
  414. return err
  415. }
  416. name := change.To.Name
  417. file, exists := analyser.files[name]
  418. if exists {
  419. return errors.New(fmt.Sprintf("file %s already exists", name))
  420. }
  421. file = analyser.newFile(
  422. author, analyser.day, lines, analyser.globalStatus, analyser.people, analyser.matrix)
  423. analyser.files[name] = file
  424. return nil
  425. }
  426. func (analyser *BurndownAnalysis) handleDeletion(
  427. change *object.Change, author int, cache map[plumbing.Hash]*object.Blob) error {
  428. blob := cache[change.From.TreeEntry.Hash]
  429. lines, err := countLines(blob)
  430. if err != nil {
  431. if err.Error() == "binary" {
  432. return nil
  433. }
  434. return err
  435. }
  436. name := change.From.Name
  437. file := analyser.files[name]
  438. file.Update(analyser.packPersonWithDay(author, analyser.day), 0, 0, lines)
  439. delete(analyser.files, name)
  440. return nil
  441. }
// handleModification applies the line-level diff of a changed file to the
// tracked state. It also processes renames and falls back to handleInsertion
// when the file was not tracked before (e.g. it used to be binary).
func (analyser *BurndownAnalysis) handleModification(
	change *object.Change, author int, cache map[plumbing.Hash]*object.Blob,
	diffs map[string]FileDiffData) error {
	file, exists := analyser.files[change.From.Name]
	if !exists {
		return analyser.handleInsertion(change, author, cache)
	}
	// possible rename
	if change.To.Name != change.From.Name {
		err := analyser.handleRename(change.From.Name, change.To.Name)
		if err != nil {
			return err
		}
	}
	thisDiffs := diffs[change.To.Name]
	// precondition: the tracked length must match the diff's "before" size
	if file.Len() != thisDiffs.OldLinesOfCode {
		fmt.Fprintf(os.Stderr, "====TREE====\n%s", file.Dump())
		return errors.New(fmt.Sprintf("%s: internal integrity error src %d != %d %s -> %s",
			change.To.Name, thisDiffs.OldLinesOfCode, file.Len(),
			change.From.TreeEntry.Hash.String(), change.To.TreeEntry.Hash.String()))
	}
	// we do not call RunesToDiffLines so the number of lines equals
	// to the rune count
	position := 0
	pending := diffmatchpatch.Diff{Text: ""}
	// apply flushes a single pending insertion or deletion into the file state
	apply := func(edit diffmatchpatch.Diff) {
		length := utf8.RuneCountInString(edit.Text)
		if edit.Type == diffmatchpatch.DiffInsert {
			file.Update(analyser.packPersonWithDay(author, analyser.day), position, length, 0)
			position += length
		} else {
			file.Update(analyser.packPersonWithDay(author, analyser.day), position, 0, length)
		}
		if analyser.Debug {
			file.Validate()
		}
	}
	for _, edit := range thisDiffs.Diffs {
		dump_before := ""
		if analyser.Debug {
			dump_before = file.Dump()
		}
		length := utf8.RuneCountInString(edit.Text)
		// debug_error prints the state around a violated invariant to stderr
		debug_error := func() {
			fmt.Fprintf(os.Stderr, "%s: internal diff error\n", change.To.Name)
			fmt.Fprintf(os.Stderr, "Update(%d, %d, %d (0), %d (0))\n", analyser.day, position,
				length, utf8.RuneCountInString(pending.Text))
			if dump_before != "" {
				fmt.Fprintf(os.Stderr, "====TREE BEFORE====\n%s====END====\n", dump_before)
			}
			fmt.Fprintf(os.Stderr, "====TREE AFTER====\n%s====END====\n", file.Dump())
		}
		switch edit.Type {
		case diffmatchpatch.DiffEqual:
			// an equal run flushes any pending edit and advances the cursor
			if pending.Text != "" {
				apply(pending)
				pending.Text = ""
			}
			position += length
		case diffmatchpatch.DiffInsert:
			if pending.Text != "" {
				if pending.Type == diffmatchpatch.DiffInsert {
					debug_error()
					return errors.New("DiffInsert may not appear after DiffInsert")
				}
				// a deletion immediately followed by an insertion is treated
				// as a single replacement
				file.Update(analyser.packPersonWithDay(author, analyser.day), position, length,
					utf8.RuneCountInString(pending.Text))
				if analyser.Debug {
					file.Validate()
				}
				position += length
				pending.Text = ""
			} else {
				pending = edit
			}
		case diffmatchpatch.DiffDelete:
			if pending.Text != "" {
				debug_error()
				return errors.New("DiffDelete may not appear after DiffInsert/DiffDelete")
			}
			pending = edit
		default:
			debug_error()
			return errors.New(fmt.Sprintf("diff operation is not supported: %d", edit.Type))
		}
	}
	if pending.Text != "" {
		apply(pending)
		pending.Text = ""
	}
	// postcondition: the tracked length must match the diff's "after" size
	if file.Len() != thisDiffs.NewLinesOfCode {
		return errors.New(fmt.Sprintf("%s: internal integrity error dst %d != %d",
			change.To.Name, thisDiffs.NewLinesOfCode, file.Len()))
	}
	return nil
}
  538. func (analyser *BurndownAnalysis) handleRename(from, to string) error {
  539. file, exists := analyser.files[from]
  540. if !exists {
  541. return errors.New(fmt.Sprintf("file %s does not exist", from))
  542. }
  543. analyser.files[to] = file
  544. delete(analyser.files, from)
  545. return nil
  546. }
  547. func (analyser *BurndownAnalysis) groupStatus() ([]int64, map[string][]int64, [][]int64) {
  548. granularity := analyser.Granularity
  549. if granularity == 0 {
  550. granularity = 1
  551. }
  552. day := analyser.day
  553. day++
  554. adjust := 0
  555. if day%granularity != 0 {
  556. adjust = 1
  557. }
  558. global := make([]int64, day/granularity+adjust)
  559. var group int64
  560. for i := 0; i < day; i++ {
  561. group += analyser.globalStatus[i]
  562. if (i % granularity) == (granularity - 1) {
  563. global[i/granularity] = group
  564. group = 0
  565. }
  566. }
  567. if day%granularity != 0 {
  568. global[len(global)-1] = group
  569. }
  570. locals := make(map[string][]int64)
  571. if analyser.TrackFiles {
  572. for key, file := range analyser.files {
  573. status := make([]int64, day/granularity+adjust)
  574. var group int64
  575. for i := 0; i < day; i++ {
  576. group += file.Status(1).(map[int]int64)[i]
  577. if (i % granularity) == (granularity - 1) {
  578. status[i/granularity] = group
  579. group = 0
  580. }
  581. }
  582. if day%granularity != 0 {
  583. status[len(status)-1] = group
  584. }
  585. locals[key] = status
  586. }
  587. }
  588. peoples := make([][]int64, len(analyser.people))
  589. for key, person := range analyser.people {
  590. status := make([]int64, day/granularity+adjust)
  591. var group int64
  592. for i := 0; i < day; i++ {
  593. group += person[i]
  594. if (i % granularity) == (granularity - 1) {
  595. status[i/granularity] = group
  596. group = 0
  597. }
  598. }
  599. if day%granularity != 0 {
  600. status[len(status)-1] = group
  601. }
  602. peoples[key] = status
  603. }
  604. return global, locals, peoples
  605. }
  606. func (analyser *BurndownAnalysis) updateHistories(
  607. globalStatus []int64, file_statuses map[string][]int64, people_statuses [][]int64, delta int) {
  608. for i := 0; i < delta; i++ {
  609. analyser.globalHistory = append(analyser.globalHistory, globalStatus)
  610. }
  611. to_delete := make([]string, 0)
  612. for key, fh := range analyser.fileHistories {
  613. ls, exists := file_statuses[key]
  614. if !exists {
  615. to_delete = append(to_delete, key)
  616. } else {
  617. for i := 0; i < delta; i++ {
  618. fh = append(fh, ls)
  619. }
  620. analyser.fileHistories[key] = fh
  621. }
  622. }
  623. for _, key := range to_delete {
  624. delete(analyser.fileHistories, key)
  625. }
  626. for key, ls := range file_statuses {
  627. fh, exists := analyser.fileHistories[key]
  628. if exists {
  629. continue
  630. }
  631. for i := 0; i < delta; i++ {
  632. fh = append(fh, ls)
  633. }
  634. analyser.fileHistories[key] = fh
  635. }
  636. for key, ph := range analyser.peopleHistories {
  637. ls := people_statuses[key]
  638. for i := 0; i < delta; i++ {
  639. ph = append(ph, ls)
  640. }
  641. analyser.peopleHistories[key] = ph
  642. }
  643. }
// init registers BurndownAnalysis in the pipeline item registry at package
// load time so that it can be enabled through its command line flag.
func init() {
	Registry.Register(&BurndownAnalysis{})
}