burndown.go

package leaves

import (
	"errors"
	"fmt"
	"io"
	"log"
	"sort"
	"sync"
	"unicode/utf8"

	"github.com/gogo/protobuf/proto"
	"github.com/sergi/go-diff/diffmatchpatch"
	"gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
	"gopkg.in/src-d/go-git.v4/utils/merkletrie"

	"gopkg.in/src-d/hercules.v5/internal/burndown"
	"gopkg.in/src-d/hercules.v5/internal/core"
	"gopkg.in/src-d/hercules.v5/internal/pb"
	items "gopkg.in/src-d/hercules.v5/internal/plumbing"
	"gopkg.in/src-d/hercules.v5/internal/plumbing/identity"
	"gopkg.in/src-d/hercules.v5/internal/yaml"
)

// BurndownAnalysis allows gathering the line burndown statistics for a Git repository.
// It is a LeafPipelineItem.
// Reference: https://erikbern.com/2016/12/05/the-half-life-of-code.html
type BurndownAnalysis struct {
	// Granularity sets the size of each band - the number of days it spans.
	// Smaller values provide better resolution but require more work and eat more
	// memory. 30 days is usually enough.
	Granularity int
	// Sampling sets how detailed the statistic is - the size of the interval in
	// days between consecutive measurements. It may not be greater than Granularity. Try 15 or 30.
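	// For example (hypothetical numbers): Granularity: 30 with Sampling: 15 records a
	// measurement every 15 days and groups lines into 30-day age bands.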
	Sampling int
	// TrackFiles enables or disables the fine-grained per-file burndown analysis.
	// It does not change the project level burndown results.
	TrackFiles bool
	// The number of developers for which to collect the burndown stats. 0 disables it.
	PeopleNumber int
	// Debug activates the debugging mode. Analyse() runs slower in this mode
	// but it accurately checks all the intermediate states for invariant
	// violations.
	Debug bool
	// Repository points to the analysed Git repository struct from go-git.
	repository *git.Repository
	// globalHistory is the daily deltas of daily line counts.
	// E.g. day 0:  day 0 +50 lines
	//      day 10: day 0 -10 lines; day 10 +20 lines
	//      day 12: day 0 -5 lines;  day 10 -3 lines; day 12 +10 lines
	// map[0][0] = 50
	// map[10][0] = -10
	// map[10][10] = 20
	// map[12][0] = -5
	// map[12][10] = -3
	// map[12][12] = 10
	globalHistory sparseHistory
	// fileHistories is the daily deltas of each file's daily line counts.
	fileHistories map[string]sparseHistory
	// peopleHistories is the daily deltas of each person's daily line counts.
	peopleHistories []sparseHistory
	// files is the mapping <file path> -> *File.
	files map[string]*burndown.File
	// mergedFiles is used during merges to record the real file hashes
	mergedFiles map[string]bool
	// mergedAuthor of the processed merge commit
	mergedAuthor int
	// renames is a quick and dirty solution for the "future branch renames" problem.
	renames map[string]string
	// matrix is the mutual deletions and self insertions.
	matrix []map[int]int64
	// day is the most recent day index processed.
	day int
	// previousDay is the day from the previous sample period -
	// different from DaysSinceStart.previousDay.
	previousDay int
	// references IdentityDetector.ReversedPeopleDict
	reversedPeopleDict []string
}

// BurndownResult carries the result of running BurndownAnalysis - it is returned by
// BurndownAnalysis.Finalize().
type BurndownResult struct {
	// [number of samples][number of bands]
	// The number of samples depends on Sampling: the smaller the Sampling, the larger the number.
	// The number of bands depends on Granularity: the smaller the Granularity, the larger the number.
	GlobalHistory DenseHistory
	// The key is the path inside the Git repository. The value's dimensions are the same as
	// in GlobalHistory.
	FileHistories map[string]DenseHistory
	// [number of people][number of samples][number of bands]
	PeopleHistories []DenseHistory
	// [number of people][number of people + 2]
	// The first element is the total number of lines added by the author.
	// The second element is the number of removals by unidentified authors (outside reversedPeopleDict).
	// The rest of the elements equal the number of line removals by the corresponding
	// authors in reversedPeopleDict: 2 -> 0, 3 -> 1, etc.
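	// For example (hypothetical data): with reversedPeopleDict = ["alice", "bob"],
	// PeopleMatrix[0] = [120, 4, 30, 7] means alice added 120 lines in total; 4 of her
	// lines were removed by unidentified authors, 30 by herself and 7 by bob.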
	PeopleMatrix DenseHistory

	// The following members are private.

	// reversedPeopleDict is borrowed from IdentityDetector and becomes available after
	// Pipeline.Initialize(facts map[string]interface{}). Thus it can be obtained via
	// facts[FactIdentityDetectorReversedPeopleDict].
	reversedPeopleDict []string
	// sampling and granularity are copied from BurndownAnalysis and stored for service purposes
	// such as merging several results together.
	sampling    int
	granularity int
}

const (
	// ConfigBurndownGranularity is the name of the option to set BurndownAnalysis.Granularity.
	ConfigBurndownGranularity = "Burndown.Granularity"
	// ConfigBurndownSampling is the name of the option to set BurndownAnalysis.Sampling.
	ConfigBurndownSampling = "Burndown.Sampling"
	// ConfigBurndownTrackFiles enables burndown collection for files.
	ConfigBurndownTrackFiles = "Burndown.TrackFiles"
	// ConfigBurndownTrackPeople enables burndown collection for authors.
	ConfigBurndownTrackPeople = "Burndown.TrackPeople"
	// ConfigBurndownDebug enables some extra debug assertions.
	ConfigBurndownDebug = "Burndown.Debug"
	// DefaultBurndownGranularity is the default number of days for BurndownAnalysis.Granularity
	// and BurndownAnalysis.Sampling.
	DefaultBurndownGranularity = 30
	// authorSelf is the internal author index which is used in BurndownAnalysis.Finalize() to
	// format the author overwrites matrix.
	authorSelf = (1 << (32 - burndown.TreeMaxBinPower)) - 2
)

type sparseHistory = map[int]map[int]int64

// DenseHistory is the matrix [number of samples][number of bands] -> number of lines.
type DenseHistory = [][]int64
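
// For example (hypothetical numbers): with Sampling == Granularity == 30 and a repository
// that is 70 days old, a DenseHistory has 3 samples and 3 bands; entry [i][j] holds the
// number of lines last modified in band j which are still alive at sample i.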

// Name of this PipelineItem. Uniquely identifies the type, used for mapping keys, etc.
func (analyser *BurndownAnalysis) Name() string {
	return "Burndown"
}

// Provides returns the list of names of entities which are produced by this PipelineItem.
// Each produced entity will be inserted into `deps` of dependent Consume()-s according
// to this list. Also used by core.Registry to build the global map of providers.
func (analyser *BurndownAnalysis) Provides() []string {
	return []string{}
}

// Requires returns the list of names of entities which are needed by this PipelineItem.
// Each requested entity will be inserted into `deps` of Consume(). In turn, those
// entities are Provides() upstream.
func (analyser *BurndownAnalysis) Requires() []string {
	arr := [...]string{
		items.DependencyFileDiff, items.DependencyTreeChanges, items.DependencyBlobCache,
		items.DependencyDay, identity.DependencyAuthor}
	return arr[:]
}

// ListConfigurationOptions returns the list of changeable public properties of this PipelineItem.
func (analyser *BurndownAnalysis) ListConfigurationOptions() []core.ConfigurationOption {
	options := [...]core.ConfigurationOption{{
		Name:        ConfigBurndownGranularity,
		Description: "How many days there are in a single band.",
		Flag:        "granularity",
		Type:        core.IntConfigurationOption,
		Default:     DefaultBurndownGranularity}, {
		Name:        ConfigBurndownSampling,
		Description: "How frequently to record the state in days.",
		Flag:        "sampling",
		Type:        core.IntConfigurationOption,
		Default:     DefaultBurndownGranularity}, {
		Name:        ConfigBurndownTrackFiles,
		Description: "Record detailed statistics per each file.",
		Flag:        "burndown-files",
		Type:        core.BoolConfigurationOption,
		Default:     false}, {
		Name:        ConfigBurndownTrackPeople,
		Description: "Record detailed statistics per each developer.",
		Flag:        "burndown-people",
		Type:        core.BoolConfigurationOption,
		Default:     false}, {
		Name:        ConfigBurndownDebug,
		Description: "Validate the trees on each step.",
		Flag:        "burndown-debug",
		Type:        core.BoolConfigurationOption,
		Default:     false},
	}
	return options[:]
}

// Configure sets the properties previously published by ListConfigurationOptions().
func (analyser *BurndownAnalysis) Configure(facts map[string]interface{}) {
	if val, exists := facts[ConfigBurndownGranularity].(int); exists {
		analyser.Granularity = val
	}
	if val, exists := facts[ConfigBurndownSampling].(int); exists {
		analyser.Sampling = val
	}
	if val, exists := facts[ConfigBurndownTrackFiles].(bool); exists {
		analyser.TrackFiles = val
	}
	if people, exists := facts[ConfigBurndownTrackPeople].(bool); people {
		if val, exists := facts[identity.FactIdentityDetectorPeopleCount].(int); exists {
			analyser.PeopleNumber = val
			analyser.reversedPeopleDict = facts[identity.FactIdentityDetectorReversedPeopleDict].([]string)
		}
	} else if exists {
		analyser.PeopleNumber = 0
	}
	if val, exists := facts[ConfigBurndownDebug].(bool); exists {
		analyser.Debug = val
	}
}

// Flag for the command line switch which enables this analysis.
func (analyser *BurndownAnalysis) Flag() string {
	return "burndown"
}

// Description returns the text which explains what the analysis is doing.
func (analyser *BurndownAnalysis) Description() string {
	return "Line burndown stats indicate the numbers of lines which were last edited within " +
		"specific time intervals through time. Search for \"git-of-theseus\" on the internet."
}

// Initialize resets the temporary caches and prepares this PipelineItem for a series of Consume()
// calls. The repository which is going to be analysed is supplied as an argument.
func (analyser *BurndownAnalysis) Initialize(repository *git.Repository) {
	if analyser.Granularity <= 0 {
		log.Printf("Warning: adjusted the granularity to %d days\n",
			DefaultBurndownGranularity)
		analyser.Granularity = DefaultBurndownGranularity
	}
	if analyser.Sampling <= 0 {
		log.Printf("Warning: adjusted the sampling to %d days\n",
			DefaultBurndownGranularity)
		analyser.Sampling = DefaultBurndownGranularity
	}
	if analyser.Sampling > analyser.Granularity {
		log.Printf("Warning: the sampling may not be greater than the granularity, "+
			"adjusted the sampling to %d days\n", analyser.Granularity)
		analyser.Sampling = analyser.Granularity
	}
	analyser.repository = repository
	analyser.globalHistory = sparseHistory{}
	analyser.fileHistories = map[string]sparseHistory{}
	analyser.peopleHistories = make([]sparseHistory, analyser.PeopleNumber)
	analyser.files = map[string]*burndown.File{}
	analyser.mergedFiles = map[string]bool{}
	analyser.mergedAuthor = identity.AuthorMissing
	analyser.renames = map[string]string{}
	analyser.matrix = make([]map[int]int64, analyser.PeopleNumber)
	analyser.day = 0
	analyser.previousDay = 0
}

// Consume runs this PipelineItem on the next commit's data.
// `deps` contain all the results from upstream PipelineItem-s as requested by Requires().
// Additionally, DependencyCommit is always present there and represents the analysed *object.Commit.
// This function returns the mapping with analysis results. The keys must be the same as
// in Provides(). If there was an error, nil is returned.
func (analyser *BurndownAnalysis) Consume(deps map[string]interface{}) (map[string]interface{}, error) {
	author := deps[identity.DependencyAuthor].(int)
	day := deps[items.DependencyDay].(int)
	if !deps[core.DependencyIsMerge].(bool) {
		analyser.day = day
		analyser.onNewDay()
	} else {
		// effectively disables the status updates if the commit is a merge
		// we will analyse the conflicts resolution in Merge()
		analyser.day = burndown.TreeMergeMark
		analyser.mergedFiles = map[string]bool{}
		analyser.mergedAuthor = author
	}
	cache := deps[items.DependencyBlobCache].(map[plumbing.Hash]*items.CachedBlob)
	treeDiffs := deps[items.DependencyTreeChanges].(object.Changes)
	fileDiffs := deps[items.DependencyFileDiff].(map[string]items.FileDiffData)
	for _, change := range treeDiffs {
		action, _ := change.Action()
		var err error
		switch action {
		case merkletrie.Insert:
			err = analyser.handleInsertion(change, author, cache)
		case merkletrie.Delete:
			err = analyser.handleDeletion(change, author, cache)
		case merkletrie.Modify:
			err = analyser.handleModification(change, author, cache, fileDiffs)
		}
		if err != nil {
			return nil, err
		}
	}
	// in case there is a merge, analyser.day equals TreeMergeMark
	analyser.day = day
	return nil, nil
}

// Fork clones this item. Everything is copied by reference except the files
// which are copied by value.
func (analyser *BurndownAnalysis) Fork(n int) []core.PipelineItem {
	result := make([]core.PipelineItem, n)
	for i := range result {
		clone := *analyser
		clone.files = map[string]*burndown.File{}
		for key, file := range analyser.files {
			clone.files[key] = file.Clone(false)
		}
		result[i] = &clone
	}
	return result
}

// Merge combines several items together. We apply the special file merging logic here.
func (analyser *BurndownAnalysis) Merge(branches []core.PipelineItem) {
	all := make([]*BurndownAnalysis, len(branches)+1)
	all[0] = analyser
	for i, branch := range branches {
		all[i+1] = branch.(*BurndownAnalysis)
	}
	keys := map[string]bool{}
	for _, burn := range all {
		for key, val := range burn.mergedFiles {
			// (*)
			// there can be contradicting flags,
			// e.g. the item was renamed and a new item was written in its place
			// this may be not exactly accurate
			keys[key] = keys[key] || val
		}
	}
	for key, val := range keys {
		if !val {
			for _, burn := range all {
				delete(burn.files, key)
			}
			continue
		}
		files := make([]*burndown.File, 0, len(all))
		for _, burn := range all {
			file := burn.files[key]
			if file != nil {
				// file can be nil if it is considered binary in this branch
				files = append(files, file)
			}
		}
		if len(files) == 0 {
			// so we could be wrong in (*) and there is no such file eventually
			// it could also have been removed in the merge commit itself
			continue
		}
		if len(files) > 1 {
			files[0].Merge(analyser.packPersonWithDay(analyser.mergedAuthor, analyser.day), files[1:]...)
		}
		for _, burn := range all {
			if burn.files[key] != files[0] {
				burn.files[key] = files[0].Clone(false)
			}
		}
	}
	analyser.onNewDay()
}

// Finalize returns the result of the analysis. Further Consume() calls are not expected.
func (analyser *BurndownAnalysis) Finalize() interface{} {
	globalHistory, lastDay := analyser.groupSparseHistory(analyser.globalHistory, -1)
	fileHistories := map[string]DenseHistory{}
	for key, history := range analyser.fileHistories {
		fileHistories[key], _ = analyser.groupSparseHistory(history, lastDay)
	}
	peopleHistories := make([]DenseHistory, analyser.PeopleNumber)
	for i, history := range analyser.peopleHistories {
		if len(history) > 0 {
			// there can be people with only trivial merge commits and without own lines
			peopleHistories[i], _ = analyser.groupSparseHistory(history, lastDay)
		} else {
			peopleHistories[i] = make(DenseHistory, len(globalHistory))
			for j, gh := range globalHistory {
				peopleHistories[i][j] = make([]int64, len(gh))
			}
		}
	}
	peopleMatrix := make(DenseHistory, analyser.PeopleNumber)
	for i, row := range analyser.matrix {
		mrow := make([]int64, analyser.PeopleNumber+2)
		peopleMatrix[i] = mrow
		for key, val := range row {
			if key == identity.AuthorMissing {
				key = -1
			} else if key == authorSelf {
				key = -2
			}
			mrow[key+2] = val
		}
	}
	return BurndownResult{
		GlobalHistory:      globalHistory,
		FileHistories:      fileHistories,
		PeopleHistories:    peopleHistories,
		PeopleMatrix:       peopleMatrix,
		reversedPeopleDict: analyser.reversedPeopleDict,
		sampling:           analyser.Sampling,
		granularity:        analyser.Granularity,
	}
}

// Serialize converts the analysis result as returned by Finalize() to text or bytes.
// The text format is YAML and the bytes format is Protocol Buffers.
func (analyser *BurndownAnalysis) Serialize(result interface{}, binary bool, writer io.Writer) error {
	burndownResult := result.(BurndownResult)
	if binary {
		return analyser.serializeBinary(&burndownResult, writer)
	}
	analyser.serializeText(&burndownResult, writer)
	return nil
}

// Deserialize converts the specified protobuf bytes to BurndownResult.
func (analyser *BurndownAnalysis) Deserialize(pbmessage []byte) (interface{}, error) {
	msg := pb.BurndownAnalysisResults{}
	err := proto.Unmarshal(pbmessage, &msg)
	if err != nil {
		return nil, err
	}
	result := BurndownResult{}
	convertCSR := func(mat *pb.BurndownSparseMatrix) DenseHistory {
		res := make(DenseHistory, mat.NumberOfRows)
		for i := 0; i < int(mat.NumberOfRows); i++ {
			res[i] = make([]int64, mat.NumberOfColumns)
			for j := 0; j < len(mat.Rows[i].Columns); j++ {
				res[i][j] = int64(mat.Rows[i].Columns[j])
			}
		}
		return res
	}
	result.GlobalHistory = convertCSR(msg.Project)
	result.FileHistories = map[string]DenseHistory{}
	for _, mat := range msg.Files {
		result.FileHistories[mat.Name] = convertCSR(mat)
	}
	result.reversedPeopleDict = make([]string, len(msg.People))
	result.PeopleHistories = make([]DenseHistory, len(msg.People))
	for i, mat := range msg.People {
		result.PeopleHistories[i] = convertCSR(mat)
		result.reversedPeopleDict[i] = mat.Name
	}
	if msg.PeopleInteraction != nil {
		result.PeopleMatrix = make(DenseHistory, msg.PeopleInteraction.NumberOfRows)
	}
	for i := 0; i < len(result.PeopleMatrix); i++ {
		result.PeopleMatrix[i] = make([]int64, msg.PeopleInteraction.NumberOfColumns)
		for j := int(msg.PeopleInteraction.Indptr[i]); j < int(msg.PeopleInteraction.Indptr[i+1]); j++ {
			result.PeopleMatrix[i][msg.PeopleInteraction.Indices[j]] = msg.PeopleInteraction.Data[j]
		}
	}
	result.sampling = int(msg.Sampling)
	result.granularity = int(msg.Granularity)
	return result, nil
}

// MergeResults combines two BurndownResult-s together.
func (analyser *BurndownAnalysis) MergeResults(
	r1, r2 interface{}, c1, c2 *core.CommonAnalysisResult) interface{} {
	bar1 := r1.(BurndownResult)
	bar2 := r2.(BurndownResult)
	merged := BurndownResult{}
	if bar1.sampling < bar2.sampling {
		merged.sampling = bar1.sampling
	} else {
		merged.sampling = bar2.sampling
	}
	if bar1.granularity < bar2.granularity {
		merged.granularity = bar1.granularity
	} else {
		merged.granularity = bar2.granularity
	}
	var people map[string][3]int
	people, merged.reversedPeopleDict = identity.Detector{}.MergeReversedDicts(
		bar1.reversedPeopleDict, bar2.reversedPeopleDict)
	var wg sync.WaitGroup
	if len(bar1.GlobalHistory) > 0 || len(bar2.GlobalHistory) > 0 {
		wg.Add(1)
		go func() {
			defer wg.Done()
			merged.GlobalHistory = mergeMatrices(
				bar1.GlobalHistory, bar2.GlobalHistory,
				bar1.granularity, bar1.sampling,
				bar2.granularity, bar2.sampling,
				c1, c2)
		}()
	}
	if len(bar1.FileHistories) > 0 || len(bar2.FileHistories) > 0 {
		merged.FileHistories = map[string]DenseHistory{}
		historyMutex := sync.Mutex{}
		for key, fh1 := range bar1.FileHistories {
			if fh2, exists := bar2.FileHistories[key]; exists {
				wg.Add(1)
				go func(fh1, fh2 DenseHistory, key string) {
					defer wg.Done()
					historyMutex.Lock()
					defer historyMutex.Unlock()
					merged.FileHistories[key] = mergeMatrices(
						fh1, fh2, bar1.granularity, bar1.sampling, bar2.granularity, bar2.sampling, c1, c2)
				}(fh1, fh2, key)
			} else {
				historyMutex.Lock()
				merged.FileHistories[key] = fh1
				historyMutex.Unlock()
			}
		}
		for key, fh2 := range bar2.FileHistories {
			if _, exists := bar1.FileHistories[key]; !exists {
				historyMutex.Lock()
				merged.FileHistories[key] = fh2
				historyMutex.Unlock()
			}
		}
	}
	if len(merged.reversedPeopleDict) > 0 {
		merged.PeopleHistories = make([]DenseHistory, len(merged.reversedPeopleDict))
		for i, key := range merged.reversedPeopleDict {
			ptrs := people[key]
			if ptrs[1] < 0 {
				if len(bar2.PeopleHistories) > 0 {
					merged.PeopleHistories[i] = bar2.PeopleHistories[ptrs[2]]
				}
			} else if ptrs[2] < 0 {
				if len(bar1.PeopleHistories) > 0 {
					merged.PeopleHistories[i] = bar1.PeopleHistories[ptrs[1]]
				}
			} else {
				wg.Add(1)
				go func(i int) {
					defer wg.Done()
					var m1, m2 DenseHistory
					if len(bar1.PeopleHistories) > 0 {
						m1 = bar1.PeopleHistories[ptrs[1]]
					}
					if len(bar2.PeopleHistories) > 0 {
						m2 = bar2.PeopleHistories[ptrs[2]]
					}
					merged.PeopleHistories[i] = mergeMatrices(
						m1, m2,
						bar1.granularity, bar1.sampling,
						bar2.granularity, bar2.sampling,
						c1, c2,
					)
				}(i)
			}
		}
		wg.Add(1)
		go func() {
			defer wg.Done()
			if len(bar2.PeopleMatrix) == 0 {
				merged.PeopleMatrix = bar1.PeopleMatrix
				// extend the matrix in both directions
				for i := 0; i < len(merged.PeopleMatrix); i++ {
					for j := len(bar1.reversedPeopleDict); j < len(merged.reversedPeopleDict); j++ {
						merged.PeopleMatrix[i] = append(merged.PeopleMatrix[i], 0)
					}
				}
				for i := len(bar1.reversedPeopleDict); i < len(merged.reversedPeopleDict); i++ {
					merged.PeopleMatrix = append(
						merged.PeopleMatrix, make([]int64, len(merged.reversedPeopleDict)+2))
				}
			} else {
				merged.PeopleMatrix = make(DenseHistory, len(merged.reversedPeopleDict))
				for i := range merged.PeopleMatrix {
					merged.PeopleMatrix[i] = make([]int64, len(merged.reversedPeopleDict)+2)
				}
				for i, key := range bar1.reversedPeopleDict {
					mi := people[key][0] // index in merged.reversedPeopleDict
					copy(merged.PeopleMatrix[mi][:2], bar1.PeopleMatrix[i][:2])
					for j, val := range bar1.PeopleMatrix[i][2:] {
						merged.PeopleMatrix[mi][2+people[bar1.reversedPeopleDict[j]][0]] = val
					}
				}
				for i, key := range bar2.reversedPeopleDict {
					mi := people[key][0] // index in merged.reversedPeopleDict
					merged.PeopleMatrix[mi][0] += bar2.PeopleMatrix[i][0]
					merged.PeopleMatrix[mi][1] += bar2.PeopleMatrix[i][1]
					for j, val := range bar2.PeopleMatrix[i][2:] {
						merged.PeopleMatrix[mi][2+people[bar2.reversedPeopleDict[j]][0]] += val
					}
				}
			}
		}()
	}
	wg.Wait()
	return merged
}

// mergeMatrices takes two [number of samples][number of bands] matrices,
// resamples them to days so that they become square, sums and resamples back to the
// least of (sampling1, sampling2) and (granularity1, granularity2).
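// For example (hypothetical numbers): merging a result with granularity 30 / sampling 15
// and a result with granularity 20 / sampling 30 produces granularity 20 and sampling 15.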
func mergeMatrices(m1, m2 DenseHistory, granularity1, sampling1, granularity2, sampling2 int,
	c1, c2 *core.CommonAnalysisResult) DenseHistory {
	commonMerged := *c1
	commonMerged.Merge(c2)
	var granularity, sampling int
	if sampling1 < sampling2 {
		sampling = sampling1
	} else {
		sampling = sampling2
	}
	if granularity1 < granularity2 {
		granularity = granularity1
	} else {
		granularity = granularity2
	}
	size := int((commonMerged.EndTime - commonMerged.BeginTime) / (3600 * 24))
	daily := make([][]float32, size+granularity)
	for i := range daily {
		daily[i] = make([]float32, size+sampling)
	}
	if len(m1) > 0 {
		addBurndownMatrix(m1, granularity1, sampling1, daily,
			int(c1.BeginTime-commonMerged.BeginTime)/(3600*24))
	}
	if len(m2) > 0 {
		addBurndownMatrix(m2, granularity2, sampling2, daily,
			int(c2.BeginTime-commonMerged.BeginTime)/(3600*24))
	}
	// convert daily to [][]int64
	result := make(DenseHistory, (size+sampling-1)/sampling)
	for i := range result {
		result[i] = make([]int64, (size+granularity-1)/granularity)
		sampledIndex := i * sampling
		if i == len(result)-1 {
			sampledIndex = size - 1
		}
		for j := 0; j < len(result[i]); j++ {
			accum := float32(0)
			for k := j * granularity; k < (j+1)*granularity && k < size; k++ {
				accum += daily[sampledIndex][k]
			}
			result[i][j] = int64(accum)
		}
	}
	return result
}

// Explode `matrix` so that it is daily sampled and has daily bands, shift by `offset` days
// and add to the accumulator. `daily` size is square and is guaranteed to fit `matrix` by
// the caller.
// Rows: *at least* len(matrix) * sampling + offset
// Columns: *at least* len(matrix[...]) * granularity + offset
// `matrix` can be sparse, so that the last columns which are equal to 0 are truncated.
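// For example (hypothetical sizes): a 2x3 `matrix` recorded with sampling 15 and
// granularity 30 at offset 0 requires `daily` to have at least 2*15 = 30 rows and
// 3*30 = 90 columns.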
func addBurndownMatrix(matrix DenseHistory, granularity, sampling int, daily [][]float32, offset int) {
	// Determine the maximum number of bands; the actual one may be larger but we do not care
	maxCols := 0
	for _, row := range matrix {
		if maxCols < len(row) {
			maxCols = len(row)
		}
	}
	neededRows := len(matrix)*sampling + offset
	if len(daily) < neededRows {
		log.Panicf("merge bug: too few daily rows: required %d, have %d",
			neededRows, len(daily))
	}
	if len(daily[0]) < maxCols {
		log.Panicf("merge bug: too few daily cols: required %d, have %d",
			maxCols, len(daily[0]))
	}
	for x := 0; x < maxCols; x++ {
		for y := 0; y < len(matrix); y++ {
			if x*granularity > (y+1)*sampling {
				// the future is zeros
				continue
			}
			decay := func(startIndex int, startVal float32) {
				if startVal == 0 {
					return
				}
				k := float32(matrix[y][x]) / startVal // <= 1
				scale := float32((y+1)*sampling - startIndex)
				for i := x * granularity; i < (x+1)*granularity; i++ {
					initial := daily[startIndex-1+offset][i+offset]
					for j := startIndex; j < (y+1)*sampling; j++ {
						daily[j+offset][i+offset] = initial * (1 + (k-1)*float32(j-startIndex+1)/scale)
					}
				}
			}
			raise := func(finishIndex int, finishVal float32) {
				var initial float32
				if y > 0 {
					initial = float32(matrix[y-1][x])
				}
				startIndex := y * sampling
				if startIndex < x*granularity {
					startIndex = x * granularity
				}
				if startIndex == finishIndex {
					return
				}
				avg := (finishVal - initial) / float32(finishIndex-startIndex)
				for j := y * sampling; j < finishIndex; j++ {
					for i := startIndex; i <= j; i++ {
						daily[j+offset][i+offset] = avg
					}
				}
				// copy [x*g..y*s)
				for j := y * sampling; j < finishIndex; j++ {
					for i := x * granularity; i < y*sampling; i++ {
						daily[j+offset][i+offset] = daily[j-1+offset][i+offset]
					}
				}
			}
			if (x+1)*granularity >= (y+1)*sampling {
				// x*granularity <= (y+1)*sampling
				// 1. x*granularity <= y*sampling
				//    y*sampling..(y+1)sampling
				//
				//       x+1
				//        /
				//       /
				//      / y+1  -|
				//     /        |
				//    / y      -|
				//   /
				//  / x
				//
				// 2. x*granularity > y*sampling
				//    x*granularity..(y+1)sampling
				//
				//       x+1
				//        /
				//       /
				//      / y+1  -|
				//     /        |
				//    / x      -|
				//   /
				//  / y
				if x*granularity <= y*sampling {
					raise((y+1)*sampling, float32(matrix[y][x]))
				} else if (y+1)*sampling > x*granularity {
					raise((y+1)*sampling, float32(matrix[y][x]))
					avg := float32(matrix[y][x]) / float32((y+1)*sampling-x*granularity)
					for j := x * granularity; j < (y+1)*sampling; j++ {
						for i := x * granularity; i <= j; i++ {
							daily[j+offset][i+offset] = avg
						}
					}
				}
			} else if (x+1)*granularity >= y*sampling {
				// y*sampling <= (x+1)*granularity < (y+1)sampling
				// y*sampling..(x+1)*granularity
				// (x+1)*granularity..(y+1)sampling
				//
				//       x+1
				//        /\
				//       /  \
				//      /    \
				//     /    y+1
				//    /
				//   y
				v1 := float32(matrix[y-1][x])
				v2 := float32(matrix[y][x])
				var peak float32
				delta := float32((x+1)*granularity - y*sampling)
				var scale float32
				var previous float32
				if y > 0 && (y-1)*sampling >= x*granularity {
					// x*g <= (y-1)*s <= y*s <= (x+1)*g <= (y+1)*s
					//           |________|.......^
					if y > 1 {
						previous = float32(matrix[y-2][x])
					}
					scale = float32(sampling)
				} else {
					// (y-1)*s < x*g <= y*s <= (x+1)*g <= (y+1)*s
					//            |______|.......^
					if y == 0 {
						scale = float32(sampling)
					} else {
						scale = float32(y*sampling - x*granularity)
					}
				}
				peak = v1 + (v1-previous)/scale*delta
				if v2 > peak {
					// we need to adjust the peak, it may not be less than the decayed value
					if y < len(matrix)-1 {
						// y*s <= (x+1)*g <= (y+1)*s < (y+2)*s
						//           ^.........|_________|
						k := (v2 - float32(matrix[y+1][x])) / float32(sampling) // > 0
						peak = float32(matrix[y][x]) + k*float32((y+1)*sampling-(x+1)*granularity)
						// peak > v2 > v1
					} else {
						peak = v2
						// not enough data to interpolate; this is at least not restricted
					}
				}
				raise((x+1)*granularity, peak)
				decay((x+1)*granularity, peak)
			} else {
				// (x+1)*granularity < y*sampling
				// y*sampling..(y+1)sampling
				decay(y*sampling, float32(matrix[y-1][x]))
			}
		}
	}
}

func (analyser *BurndownAnalysis) serializeText(result *BurndownResult, writer io.Writer) {
	fmt.Fprintln(writer, "  granularity:", result.granularity)
	fmt.Fprintln(writer, "  sampling:", result.sampling)
	yaml.PrintMatrix(writer, result.GlobalHistory, 2, "project", true)
	if len(result.FileHistories) > 0 {
		fmt.Fprintln(writer, "  files:")
		keys := sortedKeys(result.FileHistories)
		for _, key := range keys {
			yaml.PrintMatrix(writer, result.FileHistories[key], 4, key, true)
		}
	}
	if len(result.PeopleHistories) > 0 {
		fmt.Fprintln(writer, "  people_sequence:")
		for key := range result.PeopleHistories {
			fmt.Fprintln(writer, "    - "+yaml.SafeString(result.reversedPeopleDict[key]))
		}
		fmt.Fprintln(writer, "  people:")
		for key, val := range result.PeopleHistories {
			yaml.PrintMatrix(writer, val, 4, result.reversedPeopleDict[key], true)
		}
		fmt.Fprintln(writer, "  people_interaction: |-")
		yaml.PrintMatrix(writer, result.PeopleMatrix, 4, "", false)
	}
}

func (analyser *BurndownAnalysis) serializeBinary(result *BurndownResult, writer io.Writer) error {
	message := pb.BurndownAnalysisResults{
		Granularity: int32(result.granularity),
		Sampling:    int32(result.sampling),
	}
	if len(result.GlobalHistory) > 0 {
		message.Project = pb.ToBurndownSparseMatrix(result.GlobalHistory, "project")
	}
	if len(result.FileHistories) > 0 {
		message.Files = make([]*pb.BurndownSparseMatrix, len(result.FileHistories))
		keys := sortedKeys(result.FileHistories)
		i := 0
		for _, key := range keys {
			message.Files[i] = pb.ToBurndownSparseMatrix(
				result.FileHistories[key], key)
			i++
		}
	}
	if len(result.PeopleHistories) > 0 {
		message.People = make(
			[]*pb.BurndownSparseMatrix, len(result.PeopleHistories))
		for key, val := range result.PeopleHistories {
			if len(val) > 0 {
				message.People[key] = pb.ToBurndownSparseMatrix(val, result.reversedPeopleDict[key])
			}
		}
		message.PeopleInteraction = pb.DenseToCompressedSparseRowMatrix(result.PeopleMatrix)
	}
	serialized, err := proto.Marshal(&message)
	if err != nil {
		return err
	}
	writer.Write(serialized)
	return nil
}

func sortedKeys(m map[string]DenseHistory) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}

func checkClose(c io.Closer) {
	if err := c.Close(); err != nil {
		panic(err)
	}
}

// We do a hack and store the day in the first 14 bits and the author index in the last 18.
// Strictly speaking, int can be 64-bit and then the author index occupies 32+18 bits.
// This hack is needed to simplify the values storage inside File-s. We can compare
// different values together and they are compared as days for the same author.
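// For example (hypothetical values, assuming burndown.TreeMaxBinPower == 14):
// packPersonWithDay(3, 100) == (3 << 14) | 100 == 49252, and
// unpackPersonWithDay(49252) yields (3, 100) back.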
func (analyser *BurndownAnalysis) packPersonWithDay(person int, day int) int {
	if analyser.PeopleNumber == 0 {
		return day
	}
	result := day & burndown.TreeMergeMark
	result |= person << burndown.TreeMaxBinPower
	// This effectively means max (16383 - 1) days (>44 years) and (131072 - 2) devs.
	// One day less because burndown.TreeMergeMark = ((1 << 14) - 1) is a special day.
	return result
}

func (analyser *BurndownAnalysis) unpackPersonWithDay(value int) (int, int) {
	if analyser.PeopleNumber == 0 {
		return identity.AuthorMissing, value
	}
	return value >> burndown.TreeMaxBinPower, value & burndown.TreeMergeMark
}

func (analyser *BurndownAnalysis) onNewDay() {
	if analyser.day > analyser.previousDay {
		analyser.previousDay = analyser.day
	}
	analyser.mergedAuthor = identity.AuthorMissing
}

func (analyser *BurndownAnalysis) updateGlobal(currentTime, previousTime, delta int) {
	_, currentDay := analyser.unpackPersonWithDay(currentTime)
	_, previousDay := analyser.unpackPersonWithDay(previousTime)
	currentHistory := analyser.globalHistory[currentDay]
	if currentHistory == nil {
		currentHistory = map[int]int64{}
		analyser.globalHistory[currentDay] = currentHistory
	}
	currentHistory[previousDay] += int64(delta)
}

// updateFile is bound to the specific `history` in the closure.
func (analyser *BurndownAnalysis) updateFile(
	history sparseHistory, currentTime, previousTime, delta int) {
	_, currentDay := analyser.unpackPersonWithDay(currentTime)
	_, previousDay := analyser.unpackPersonWithDay(previousTime)
	currentHistory := history[currentDay]
	if currentHistory == nil {
		currentHistory = map[int]int64{}
		history[currentDay] = currentHistory
	}
	currentHistory[previousDay] += int64(delta)
}

func (analyser *BurndownAnalysis) updateAuthor(currentTime, previousTime, delta int) {
	previousAuthor, previousDay := analyser.unpackPersonWithDay(previousTime)
	if previousAuthor == identity.AuthorMissing {
		return
	}
	_, currentDay := analyser.unpackPersonWithDay(currentTime)
	history := analyser.peopleHistories[previousAuthor]
	if history == nil {
		history = sparseHistory{}
		analyser.peopleHistories[previousAuthor] = history
	}
	currentHistory := history[currentDay]
	if currentHistory == nil {
		currentHistory = map[int]int64{}
		history[currentDay] = currentHistory
	}
	currentHistory[previousDay] += int64(delta)
}

func (analyser *BurndownAnalysis) updateMatrix(currentTime, previousTime, delta int) {
	newAuthor, _ := analyser.unpackPersonWithDay(currentTime)
	oldAuthor, _ := analyser.unpackPersonWithDay(previousTime)
	if oldAuthor == identity.AuthorMissing {
		return
	}
	if newAuthor == oldAuthor && delta > 0 {
		newAuthor = authorSelf
	}
	row := analyser.matrix[oldAuthor]
	if row == nil {
		row = map[int]int64{}
		analyser.matrix[oldAuthor] = row
	}
	cell, exists := row[newAuthor]
	if !exists {
		row[newAuthor] = 0
		cell = 0
	}
	row[newAuthor] = cell + int64(delta)
}

func (analyser *BurndownAnalysis) newFile(
	hash plumbing.Hash, name string, author int, day int, size int) (*burndown.File, error) {
	updaters := make([]burndown.Updater, 1)
	updaters[0] = analyser.updateGlobal
	if analyser.TrackFiles {
		// the history may already exist if the file was created in a future branch
		history := analyser.fileHistories[name]
		if history == nil {
			history = sparseHistory{}
		}
		analyser.fileHistories[name] = history
		updaters = append(updaters, func(currentTime, previousTime, delta int) {
			analyser.updateFile(history, currentTime, previousTime, delta)
		})
	}
	if analyser.PeopleNumber > 0 {
		updaters = append(updaters, analyser.updateAuthor)
		updaters = append(updaters, analyser.updateMatrix)
		day = analyser.packPersonWithDay(author, day)
	}
	return burndown.NewFile(day, size, updaters...), nil
}

func (analyser *BurndownAnalysis) handleInsertion(
	change *object.Change, author int, cache map[plumbing.Hash]*items.CachedBlob) error {
	blob := cache[change.To.TreeEntry.Hash]
	lines, err := blob.CountLines()
	if err != nil {
		// binary
		return nil
	}
	name := change.To.Name
	file, exists := analyser.files[name]
	if exists {
		return fmt.Errorf("file %s already exists", name)
	}
	var hash plumbing.Hash
	if analyser.day != burndown.TreeMergeMark {
		hash = blob.Hash
	}
	file, err = analyser.newFile(hash, name, author, analyser.day, lines)
	analyser.files[name] = file
	if analyser.day == burndown.TreeMergeMark {
		analyser.mergedFiles[name] = true
	}
	return err
}

func (analyser *BurndownAnalysis) handleDeletion(
	change *object.Change, author int, cache map[plumbing.Hash]*items.CachedBlob) error {
	name := change.From.Name
	file, exists := analyser.files[name]
	blob := cache[change.From.TreeEntry.Hash]
	lines, err := blob.CountLines()
	if exists && err != nil {
		return fmt.Errorf("file %s unexpectedly became binary", name)
	}
	if !exists {
		return nil
	}
	file.Update(analyser.packPersonWithDay(author, analyser.day), 0, 0, lines)
	delete(analyser.files, name)
	delete(analyser.fileHistories, name)
	analyser.renames[name] = ""
	if analyser.day == burndown.TreeMergeMark {
		analyser.mergedFiles[name] = false
	}
	return nil
}

func (analyser *BurndownAnalysis) handleModification(
	change *object.Change, author int, cache map[plumbing.Hash]*items.CachedBlob,
	diffs map[string]items.FileDiffData) error {
	if analyser.day == burndown.TreeMergeMark {
		analyser.mergedFiles[change.To.Name] = true
	}
	file, exists := analyser.files[change.From.Name]
	if !exists {
		// this indeed may happen
		return analyser.handleInsertion(change, author, cache)
	}
	// possible rename
	if change.To.Name != change.From.Name {
		err := analyser.handleRename(change.From.Name, change.To.Name)
		if err != nil {
			return err
		}
	}
	// check for binary changes
	blobFrom := cache[change.From.TreeEntry.Hash]
	_, errFrom := blobFrom.CountLines()
	blobTo := cache[change.To.TreeEntry.Hash]
	_, errTo := blobTo.CountLines()
	if errFrom != errTo {
		if errFrom != nil {
			// the file is no longer binary
			return analyser.handleInsertion(change, author, cache)
		}
		// the file became binary
		return analyser.handleDeletion(change, author, cache)
	} else if errFrom != nil {
		// both versions are binary, so there is nothing to update
		return nil
	}
	thisDiffs := diffs[change.To.Name]
	if file.Len() != thisDiffs.OldLinesOfCode {
		log.Printf("====TREE====\n%s", file.Dump())
		return fmt.Errorf("%s: internal integrity error src %d != %d %s -> %s",
			change.To.Name, thisDiffs.OldLinesOfCode, file.Len(),
			change.From.TreeEntry.Hash.String(), change.To.TreeEntry.Hash.String())
	}
	// we do not call RunesToDiffLines so the number of lines equals
	// the rune count
	position := 0
	pending := diffmatchpatch.Diff{Text: ""}
	apply := func(edit diffmatchpatch.Diff) {
		length := utf8.RuneCountInString(edit.Text)
		if edit.Type == diffmatchpatch.DiffInsert {
			file.Update(analyser.packPersonWithDay(author, analyser.day), position, length, 0)
			position += length
		} else {
			file.Update(analyser.packPersonWithDay(author, analyser.day), position, 0, length)
		}
		if analyser.Debug {
			file.Validate()
		}
	}
	for _, edit := range thisDiffs.Diffs {
		dumpBefore := ""
		if analyser.Debug {
			dumpBefore = file.Dump()
		}
		length := utf8.RuneCountInString(edit.Text)
		debugError := func() {
			log.Printf("%s: internal diff error\n", change.To.Name)
			log.Printf("Update(%d, %d, %d (0), %d (0))\n", analyser.day, position,
				length, utf8.RuneCountInString(pending.Text))
			if dumpBefore != "" {
				log.Printf("====TREE BEFORE====\n%s====END====\n", dumpBefore)
			}
			log.Printf("====TREE AFTER====\n%s====END====\n", file.Dump())
		}
		switch edit.Type {
		case diffmatchpatch.DiffEqual:
			if pending.Text != "" {
				apply(pending)
				pending.Text = ""
			}
			position += length
		case diffmatchpatch.DiffInsert:
			if pending.Text != "" {
				if pending.Type == diffmatchpatch.DiffInsert {
					debugError()
					return errors.New("DiffInsert may not appear after DiffInsert")
				}
				file.Update(analyser.packPersonWithDay(author, analyser.day), position, length,
					utf8.RuneCountInString(pending.Text))
				if analyser.Debug {
					file.Validate()
				}
				position += length
				pending.Text = ""
			} else {
				pending = edit
			}
		case diffmatchpatch.DiffDelete:
			if pending.Text != "" {
				debugError()
				return errors.New("DiffDelete may not appear after DiffInsert/DiffDelete")
			}
			pending = edit
		default:
			debugError()
			return fmt.Errorf("diff operation is not supported: %d", edit.Type)
		}
	}
	if pending.Text != "" {
		apply(pending)
		pending.Text = ""
	}
	if file.Len() != thisDiffs.NewLinesOfCode {
		return fmt.Errorf("%s: internal integrity error dst %d != %d %s -> %s",
			change.To.Name, thisDiffs.NewLinesOfCode, file.Len(),
			change.From.TreeEntry.Hash.String(), change.To.TreeEntry.Hash.String())
	}
	return nil
}

func (analyser *BurndownAnalysis) handleRename(from, to string) error {
	if from == to {
		return nil
	}
	file, exists := analyser.files[from]
	if !exists {
		return fmt.Errorf("file %s > %s does not exist (files)", from, to)
	}
	analyser.files[to] = file
	delete(analyser.files, from)
	if analyser.day == burndown.TreeMergeMark {
		analyser.mergedFiles[from] = false
	}
	if analyser.TrackFiles {
		history := analyser.fileHistories[from]
		if history == nil {
			// a future branch could have already renamed it, so we follow the rename chain
			futureRename, exists := analyser.renames[from]
			if futureRename == "" && exists {
				// the file will be deleted in the future, whatever
				history = sparseHistory{}
			} else {
				history = analyser.fileHistories[futureRename]
				if history == nil {
					return fmt.Errorf("file %s > %s does not exist (histories)", from, to)
				}
			}
		}
		analyser.fileHistories[to] = history
		delete(analyser.fileHistories, from)
	}
	analyser.renames[from] = to
	return nil
}

func (analyser *BurndownAnalysis) groupSparseHistory(
	history sparseHistory, lastDay int) (DenseHistory, int) {
	if len(history) == 0 {
		panic("empty history")
	}
	var days []int
	for day := range history {
		days = append(days, day)
	}
	sort.Ints(days)
	if lastDay >= 0 {
		if days[len(days)-1] < lastDay {
			days = append(days, lastDay)
		} else if days[len(days)-1] > lastDay {
			panic("days corruption")
		}
	} else {
		lastDay = days[len(days)-1]
	}
	// [y][x]
	// y - sampling
	// x - granularity
	samples := lastDay/analyser.Sampling + 1
	bands := lastDay/analyser.Granularity + 1
	result := make(DenseHistory, samples)
	// initialize every sample row, not only the first `bands` rows -
	// otherwise the rows beyond `bands` stay nil when Sampling < Granularity
	for i := 0; i < samples; i++ {
		result[i] = make([]int64, bands)
	}
	prevsi := 0
	for _, day := range days {
		si := day / analyser.Sampling
		if si > prevsi {
			state := result[prevsi]
			for i := prevsi + 1; i <= si; i++ {
				copy(result[i], state)
			}
			prevsi = si
		}
		sample := result[si]
		for bday, value := range history[day] {
			sample[bday/analyser.Granularity] += value
		}
	}
	return result, lastDay
}
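
// A rough usage sketch (hedged: the core pipeline method names below are assumed from
// hercules' core package and may differ between versions):
//
//	burndownItem := &BurndownAnalysis{Granularity: 30, Sampling: 30, TrackFiles: true}
//	pipeline := core.NewPipeline(repository)
//	pipeline.AddItem(burndownItem)
//	pipeline.Initialize(map[string]interface{}{})
//	results, err := pipeline.Run(commits)
//	// on success, results[burndownItem].(BurndownResult) carries the burndown matrices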

func init() {
	core.Registry.Register(&BurndownAnalysis{})
}