analyser.go

package hercules

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"io"
	"os"
	"sort"
	"time"
	"unicode/utf8"

	"github.com/sergi/go-diff/diffmatchpatch"
	"gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/config"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
	"gopkg.in/src-d/go-git.v4/utils/merkletrie"
)

// Analyser gathers the line burndown statistics for a Git repository.
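//
// A minimal usage sketch (the repository path is hypothetical and error
// handling is omitted):
//
//	repo, _ := git.PlainOpen("/path/to/repo")
//	analyser := &Analyser{
//		Repository:          repo,
//		Granularity:         30,
//		Sampling:            15,
//		SimilarityThreshold: 90,
//	}
//	history, fileHistories, _, _ := analyser.Analyse(analyser.Commits())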
type Analyser struct {
	// Repository points to the analysed Git repository struct from go-git.
	Repository *git.Repository
	// Granularity sets the size of each band - the number of days it spans.
	// Smaller values provide better resolution but require more work and eat more
	// memory. 30 days is usually enough.
	Granularity int
	// Sampling sets how detailed the statistics are - the size of the interval in
	// days between consecutive measurements. It is usually a good idea to set it
	// <= Granularity. Try 15 or 30.
	Sampling int
	// SimilarityThreshold adjusts the heuristic to determine file renames.
	// It has the same units as cgit's -X rename-threshold or -M. Better to
	// set it to the default value of 90 (90%).
	SimilarityThreshold int
	// MeasurePeople indicates whether per-developer burndown stats should be recorded.
	MeasurePeople bool
	// PeopleDict maps email -> developer id.
	PeopleDict map[string]int
	// Debug activates the debugging mode. Analyse() runs slower in this mode
	// but it accurately checks all the intermediate states for invariant
	// violations.
	Debug bool
	// OnProgress is the callback which is invoked in Analyse() to report its
	// progress. The first argument is the number of processed commits and the
	// second is the total number of commits.
	OnProgress func(int, int)
}

// checkClose closes the Closer and panics if the close fails.
func checkClose(c io.Closer) {
	if err := c.Close(); err != nil {
		panic(err)
	}
}

// loc counts the number of lines in a blob. It returns an error if the
// contents are not valid UTF-8, i.e. the blob is considered binary.
func loc(file *object.Blob) (int, error) {
	reader, err := file.Reader()
	if err != nil {
		panic(err)
	}
	defer checkClose(reader)
	var scanner *bufio.Scanner
	buffer := make([]byte, bufio.MaxScanTokenSize)
	counter := 0
	// restart the scanner if a single line exceeds the buffer instead of failing
	for scanner == nil || scanner.Err() == bufio.ErrTooLong {
		if scanner != nil && !utf8.Valid(scanner.Bytes()) {
			return -1, errors.New("binary")
		}
		scanner = bufio.NewScanner(reader)
		scanner.Buffer(buffer, 0)
		for scanner.Scan() {
			if !utf8.Valid(scanner.Bytes()) {
				return -1, errors.New("binary")
			}
			counter++
		}
	}
	return counter, nil
}

// str reads the whole blob into a string.
func str(file *object.Blob) string {
	reader, err := file.Reader()
	if err != nil {
		panic(err)
	}
	defer checkClose(reader)
	buf := new(bytes.Buffer)
	buf.ReadFrom(reader)
	return buf.String()
}

// dummyIO is a no-op reader/writer/closer which backs dummy blobs.
type dummyIO struct {
}

func (dummyIO) Read(p []byte) (int, error) {
	return 0, io.EOF
}

func (dummyIO) Write(p []byte) (int, error) {
	return len(p), nil
}

func (dummyIO) Close() error {
	return nil
}

// dummyEncodedObject is an empty blob with a fixed hash; it stands in for
// objects which are not present in the repository, e.g. submodules.
type dummyEncodedObject struct {
	FakeHash plumbing.Hash
}

func (obj dummyEncodedObject) Hash() plumbing.Hash {
	return obj.FakeHash
}

func (obj dummyEncodedObject) Type() plumbing.ObjectType {
	return plumbing.BlobObject
}

func (obj dummyEncodedObject) SetType(plumbing.ObjectType) {
}

func (obj dummyEncodedObject) Size() int64 {
	return 0
}

func (obj dummyEncodedObject) SetSize(int64) {
}

func (obj dummyEncodedObject) Reader() (io.ReadCloser, error) {
	return dummyIO{}, nil
}

func (obj dummyEncodedObject) Writer() (io.WriteCloser, error) {
	return dummyIO{}, nil
}

// createDummyBlob wraps a hash into an empty blob.
func createDummyBlob(hash *plumbing.Hash) (*object.Blob, error) {
	return object.DecodeBlob(dummyEncodedObject{*hash})
}

func (analyser *Analyser) handleInsertion(
	change *object.Change, day int, status map[int]int64, files map[string]*File,
	cache *map[plumbing.Hash]*object.Blob) {
	blob := (*cache)[change.To.TreeEntry.Hash]
	lines, err := loc(blob)
	if err != nil {
		return
	}
	name := change.To.Name
	file, exists := files[name]
	if exists {
		panic(fmt.Sprintf("file %s already exists", name))
	}
	// The second status is specific to each file.
	file = NewFile(day, lines, status, make(map[int]int64))
	files[name] = file
}

func (analyser *Analyser) handleDeletion(
	change *object.Change, day int, status map[int]int64, files map[string]*File,
	cache *map[plumbing.Hash]*object.Blob) {
	blob := (*cache)[change.From.TreeEntry.Hash]
	lines, err := loc(blob)
	if err != nil {
		return
	}
	name := change.From.Name
	file := files[name]
	file.Update(day, 0, 0, lines)
	delete(files, name)
}

func (analyser *Analyser) handleModification(
	change *object.Change, day int, status map[int]int64, files map[string]*File,
	cache *map[plumbing.Hash]*object.Blob) {
	blob_from := (*cache)[change.From.TreeEntry.Hash]
	blob_to := (*cache)[change.To.TreeEntry.Hash]
	// we are not validating UTF-8 here because for example
	// git/git 4f7770c87ce3c302e1639a7737a6d2531fe4b160 fetch-pack.c is invalid UTF-8
	str_from := str(blob_from)
	str_to := str(blob_to)
	file, exists := files[change.From.Name]
	if !exists {
		analyser.handleInsertion(change, day, status, files, cache)
		return
	}
	// possible rename
	if change.To.Name != change.From.Name {
		analyser.handleRename(change.From.Name, change.To.Name, files)
	}
	dmp := diffmatchpatch.New()
	src, dst, _ := dmp.DiffLinesToRunes(str_from, str_to)
	if file.Len() != len(src) {
		fmt.Fprintf(os.Stderr, "====TREE====\n%s", file.Dump())
		panic(fmt.Sprintf("%s: internal integrity error src %d != %d %s -> %s",
			change.To.Name, len(src), file.Len(),
			change.From.TreeEntry.Hash.String(), change.To.TreeEntry.Hash.String()))
	}
	diffs := dmp.DiffMainRunes(src, dst, false)
	// we do not call RunesToDiffLines, so the number of lines equals
	// the rune count
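	// A DiffDelete immediately followed by a DiffInsert denotes a replacement;
	// the first edit is therefore kept "pending" so that both can be applied
	// with a single Update() call. As an illustration (every rune stands for
	// one line), the diff [Equal "ab", Delete "c", Insert "de"] on day 7
	// results in Update(7, 2, 2, 1): two lines inserted and one line deleted
	// at position 2.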
	position := 0
	pending := diffmatchpatch.Diff{Text: ""}
	apply := func(edit diffmatchpatch.Diff) {
		length := utf8.RuneCountInString(edit.Text)
		if edit.Type == diffmatchpatch.DiffInsert {
			file.Update(day, position, length, 0)
			position += length
		} else {
			file.Update(day, position, 0, length)
		}
		if analyser.Debug {
			file.Validate()
		}
	}
	for _, edit := range diffs {
		dump_before := ""
		if analyser.Debug {
			dump_before = file.Dump()
		}
		length := utf8.RuneCountInString(edit.Text)
		func() {
			defer func() {
				r := recover()
				if r != nil {
					fmt.Fprintf(os.Stderr, "%s: internal diff error\n", change.To.Name)
					fmt.Fprintf(os.Stderr, "Update(%d, %d, %d (0), %d (0))\n", day, position,
						length, utf8.RuneCountInString(pending.Text))
					if dump_before != "" {
						fmt.Fprintf(os.Stderr, "====TREE BEFORE====\n%s====END====\n", dump_before)
					}
					fmt.Fprintf(os.Stderr, "====TREE AFTER====\n%s====END====\n", file.Dump())
					panic(r)
				}
			}()
			switch edit.Type {
			case diffmatchpatch.DiffEqual:
				if pending.Text != "" {
					apply(pending)
					pending.Text = ""
				}
				position += length
			case diffmatchpatch.DiffInsert:
				if pending.Text != "" {
					if pending.Type == diffmatchpatch.DiffInsert {
						panic("DiffInsert may not appear after DiffInsert")
					}
					file.Update(day, position, length, utf8.RuneCountInString(pending.Text))
					if analyser.Debug {
						file.Validate()
					}
					position += length
					pending.Text = ""
				} else {
					pending = edit
				}
			case diffmatchpatch.DiffDelete:
				if pending.Text != "" {
					panic("DiffDelete may not appear after DiffInsert/DiffDelete")
				}
				pending = edit
			default:
				panic(fmt.Sprintf("diff operation is not supported: %d", edit.Type))
			}
		}()
	}
	if pending.Text != "" {
		apply(pending)
		pending.Text = ""
	}
	if file.Len() != len(dst) {
		panic(fmt.Sprintf("%s: internal integrity error dst %d != %d",
			change.To.Name, len(dst), file.Len()))
	}
}

func (analyser *Analyser) handleRename(from, to string, files map[string]*File) {
	file, exists := files[from]
	if !exists {
		panic(fmt.Sprintf("file %s does not exist", from))
	}
	files[to] = file
	delete(files, from)
}

// Commits returns the critical path in the repository's history. It starts
// from HEAD and traces commits backwards till the root. When it encounters
// a merge (more than one parent), it always chooses the first parent.
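// For instance, if HEAD is a merge commit whose first parent is C and second
// parent is D, the returned slice continues along C's chain and never visits
// D or its ancestors.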
func (analyser *Analyser) Commits() []*object.Commit {
	result := []*object.Commit{}
	repository := analyser.Repository
	head, err := repository.Head()
	if err != nil {
		panic(err)
	}
	commit, err := repository.CommitObject(head.Hash())
	if err != nil {
		panic(err)
	}
	// err is nil at this point, so the loop appends HEAD first and then walks
	// the first-parent chain until io.EOF
	for ; err != io.EOF; commit, err = commit.Parents().Next() {
		if err != nil {
			panic(err)
		}
		result = append(result, commit)
	}
	// reverse the order
	for i, j := 0, len(result)-1; i < j; i, j = i+1, j-1 {
		result[i], result[j] = result[j], result[i]
	}
	return result
}
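
// groupStatus aggregates the per-day line counters into bands spanning
// Granularity days, both globally and for every tracked file. As a worked
// example, with Granularity == 30 and day == 70 the counters are summed into
// three bands covering days 0-29, 30-59 and 60-70 (the last band is partial).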
func (analyser *Analyser) groupStatus(
	status map[int]int64,
	files map[string]*File,
	day int) ([]int64, map[string][]int64) {
	granularity := analyser.Granularity
	if granularity == 0 {
		granularity = 1
	}
	day++
	adjust := 0
	if day%granularity != 0 {
		adjust = 1
	}
	global := make([]int64, day/granularity+adjust)
	var group int64
	for i := 0; i < day; i++ {
		group += status[i]
		if (i % granularity) == (granularity - 1) {
			global[i/granularity] = group
			group = 0
		}
	}
	if day%granularity != 0 {
		global[len(global)-1] = group
	}
	locals := make(map[string][]int64)
	for key, file := range files {
		status := make([]int64, day/granularity+adjust)
		var group int64
		for i := 0; i < day; i++ {
			group += file.Status(1)[i]
			if (i % granularity) == (granularity - 1) {
				status[i/granularity] = group
				group = 0
			}
		}
		if day%granularity != 0 {
			status[len(status)-1] = group
		}
		locals[key] = status
	}
	return global, locals
}

// updateHistories appends the freshly grouped statuses delta times to the
// global history and to every per-file history; files which disappeared are
// dropped and newly appeared files are registered.
func (analyser *Analyser) updateHistories(
	global_history [][]int64, global_status []int64,
	file_histories map[string][][]int64, file_statuses map[string][]int64,
	delta int) [][]int64 {
	for i := 0; i < delta; i++ {
		global_history = append(global_history, global_status)
	}
	to_delete := make([]string, 0)
	for key, fh := range file_histories {
		ls, exists := file_statuses[key]
		if !exists {
			to_delete = append(to_delete, key)
		} else {
			for i := 0; i < delta; i++ {
				fh = append(fh, ls)
			}
			file_histories[key] = fh
		}
	}
	for _, key := range to_delete {
		delete(file_histories, key)
	}
	for key, ls := range file_statuses {
		fh, exists := file_histories[key]
		if exists {
			continue
		}
		for i := 0; i < delta; i++ {
			fh = append(fh, ls)
		}
		file_histories[key] = fh
	}
	return global_history
}

// sortableChange is a change paired with the relevant blob hash; changes are
// ordered lexicographically by that hash.
type sortableChange struct {
	change *object.Change
	hash   plumbing.Hash
}

type sortableChanges []sortableChange

func (change *sortableChange) Less(other *sortableChange) bool {
	for x := 0; x < 20; x++ {
		if change.hash[x] < other.hash[x] {
			return true
		}
		if change.hash[x] > other.hash[x] {
			return false
		}
	}
	return false
}

func (slice sortableChanges) Len() int {
	return len(slice)
}

func (slice sortableChanges) Less(i, j int) bool {
	return slice[i].Less(&slice[j])
}

func (slice sortableChanges) Swap(i, j int) {
	slice[i], slice[j] = slice[j], slice[i]
}

type sortableBlob struct {
	change *object.Change
	size   int64
}

type sortableBlobs []sortableBlob

func (change *sortableBlob) Less(other *sortableBlob) bool {
	return change.size < other.size
}

func (slice sortableBlobs) Len() int {
	return len(slice)
}

func (slice sortableBlobs) Less(i, j int) bool {
	return slice[i].Less(&slice[j])
}

func (slice sortableBlobs) Swap(i, j int) {
	slice[i], slice[j] = slice[j], slice[i]
}
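
// sizesAreClose reports whether two blob sizes differ by at most
// (100 - SimilarityThreshold) percent of the smaller one. For example, with
// SimilarityThreshold set to 90, sizes 1000 and 1080 are considered close
// because 80*100/1000 == 8 <= 10.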
func (analyser *Analyser) sizesAreClose(size1 int64, size2 int64) bool {
	return abs64(size1-size2)*100/max64(1, min64(size1, size2)) <=
		int64(100-analyser.SimilarityThreshold)
}
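
// blobsAreClose diffs two blobs line by line and reports whether the share of
// equal lines in the smaller one reaches SimilarityThreshold. For example, two
// files of 100 and 105 lines sharing 95 equal lines yield 95*100/100 == 95,
// which passes a threshold of 90.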
func (analyser *Analyser) blobsAreClose(
	blob1 *object.Blob, blob2 *object.Blob) bool {
	str_from := str(blob1)
	str_to := str(blob2)
	dmp := diffmatchpatch.New()
	src, dst, _ := dmp.DiffLinesToRunes(str_from, str_to)
	diffs := dmp.DiffMainRunes(src, dst, false)
	common := 0
	for _, edit := range diffs {
		if edit.Type == diffmatchpatch.DiffEqual {
			common += utf8.RuneCountInString(edit.Text)
		}
	}
	return common*100/max(1, min(len(src), len(dst))) >=
		analyser.SimilarityThreshold
}

// getBlob resolves the blob behind a change entry. If the object is missing
// from the repository but the path is a registered submodule, an empty dummy
// blob is returned instead.
func (analyser *Analyser) getBlob(entry *object.ChangeEntry, commit *object.Commit) (
	*object.Blob, error) {
	blob, err := analyser.Repository.BlobObject(entry.TreeEntry.Hash)
	if err != nil {
		if err.Error() != git.ErrObjectNotFound.Error() {
			fmt.Fprintf(os.Stderr, "getBlob(%s)\n", entry.TreeEntry.Hash.String())
			return nil, err
		}
		file, err_modules := commit.File(".gitmodules")
		if err_modules != nil {
			return nil, err
		}
		contents, err_modules := file.Contents()
		if err_modules != nil {
			return nil, err
		}
		modules := config.NewModules()
		err_modules = modules.Unmarshal([]byte(contents))
		if err_modules != nil {
			return nil, err
		}
		_, exists := modules.Submodules[entry.Name]
		if exists {
			// we found that this is a submodule
			return createDummyBlob(&entry.TreeEntry.Hash)
		}
		return nil, err
	}
	return blob, nil
}

// cacheBlobs loads the blobs referenced by the changes into a map keyed by
// their hashes.
func (analyser *Analyser) cacheBlobs(changes *object.Changes, commit *object.Commit) (
	*map[plumbing.Hash]*object.Blob, error) {
	cache := make(map[plumbing.Hash]*object.Blob)
	for _, change := range *changes {
		action, err := change.Action()
		if err != nil {
			return nil, err
		}
		switch action {
		case merkletrie.Insert:
			cache[change.To.TreeEntry.Hash], err = analyser.getBlob(&change.To, commit)
			if err != nil {
				fmt.Fprintf(os.Stderr, "file to %s\n", change.To.Name)
			}
		case merkletrie.Delete:
			cache[change.From.TreeEntry.Hash], err = analyser.getBlob(&change.From, commit)
			if err != nil {
				if err.Error() != git.ErrObjectNotFound.Error() {
					fmt.Fprintf(os.Stderr, "file from %s\n", change.From.Name)
				} else {
					cache[change.From.TreeEntry.Hash], err = createDummyBlob(
						&change.From.TreeEntry.Hash)
				}
			}
		case merkletrie.Modify:
			cache[change.To.TreeEntry.Hash], err = analyser.getBlob(&change.To, commit)
			if err != nil {
				fmt.Fprintf(os.Stderr, "file to %s\n", change.To.Name)
			}
			cache[change.From.TreeEntry.Hash], err = analyser.getBlob(&change.From, commit)
			if err != nil {
				fmt.Fprintf(os.Stderr, "file from %s\n", change.From.Name)
			}
		default:
			panic(fmt.Sprintf("unsupported action: %d", action))
		}
		if err != nil {
			return nil, err
		}
	}
	return &cache, nil
}

// detectRenames folds matching deletion/insertion pairs into rename
// (modification) changes, first by identical blob hashes and then by content
// similarity, and returns the reduced change list.
func (analyser *Analyser) detectRenames(
	changes *object.Changes, cache *map[plumbing.Hash]*object.Blob) object.Changes {
	reduced_changes := make(object.Changes, 0, changes.Len())

	// Stage 1 - find renames by matching the hashes
	// n log(n)
	// We sort additions and deletions by hash and then do a single scan along
	// both slices.
	deleted := make(sortableChanges, 0, changes.Len())
	added := make(sortableChanges, 0, changes.Len())
	for _, change := range *changes {
		action, err := change.Action()
		if err != nil {
			panic(err)
		}
		switch action {
		case merkletrie.Insert:
			added = append(added, sortableChange{change, change.To.TreeEntry.Hash})
		case merkletrie.Delete:
			deleted = append(deleted, sortableChange{change, change.From.TreeEntry.Hash})
		case merkletrie.Modify:
			reduced_changes = append(reduced_changes, change)
		default:
			panic(fmt.Sprintf("unsupported action: %d", action))
		}
	}
	sort.Sort(deleted)
	sort.Sort(added)
	a := 0
	d := 0
	still_deleted := make(object.Changes, 0, deleted.Len())
	still_added := make(object.Changes, 0, added.Len())
	for a < added.Len() && d < deleted.Len() {
		if added[a].hash == deleted[d].hash {
			reduced_changes = append(
				reduced_changes,
				&object.Change{From: deleted[d].change.From, To: added[a].change.To})
			a++
			d++
		} else if added[a].Less(&deleted[d]) {
			still_added = append(still_added, added[a].change)
			a++
		} else {
			still_deleted = append(still_deleted, deleted[d].change)
			d++
		}
	}
	for ; a < added.Len(); a++ {
		still_added = append(still_added, added[a].change)
	}
	for ; d < deleted.Len(); d++ {
		still_deleted = append(still_deleted, deleted[d].change)
	}

	// Stage 2 - apply the similarity threshold
	// n^2 but actually linear
	// We sort the blobs by size and do a single linear scan.
	added_blobs := make(sortableBlobs, 0, still_added.Len())
	deleted_blobs := make(sortableBlobs, 0, still_deleted.Len())
	for _, change := range still_added {
		blob := (*cache)[change.To.TreeEntry.Hash]
		added_blobs = append(
			added_blobs, sortableBlob{change: change, size: blob.Size})
	}
	for _, change := range still_deleted {
		blob := (*cache)[change.From.TreeEntry.Hash]
		deleted_blobs = append(
			deleted_blobs, sortableBlob{change: change, size: blob.Size})
	}
	sort.Sort(added_blobs)
	sort.Sort(deleted_blobs)
	d_start := 0
	for a = 0; a < added_blobs.Len(); a++ {
		my_blob := (*cache)[added_blobs[a].change.To.TreeEntry.Hash]
		my_size := added_blobs[a].size
		for d = d_start; d < deleted_blobs.Len() && !analyser.sizesAreClose(my_size, deleted_blobs[d].size); d++ {
		}
		d_start = d
		found_match := false
		for d = d_start; d < deleted_blobs.Len() && analyser.sizesAreClose(my_size, deleted_blobs[d].size); d++ {
			if analyser.blobsAreClose(
				my_blob, (*cache)[deleted_blobs[d].change.From.TreeEntry.Hash]) {
				found_match = true
				reduced_changes = append(
					reduced_changes,
					&object.Change{From: deleted_blobs[d].change.From,
						To: added_blobs[a].change.To})
				break
			}
		}
		if found_match {
			added_blobs = append(added_blobs[:a], added_blobs[a+1:]...)
			a--
			deleted_blobs = append(deleted_blobs[:d], deleted_blobs[d+1:]...)
		}
	}

	// Stage 3 - we give up; everything left consists of independent additions
	// and deletions
	for _, blob := range added_blobs {
		reduced_changes = append(reduced_changes, blob.change)
	}
	for _, blob := range deleted_blobs {
		reduced_changes = append(reduced_changes, blob.change)
	}
	return reduced_changes
}

// Analyse calculates the line burndown statistics for the bound repository.
//
// commits is a slice with the sequential commit history. It shall start from
// the root (ascending order).
//
// It returns the list of snapshots of the cumulative line edit times and the
// similar lists for every file which is alive in HEAD.
// The number of snapshots (the first dimension of [][]int64) depends on
// Analyser.Sampling (the larger the Sampling, the fewer the snapshots); the
// length of each snapshot depends on Analyser.Granularity (the larger the
// Granularity, the fewer the bands).
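//
// As a rough illustration, a 300-day history analysed with Sampling == 30
// produces about ten snapshots, and with Granularity == 30 the final snapshot
// holds about ten bands; earlier snapshots are shorter because fewer bands
// exist at that point in the history.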
func (analyser *Analyser) Analyse(commits []*object.Commit) (
	[][]int64, map[string][][]int64, map[int][][]int64, [][]int64) {
	sampling := analyser.Sampling
	if sampling == 0 {
		sampling = 1
	}
	onProgress := analyser.OnProgress
	if onProgress == nil {
		onProgress = func(int, int) {}
	}
	if analyser.SimilarityThreshold < 0 || analyser.SimilarityThreshold > 100 {
		panic("hercules.Analyser: an invalid SimilarityThreshold was specified")
	}
	// current daily alive number of lines; key is the number of days from the
	// beginning of the history
	global_status := map[int]int64{}
	// snapshots of the global status, taken once per sampling interval
	global_history := [][]int64{}
	// snapshots of each file's status, taken once per sampling interval
	file_histories := map[string][][]int64{}
	// mapping <file path> -> hercules.File
	files := map[string]*File{}
	var day0 time.Time // will be initialized in the first iteration
	var prev_tree *object.Tree = nil
	var day, prev_day int
	for index, commit := range commits {
		onProgress(index, len(commits))
		tree, err := commit.Tree()
		if err != nil {
			panic(err)
		}
		if index == 0 {
			// first iteration - initialize the file objects from the tree
			day0 = commit.Author.When
			func() {
				file_iter := tree.Files()
				defer file_iter.Close()
				for {
					file, err := file_iter.Next()
					if err != nil {
						if err == io.EOF {
							break
						}
						panic(err)
					}
					lines, err := loc(&file.Blob)
					if err == nil {
						files[file.Name] = NewFile(0, lines, global_status, make(map[int]int64))
					}
				}
			}()
		} else {
			day = int(commit.Author.When.Sub(day0).Hours() / 24)
			if day < prev_day {
				// rebase makes miracles
				day = prev_day
			}
			delta := (day / sampling) - (prev_day / sampling)
			if delta > 0 {
				prev_day = day
				gs, fss := analyser.groupStatus(global_status, files, day)
				global_history = analyser.updateHistories(
					global_history, gs, file_histories, fss, delta)
			}
			tree_diff, err := object.DiffTree(prev_tree, tree)
			if err != nil {
				fmt.Fprintf(os.Stderr, "commit #%d %s\n", index, commit.Hash.String())
				panic(err)
			}
			cache, err := analyser.cacheBlobs(&tree_diff, commit)
			if err != nil {
				fmt.Fprintf(os.Stderr, "commit #%d %s\n", index, commit.Hash.String())
				panic(err)
			}
			tree_diff = analyser.detectRenames(&tree_diff, cache)
			for _, change := range tree_diff {
				action, err := change.Action()
				if err != nil {
					fmt.Fprintf(os.Stderr, "commit #%d %s\n", index, commit.Hash.String())
					panic(err)
				}
				switch action {
				case merkletrie.Insert:
					analyser.handleInsertion(change, day, global_status, files, cache)
				case merkletrie.Delete:
					analyser.handleDeletion(change, day, global_status, files, cache)
				case merkletrie.Modify:
					func() {
						defer func() {
							r := recover()
							if r != nil {
								fmt.Fprintf(os.Stderr, "#%d - %s: modification error\n",
									index, commit.Hash.String())
								panic(r)
							}
						}()
						analyser.handleModification(change, day, global_status, files, cache)
					}()
				}
			}
		}
		prev_tree = tree
	}
	gs, fss := analyser.groupStatus(global_status, files, day)
	global_history = analyser.updateHistories(
		global_history, gs, file_histories, fss, 1)
	for key, statuses := range file_histories {
		if len(statuses) == len(global_history) {
			continue
		}
		padding := make([][]int64, len(global_history)-len(statuses))
		for i := range padding {
			padding[i] = make([]int64, len(global_status))
		}
		file_histories[key] = append(padding, statuses...)
	}
	// per-developer statistics are not collected in this code path yet, so nil
	// placeholders are returned
	var people_statuses map[int][][]int64
	var people_matrix [][]int64
	return global_history, file_histories, people_statuses, people_matrix
}