analyser.go

package hercules

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"io"
	"os"
	"sort"
	"time"
	"unicode/utf8"

	"github.com/sergi/go-diff/diffmatchpatch"
	"gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/config"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
	"gopkg.in/src-d/go-git.v4/utils/merkletrie"
)

// Analyser gathers the line burndown statistics for a Git repository.
type Analyser struct {
	// Repository points to the analysed Git repository struct from go-git.
	Repository *git.Repository
	// Granularity sets the size of each band - the number of days it spans.
	// Smaller values provide better resolution but require more work and eat
	// more memory. 30 days is usually enough.
	Granularity int
	// Sampling sets how detailed the statistic is - the size of the interval
	// in days between consecutive measurements. It is usually a good idea to
	// set it <= Granularity. Try 15 or 30.
	Sampling int
	// SimilarityThreshold adjusts the heuristic to determine file renames.
	// It has the same units as cgit's -X rename-threshold or -M. Better to
	// set it to the default value of 90 (90%).
	SimilarityThreshold int
	// Debug activates the debugging mode. Analyse() runs slower in this mode
	// but it accurately checks all the intermediate states for invariant
	// violations.
	Debug bool
	// OnProgress is the callback which is invoked in Analyse() to report its
	// progress. The first argument is the number of processed commits and the
	// second is the total number of commits.
	OnProgress func(int, int)
}
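
// checkClose closes the resource and panics if Close reports an error.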
func checkClose(c io.Closer) {
	if err := c.Close(); err != nil {
		panic(err)
	}
}
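
// loc counts the lines in the blob. It returns an error for contents which
// do not look like valid UTF-8 text, so that binary files are excluded from
// the statistics.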
func loc(file *object.Blob) (int, error) {
	reader, err := file.Reader()
	if err != nil {
		panic(err)
	}
	defer checkClose(reader)
	var scanner *bufio.Scanner
	buffer := make([]byte, bufio.MaxScanTokenSize)
	counter := 0
	// bufio.Scanner stops with ErrTooLong on lines longer than the maximum
	// token size; when that happens, recreate it and keep counting from the
	// reader's current position.
	for scanner == nil || scanner.Err() == bufio.ErrTooLong {
		if scanner != nil && !utf8.Valid(scanner.Bytes()) {
			return -1, errors.New("binary")
		}
		scanner = bufio.NewScanner(reader)
		scanner.Buffer(buffer, 0)
		for scanner.Scan() {
			if !utf8.Valid(scanner.Bytes()) {
				return -1, errors.New("binary")
			}
			counter++
		}
	}
	return counter, nil
}
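
// str reads the whole blob and returns its contents as a string.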
func str(file *object.Blob) string {
	reader, err := file.Reader()
	if err != nil {
		panic(err)
	}
	defer checkClose(reader)
	buf := new(bytes.Buffer)
	if _, err := buf.ReadFrom(reader); err != nil {
		panic(err)
	}
	return buf.String()
}
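
// dummyIO and dummyEncodedObject implement just enough of the go-git
// interfaces to fabricate an empty in-memory blob; createDummyBlob uses them
// to represent submodules, whose hashes point outside of the repository.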
type dummyIO struct{}

func (dummyIO) Read(p []byte) (int, error) {
	return 0, io.EOF
}

func (dummyIO) Write(p []byte) (int, error) {
	return len(p), nil
}

func (dummyIO) Close() error {
	return nil
}

type dummyEncodedObject struct {
	FakeHash plumbing.Hash
}

func (obj dummyEncodedObject) Hash() plumbing.Hash {
	return obj.FakeHash
}

func (obj dummyEncodedObject) Type() plumbing.ObjectType {
	return plumbing.BlobObject
}

func (obj dummyEncodedObject) SetType(plumbing.ObjectType) {
}

func (obj dummyEncodedObject) Size() int64 {
	return 0
}

func (obj dummyEncodedObject) SetSize(int64) {
}

func (obj dummyEncodedObject) Reader() (io.ReadCloser, error) {
	return dummyIO{}, nil
}

func (obj dummyEncodedObject) Writer() (io.WriteCloser, error) {
	return dummyIO{}, nil
}
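
// createDummyBlob produces a zero-sized fake blob with the given hash.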
func createDummyBlob(hash *plumbing.Hash) (*object.Blob, error) {
	return object.DecodeBlob(dummyEncodedObject{*hash})
}
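
// handleInsertion adds a new file to the tracked state on the given day.
// Binary files (those failing the UTF-8 check in loc()) are silently skipped.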
func (analyser *Analyser) handleInsertion(
	change *object.Change, day int, status map[int]int64, files map[string]*File,
	cache map[plumbing.Hash]*object.Blob) {
	blob := cache[change.To.TreeEntry.Hash]
	lines, err := loc(blob)
	if err != nil {
		return
	}
	name := change.To.Name
	if _, exists := files[name]; exists {
		panic(fmt.Sprintf("file %s already exists", name))
	}
	files[name] = NewFile(day, lines, status)
}
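
// handleDeletion registers the removal of all lines of the file on the given
// day and stops tracking it.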
func (analyser *Analyser) handleDeletion(
	change *object.Change, day int, status map[int]int64, files map[string]*File,
	cache map[plumbing.Hash]*object.Blob) {
	blob := cache[change.From.TreeEntry.Hash]
	lines, err := loc(blob)
	if err != nil {
		return
	}
	name := change.From.Name
	file := files[name]
	file.Update(day, 0, 0, lines)
	delete(files, name)
}
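
// handleModification diffs the two revisions of the file line by line and
// applies the edit script to the corresponding File, updating the ages of
// the affected lines. Renames are resolved on the fly.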
func (analyser *Analyser) handleModification(
	change *object.Change, day int, status map[int]int64, files map[string]*File,
	cache map[plumbing.Hash]*object.Blob) {
	blobFrom := cache[change.From.TreeEntry.Hash]
	blobTo := cache[change.To.TreeEntry.Hash]
	// we are not validating UTF-8 here because for example
	// git/git 4f7770c87ce3c302e1639a7737a6d2531fe4b160 fetch-pack.c is invalid UTF-8
	strFrom := str(blobFrom)
	strTo := str(blobTo)
	file, exists := files[change.From.Name]
	if !exists {
		analyser.handleInsertion(change, day, status, files, cache)
		return
	}
	// possible rename
	if change.To.Name != change.From.Name {
		analyser.handleRename(change.From.Name, change.To.Name, files)
	}
	dmp := diffmatchpatch.New()
	src, dst, _ := dmp.DiffLinesToRunes(strFrom, strTo)
	if file.Len() != len(src) {
		fmt.Fprintf(os.Stderr, "====TREE====\n%s", file.Dump())
		panic(fmt.Sprintf("%s: internal integrity error src %d != %d %s -> %s",
			change.To.Name, len(src), file.Len(),
			change.From.TreeEntry.Hash.String(), change.To.TreeEntry.Hash.String()))
	}
	diffs := dmp.DiffMainRunes(src, dst, false)
	// we do not call RunesToDiffLines so the number of lines equals
	// the rune count
	position := 0
	pending := diffmatchpatch.Diff{Text: ""}
	apply := func(edit diffmatchpatch.Diff) {
		length := utf8.RuneCountInString(edit.Text)
		if edit.Type == diffmatchpatch.DiffInsert {
			file.Update(day, position, length, 0)
			position += length
		} else {
			file.Update(day, position, 0, length)
		}
		if analyser.Debug {
			file.Validate()
		}
	}
	for _, edit := range diffs {
		dumpBefore := ""
		if analyser.Debug {
			dumpBefore = file.Dump()
		}
		length := utf8.RuneCountInString(edit.Text)
		func() {
			defer func() {
				r := recover()
				if r != nil {
					fmt.Fprintf(os.Stderr, "%s: internal diff error\n", change.To.Name)
					fmt.Fprintf(os.Stderr, "Update(%d, %d, %d (0), %d (0))\n", day, position,
						length, utf8.RuneCountInString(pending.Text))
					if dumpBefore != "" {
						fmt.Fprintf(os.Stderr, "====TREE BEFORE====\n%s====END====\n", dumpBefore)
					}
					fmt.Fprintf(os.Stderr, "====TREE AFTER====\n%s====END====\n", file.Dump())
					panic(r)
				}
			}()
			switch edit.Type {
			case diffmatchpatch.DiffEqual:
				if pending.Text != "" {
					apply(pending)
					pending.Text = ""
				}
				position += length
			case diffmatchpatch.DiffInsert:
				if pending.Text != "" {
					if pending.Type == diffmatchpatch.DiffInsert {
						panic("DiffInsert may not appear after DiffInsert")
					}
					// an insertion right after a deletion is a replacement
					file.Update(day, position, length, utf8.RuneCountInString(pending.Text))
					if analyser.Debug {
						file.Validate()
					}
					position += length
					pending.Text = ""
				} else {
					pending = edit
				}
			case diffmatchpatch.DiffDelete:
				if pending.Text != "" {
					panic("DiffDelete may not appear after DiffInsert/DiffDelete")
				}
				pending = edit
			default:
				panic(fmt.Sprintf("diff operation is not supported: %d", edit.Type))
			}
		}()
	}
	if pending.Text != "" {
		apply(pending)
		pending.Text = ""
	}
	if file.Len() != len(dst) {
		panic(fmt.Sprintf("%s: internal integrity error dst %d != %d",
			change.To.Name, len(dst), file.Len()))
	}
}
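
// handleRename moves the tracked file state from the old path to the new one.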
func (analyser *Analyser) handleRename(from, to string, files map[string]*File) {
	file, exists := files[from]
	if !exists {
		panic(fmt.Sprintf("file %s does not exist", from))
	}
	files[to] = file
	delete(files, from)
}

// Commits returns the critical path in the repository's history. It starts
// from HEAD and traces commits backwards until the root. When it encounters
// a merge (more than one parent), it always chooses the first parent.
func (analyser *Analyser) Commits() []*object.Commit {
	result := []*object.Commit{}
	repository := analyser.Repository
	head, err := repository.Head()
	if err != nil {
		panic(err)
	}
	commit, err := repository.CommitObject(head.Hash())
	if err != nil {
		panic(err)
	}
	for ; err != io.EOF; commit, err = commit.Parents().Next() {
		if err != nil {
			panic(err)
		}
		result = append(result, commit)
	}
	// reverse the order - we need the history from the root to HEAD
	for i, j := 0, len(result)-1; i < j; i, j = i+1, j-1 {
		result[i], result[j] = result[j], result[i]
	}
	return result
}
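
// groupStatus summarises the per-day line counts into bands of Granularity
// days each, covering the history up to the given day.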
func (analyser *Analyser) groupStatus(status map[int]int64, day int) []int64 {
	granularity := analyser.Granularity
	if granularity == 0 {
		granularity = 1
	}
	day++
	adjust := 0
	if day%granularity < granularity-1 {
		adjust = 1
	}
	result := make([]int64, day/granularity+adjust)
	var group int64
	for i := 0; i < day; i++ {
		group += status[i]
		if i%granularity == (granularity - 1) {
			result[i/granularity] = group
			group = 0
		}
	}
	if day%granularity < granularity-1 {
		result[len(result)-1] = group
	}
	return result
}
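
// sortableChange pairs an object.Change with the blob hash it is keyed on so
// that additions and deletions can be sorted and matched by hash in
// detectRenames.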
type sortableChange struct {
	change *object.Change
	hash   plumbing.Hash
}

type sortableChanges []sortableChange

func (change *sortableChange) Less(other *sortableChange) bool {
	for x := 0; x < 20; x++ {
		if change.hash[x] < other.hash[x] {
			return true
		}
		if change.hash[x] > other.hash[x] {
			return false
		}
	}
	return false
}

func (slice sortableChanges) Len() int {
	return len(slice)
}

func (slice sortableChanges) Less(i, j int) bool {
	return slice[i].Less(&slice[j])
}

func (slice sortableChanges) Swap(i, j int) {
	slice[i], slice[j] = slice[j], slice[i]
}

type sortableBlob struct {
	change *object.Change
	size   int64
}

type sortableBlobs []sortableBlob

func (change *sortableBlob) Less(other *sortableBlob) bool {
	return change.size < other.size
}

func (slice sortableBlobs) Len() int {
	return len(slice)
}

func (slice sortableBlobs) Less(i, j int) bool {
	return slice[i].Less(&slice[j])
}

func (slice sortableBlobs) Swap(i, j int) {
	slice[i], slice[j] = slice[j], slice[i]
}
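
// sizesAreClose reports whether two blob sizes differ by no more than
// (100 - SimilarityThreshold) percent of the smaller one. For example, with
// the default threshold of 90, sizes 1000 and 1100 are close (10% <= 10%)
// while 1000 and 1200 are not (20% > 10%).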
func (analyser *Analyser) sizesAreClose(size1 int64, size2 int64) bool {
	return abs64(size1-size2)*100/max64(1, min64(size1, size2)) <=
		int64(100-analyser.SimilarityThreshold)
}
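
// blobsAreClose diffs two blobs line by line and reports whether the number
// of common lines is at least SimilarityThreshold percent of the smaller
// blob. This is the expensive second stage of rename detection.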
func (analyser *Analyser) blobsAreClose(
	blob1 *object.Blob, blob2 *object.Blob) bool {
	strFrom := str(blob1)
	strTo := str(blob2)
	dmp := diffmatchpatch.New()
	src, dst, _ := dmp.DiffLinesToRunes(strFrom, strTo)
	diffs := dmp.DiffMainRunes(src, dst, false)
	common := 0
	for _, edit := range diffs {
		if edit.Type == diffmatchpatch.DiffEqual {
			common += utf8.RuneCountInString(edit.Text)
		}
	}
	return common*100/max(1, min(len(src), len(dst))) >=
		analyser.SimilarityThreshold
}
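
// getBlob fetches the blob which corresponds to the change entry. Hashes
// which are missing from the object database are checked against
// .gitmodules: those belonging to submodules are substituted with an empty
// dummy blob.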
func (analyser *Analyser) getBlob(entry *object.ChangeEntry, commit *object.Commit) (
	*object.Blob, error) {
	blob, err := analyser.Repository.BlobObject(entry.TreeEntry.Hash)
	if err != nil {
		if err.Error() != git.ErrObjectNotFound.Error() {
			fmt.Fprintf(os.Stderr, "getBlob(%s)\n", entry.TreeEntry.Hash.String())
			return nil, err
		}
		file, errModules := commit.File(".gitmodules")
		if errModules != nil {
			return nil, err
		}
		contents, errModules := file.Contents()
		if errModules != nil {
			return nil, err
		}
		modules := config.NewModules()
		errModules = modules.Unmarshal([]byte(contents))
		if errModules != nil {
			return nil, err
		}
		_, exists := modules.Submodules[entry.Name]
		if exists {
			// we found that this is a submodule
			return createDummyBlob(&entry.TreeEntry.Hash)
		}
		return nil, err
	}
	return blob, nil
}
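
// cacheBlobs resolves every blob referenced by the changes into a map
// indexed by hash, so that later stages never read the same revision twice.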
func (analyser *Analyser) cacheBlobs(changes object.Changes, commit *object.Commit) (
	map[plumbing.Hash]*object.Blob, error) {
	cache := make(map[plumbing.Hash]*object.Blob)
	for _, change := range changes {
		action, err := change.Action()
		if err != nil {
			return nil, err
		}
		switch action {
		case merkletrie.Insert:
			cache[change.To.TreeEntry.Hash], err = analyser.getBlob(&change.To, commit)
			if err != nil {
				fmt.Fprintf(os.Stderr, "file to %s\n", change.To.Name)
			}
		case merkletrie.Delete:
			cache[change.From.TreeEntry.Hash], err = analyser.getBlob(&change.From, commit)
			if err != nil {
				if err.Error() != git.ErrObjectNotFound.Error() {
					fmt.Fprintf(os.Stderr, "file from %s\n", change.From.Name)
				} else {
					cache[change.From.TreeEntry.Hash], err = createDummyBlob(
						&change.From.TreeEntry.Hash)
				}
			}
		case merkletrie.Modify:
			cache[change.To.TreeEntry.Hash], err = analyser.getBlob(&change.To, commit)
			if err != nil {
				fmt.Fprintf(os.Stderr, "file to %s\n", change.To.Name)
			}
			cache[change.From.TreeEntry.Hash], err = analyser.getBlob(&change.From, commit)
			if err != nil {
				fmt.Fprintf(os.Stderr, "file from %s\n", change.From.Name)
			}
		default:
			panic(fmt.Sprintf("unsupported action: %d", action))
		}
		if err != nil {
			return nil, err
		}
	}
	return cache, nil
}
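
// detectRenames pairs additions with deletions which look like renames and
// merges each pair into a single modification. Stage 1 matches identical
// blob hashes; stage 2 matches blobs whose sizes and contents are close
// enough according to SimilarityThreshold; everything left after that is
// kept as independent additions and deletions.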
func (analyser *Analyser) detectRenames(
	changes object.Changes, cache map[plumbing.Hash]*object.Blob) object.Changes {
	reducedChanges := make(object.Changes, 0, changes.Len())
	// Stage 1 - find renames by matching the hashes
	// n log(n)
	// We sort additions and deletions by hash and then do the single scan along
	// both slices.
	deleted := make(sortableChanges, 0, changes.Len())
	added := make(sortableChanges, 0, changes.Len())
	for _, change := range changes {
		action, err := change.Action()
		if err != nil {
			panic(err)
		}
		switch action {
		case merkletrie.Insert:
			added = append(added, sortableChange{change, change.To.TreeEntry.Hash})
		case merkletrie.Delete:
			deleted = append(deleted, sortableChange{change, change.From.TreeEntry.Hash})
		case merkletrie.Modify:
			reducedChanges = append(reducedChanges, change)
		default:
			panic(fmt.Sprintf("unsupported action: %d", action))
		}
	}
	sort.Sort(deleted)
	sort.Sort(added)
	a := 0
	d := 0
	stillDeleted := make(object.Changes, 0, deleted.Len())
	stillAdded := make(object.Changes, 0, added.Len())
	for a < added.Len() && d < deleted.Len() {
		if added[a].hash == deleted[d].hash {
			reducedChanges = append(
				reducedChanges,
				&object.Change{From: deleted[d].change.From, To: added[a].change.To})
			a++
			d++
		} else if added[a].Less(&deleted[d]) {
			stillAdded = append(stillAdded, added[a].change)
			a++
		} else {
			stillDeleted = append(stillDeleted, deleted[d].change)
			d++
		}
	}
	for ; a < added.Len(); a++ {
		stillAdded = append(stillAdded, added[a].change)
	}
	for ; d < deleted.Len(); d++ {
		stillDeleted = append(stillDeleted, deleted[d].change)
	}
	// Stage 2 - apply the similarity threshold
	// n^2 but actually linear
	// We sort the blobs by size and do the single linear scan.
	addedBlobs := make(sortableBlobs, 0, stillAdded.Len())
	deletedBlobs := make(sortableBlobs, 0, stillDeleted.Len())
	for _, change := range stillAdded {
		blob := cache[change.To.TreeEntry.Hash]
		addedBlobs = append(
			addedBlobs, sortableBlob{change: change, size: blob.Size})
	}
	for _, change := range stillDeleted {
		blob := cache[change.From.TreeEntry.Hash]
		deletedBlobs = append(
			deletedBlobs, sortableBlob{change: change, size: blob.Size})
	}
	sort.Sort(addedBlobs)
	sort.Sort(deletedBlobs)
	dStart := 0
	for a = 0; a < addedBlobs.Len(); a++ {
		myBlob := cache[addedBlobs[a].change.To.TreeEntry.Hash]
		mySize := addedBlobs[a].size
		for d = dStart; d < deletedBlobs.Len() &&
			!analyser.sizesAreClose(mySize, deletedBlobs[d].size); d++ {
		}
		dStart = d
		foundMatch := false
		for d = dStart; d < deletedBlobs.Len() &&
			analyser.sizesAreClose(mySize, deletedBlobs[d].size); d++ {
			if analyser.blobsAreClose(
				myBlob, cache[deletedBlobs[d].change.From.TreeEntry.Hash]) {
				foundMatch = true
				reducedChanges = append(
					reducedChanges,
					&object.Change{From: deletedBlobs[d].change.From,
						To: addedBlobs[a].change.To})
				break
			}
		}
		if foundMatch {
			addedBlobs = append(addedBlobs[:a], addedBlobs[a+1:]...)
			a--
			deletedBlobs = append(deletedBlobs[:d], deletedBlobs[d+1:]...)
		}
	}
	// Stage 3 - we give up, everything left is independent additions and deletions
	for _, blob := range addedBlobs {
		reducedChanges = append(reducedChanges, blob.change)
	}
	for _, blob := range deletedBlobs {
		reducedChanges = append(reducedChanges, blob.change)
	}
	return reducedChanges
}

// Analyse calculates the line burndown statistics for the bound repository.
//
// commits is a slice with the sequential commit history. It shall start from
// the root (ascending order).
//
// Returns the list of snapshots of the cumulative line edit times.
// The number of snapshots (the first dimension of [][]int64) depends on
// Analyser.Sampling (the larger the Sampling, the fewer the snapshots); the
// length of each snapshot depends on Analyser.Granularity (the larger the
// Granularity, the shorter the snapshot).
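//
// A minimal usage sketch (assuming repo is a *git.Repository opened
// elsewhere, e.g. with git.PlainOpen):
//
//	analyser := &Analyser{
//		Repository:          repo,
//		Granularity:         30,
//		Sampling:            15,
//		SimilarityThreshold: 90,
//	}
//	statuses := analyser.Analyse(analyser.Commits())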
func (analyser *Analyser) Analyse(commits []*object.Commit) [][]int64 {
	sampling := analyser.Sampling
	if sampling == 0 {
		sampling = 1
	}
	onProgress := analyser.OnProgress
	if onProgress == nil {
		onProgress = func(int, int) {}
	}
	if analyser.SimilarityThreshold < 0 || analyser.SimilarityThreshold > 100 {
		panic("hercules.Analyser: an invalid SimilarityThreshold was specified")
	}
	// current daily alive number of lines; key is the number of days from the
	// beginning of the history
	status := map[int]int64{}
	// periodic snapshots of status, taken every "sampling" days
	statuses := [][]int64{}
	// mapping <file path> -> hercules.File
	files := map[string]*File{}
	var day0 time.Time // will be initialized in the first iteration
	var prevTree *object.Tree
	var day, prevDay int
	for index, commit := range commits {
		onProgress(index, len(commits))
		tree, err := commit.Tree()
		if err != nil {
			panic(err)
		}
		if index == 0 {
			// first iteration - initialize the file objects from the tree
			day0 = commit.Author.When
			func() {
				fileIter := tree.Files()
				defer fileIter.Close()
				for {
					file, err := fileIter.Next()
					if err != nil {
						if err == io.EOF {
							break
						}
						panic(err)
					}
					lines, err := loc(&file.Blob)
					if err == nil {
						files[file.Name] = NewFile(0, lines, status)
					}
				}
			}()
		} else {
			day = int(commit.Author.When.Sub(day0).Hours() / 24)
			if day < prevDay {
				// rebase makes miracles
				day = prevDay
			}
			delta := (day / sampling) - (prevDay / sampling)
			if delta > 0 {
				prevDay = day
				gs := analyser.groupStatus(status, day)
				for i := 0; i < delta; i++ {
					statuses = append(statuses, gs)
				}
			}
			treeDiff, err := object.DiffTree(prevTree, tree)
			if err != nil {
				fmt.Fprintf(os.Stderr, "commit #%d %s\n", index, commit.Hash.String())
				panic(err)
			}
			cache, err := analyser.cacheBlobs(treeDiff, commit)
			if err != nil {
				fmt.Fprintf(os.Stderr, "commit #%d %s\n", index, commit.Hash.String())
				panic(err)
			}
			treeDiff = analyser.detectRenames(treeDiff, cache)
			for _, change := range treeDiff {
				action, err := change.Action()
				if err != nil {
					fmt.Fprintf(os.Stderr, "commit #%d %s\n", index, commit.Hash.String())
					panic(err)
				}
				switch action {
				case merkletrie.Insert:
					analyser.handleInsertion(change, day, status, files, cache)
				case merkletrie.Delete:
					analyser.handleDeletion(change, day, status, files, cache)
				case merkletrie.Modify:
					func() {
						defer func() {
							r := recover()
							if r != nil {
								fmt.Fprintf(os.Stderr, "#%d - %s: modification error\n",
									index, commit.Hash.String())
								panic(r)
							}
						}()
						analyser.handleModification(change, day, status, files, cache)
					}()
				}
			}
		}
		prevTree = tree
	}
	gs := analyser.groupStatus(status, day)
	statuses = append(statuses, gs)
	return statuses
}