analyser.go

package hercules

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"io"
	"os"
	"sort"
	"time"
	"unicode/utf8"

	"github.com/sergi/go-diff/diffmatchpatch"
	"gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/config"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
	"gopkg.in/src-d/go-git.v4/utils/merkletrie"
)
// Analyser computes the line burndown statistics for a Git repository.
type Analyser struct {
	// Repository is the Git repository to analyse.
	Repository *git.Repository
	// Granularity is the size of a time band, in days.
	Granularity int
	// Sampling is the frequency of taking status snapshots, in days.
	Sampling int
	// SimilarityThreshold is the percentage of common lines which is enough
	// to consider two files the same in rename detection, 0..100.
	SimilarityThreshold int
	// Debug enables the integrity self-checks after each diff application.
	Debug bool
	// OnProgress is invoked with the index of the current commit and the
	// total number of commits.
	OnProgress func(int, int)
}
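
// A minimal construction sketch (illustrative values, not defaults):
//
//	analyser := &Analyser{
//		Repository:          repository, // e.g. obtained from git.PlainOpen
//		Granularity:         30,         // 30-day bands
//		Sampling:            30,         // snapshot every 30 days
//		SimilarityThreshold: 90,         // >= 90% common lines counts as a rename
//	}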
func checkClose(c io.Closer) {
	if err := c.Close(); err != nil {
		panic(err)
	}
}

// loc counts the lines in a blob; it returns an error if the contents are
// not valid UTF-8, in which case the blob is treated as binary.
func loc(file *object.Blob) (int, error) {
	reader, err := file.Reader()
	if err != nil {
		panic(err)
	}
	defer checkClose(reader)
	scanner := bufio.NewScanner(reader)
	counter := 0
	for scanner.Scan() {
		if !utf8.Valid(scanner.Bytes()) {
			return -1, errors.New("binary")
		}
		counter++
	}
	return counter, nil
}
// str reads the whole blob into a string.
func str(file *object.Blob) string {
	reader, err := file.Reader()
	if err != nil {
		panic(err)
	}
	defer checkClose(reader)
	buf := new(bytes.Buffer)
	buf.ReadFrom(reader)
	return buf.String()
}
// DummyIO reads nothing and swallows all writes.
type DummyIO struct {
}

func (DummyIO) Read(p []byte) (int, error) {
	return 0, io.EOF
}

func (DummyIO) Write(p []byte) (int, error) {
	return len(p), nil
}

func (DummyIO) Close() error {
	return nil
}

// DummyEncodedObject is a plumbing.EncodedObject with an empty body and a
// fixed hash; it stands in for objects which are absent from the repository.
type DummyEncodedObject struct {
	FakeHash plumbing.Hash
}

func (obj DummyEncodedObject) Hash() plumbing.Hash {
	return obj.FakeHash
}

func (obj DummyEncodedObject) Type() plumbing.ObjectType {
	return plumbing.BlobObject
}

func (obj DummyEncodedObject) SetType(plumbing.ObjectType) {
}

func (obj DummyEncodedObject) Size() int64 {
	return 0
}

func (obj DummyEncodedObject) SetSize(int64) {
}

func (obj DummyEncodedObject) Reader() (io.ReadCloser, error) {
	return DummyIO{}, nil
}

func (obj DummyEncodedObject) Writer() (io.WriteCloser, error) {
	return DummyIO{}, nil
}
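
// Why the dummies exist: a submodule appears in a tree as an entry whose hash
// refers to a commit in another repository, so BlobObject() cannot resolve it.
// getBlob() below substitutes DummyEncodedObject in that case; loc() then sees
// an empty reader and counts zero lines, which effectively excludes submodules
// from the statistics.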
func (analyser *Analyser) handleInsertion(
	change *object.Change, day int, status map[int]int64, files map[string]*File,
	cache map[plumbing.Hash]*object.Blob) {
	blob := cache[change.To.TreeEntry.Hash]
	lines, err := loc(blob)
	if err != nil {
		// binary file - ignore
		return
	}
	name := change.To.Name
	if _, exists := files[name]; exists {
		panic(fmt.Sprintf("file %s already exists", name))
	}
	files[name] = NewFile(day, lines, status)
}
func (analyser *Analyser) handleDeletion(
	change *object.Change, day int, status map[int]int64, files map[string]*File,
	cache map[plumbing.Hash]*object.Blob) {
	blob := cache[change.From.TreeEntry.Hash]
	lines, err := loc(blob)
	if err != nil {
		// binary file - ignore
		return
	}
	name := change.From.Name
	file := files[name]
	file.Update(day, 0, 0, lines)
	delete(files, name)
}
func (analyser *Analyser) handleModification(
	change *object.Change, day int, status map[int]int64, files map[string]*File,
	cache map[plumbing.Hash]*object.Blob) {
	blobFrom := cache[change.From.TreeEntry.Hash]
	blobTo := cache[change.To.TreeEntry.Hash]
	// we are not validating UTF-8 here because for example
	// git/git 4f7770c87ce3c302e1639a7737a6d2531fe4b160 fetch-pack.c is invalid UTF-8
	strFrom := str(blobFrom)
	strTo := str(blobTo)
	file, exists := files[change.From.Name]
	if !exists {
		analyser.handleInsertion(change, day, status, files, cache)
		return
	}
	// possible rename
	if change.To.Name != change.From.Name {
		analyser.handleRename(change.From.Name, change.To.Name, files)
	}
	dmp := diffmatchpatch.New()
	src, dst, _ := dmp.DiffLinesToRunes(strFrom, strTo)
	if file.Len() != len(src) {
		panic(fmt.Sprintf("%s: internal integrity error src %d != %d",
			change.To.Name, len(src), file.Len()))
	}
	diffs := dmp.DiffMainRunes(src, dst, false)
	// we do not call RunesToDiffLines, so the number of lines equals
	// the rune count
	position := 0
	pending := diffmatchpatch.Diff{Text: ""}
	apply := func(edit diffmatchpatch.Diff) {
		length := utf8.RuneCountInString(edit.Text)
		if edit.Type == diffmatchpatch.DiffInsert {
			file.Update(day, position, length, 0)
			position += length
		} else {
			file.Update(day, position, 0, length)
		}
		if analyser.Debug {
			file.Validate()
		}
	}
	for _, edit := range diffs {
		dumpBefore := ""
		if analyser.Debug {
			dumpBefore = file.Dump()
		}
		length := utf8.RuneCountInString(edit.Text)
		func() {
			defer func() {
				r := recover()
				if r != nil {
					fmt.Fprintf(os.Stderr, "%s: internal diff error\n", change.To.Name)
					fmt.Fprintf(os.Stderr, "Update(%d, %d, %d (0), %d (0))\n", day, position,
						length, utf8.RuneCountInString(pending.Text))
					if dumpBefore != "" {
						fmt.Fprintf(os.Stderr, "====TREE BEFORE====\n%s====END====\n", dumpBefore)
					}
					fmt.Fprintf(os.Stderr, "====TREE AFTER====\n%s====END====\n", file.Dump())
					panic(r)
				}
			}()
			switch edit.Type {
			case diffmatchpatch.DiffEqual:
				if pending.Text != "" {
					apply(pending)
					pending.Text = ""
				}
				position += length
			case diffmatchpatch.DiffInsert:
				if pending.Text != "" {
					if pending.Type == diffmatchpatch.DiffInsert {
						panic("DiffInsert may not appear after DiffInsert")
					}
					// an insertion following a pending deletion is a replacement:
					// insert the new lines and delete the old ones in one update
					file.Update(day, position, length, utf8.RuneCountInString(pending.Text))
					if analyser.Debug {
						file.Validate()
					}
					position += length
					pending.Text = ""
				} else {
					pending = edit
				}
			case diffmatchpatch.DiffDelete:
				if pending.Text != "" {
					panic("DiffDelete may not appear after DiffInsert/DiffDelete")
				}
				pending = edit
			default:
				panic(fmt.Sprintf("diff operation is not supported: %d", edit.Type))
			}
		}()
	}
	if pending.Text != "" {
		apply(pending)
		pending.Text = ""
	}
	if file.Len() != len(dst) {
		panic(fmt.Sprintf("%s: internal integrity error dst %d != %d",
			change.To.Name, len(dst), file.Len()))
	}
}
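
// A walk-through of the pending-edit logic above, on a hypothetical diff.
// Suppose the line-level diff of a file is, in runes (one rune per line):
//
//	Equal("ab") Delete("cd") Insert("xyz") Equal("e")
//
//   - Equal("ab"): nothing is pending, position becomes 2;
//   - Delete("cd"): stored in pending, awaiting a possible replacement;
//   - Insert("xyz"): pending is a deletion, so the pair collapses into a single
//     file.Update(day, 2, 3, 2) - insert 3 lines and delete 2 at position 2 -
//     and position becomes 5;
//   - Equal("e"): pending is empty again, position becomes 6.
//
// A trailing Delete with no following Insert is flushed by apply() after the
// loop.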
func (analyser *Analyser) handleRename(from, to string, files map[string]*File) {
	file, exists := files[from]
	if !exists {
		panic(fmt.Sprintf("file %s does not exist", from))
	}
	files[to] = file
	delete(files, from)
}
// Commits returns the linear history from HEAD to the root commit, oldest
// first; only the first parent of each commit is followed.
func (analyser *Analyser) Commits() []*object.Commit {
	result := []*object.Commit{}
	repository := analyser.Repository
	head, err := repository.Head()
	if err != nil {
		panic(err)
	}
	commit, err := repository.CommitObject(head.Hash())
	if err != nil {
		panic(err)
	}
	for {
		result = append(result, commit)
		commit, err = commit.Parents().Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
	}
	// reverse the order so that the root commit comes first
	for i, j := 0, len(result)-1; i < j; i, j = i+1, j-1 {
		result[i], result[j] = result[j], result[i]
	}
	return result
}
// groupStatus aggregates the per-day line counters into bands of
// analyser.Granularity days each; the trailing, possibly incomplete band
// is included as well.
func (analyser *Analyser) groupStatus(status map[int]int64, day int) []int64 {
	granularity := analyser.Granularity
	if granularity == 0 {
		granularity = 1
	}
	day++
	// the trailing, incomplete band exists iff the number of observed days
	// is not a multiple of the granularity
	adjust := 0
	if day%granularity != 0 {
		adjust = 1
	}
	result := make([]int64, day/granularity+adjust)
	var group int64
	for i := 0; i < day; i++ {
		group += status[i]
		if i%granularity == (granularity - 1) {
			result[i/granularity] = group
			group = 0
		}
	}
	if day%granularity != 0 {
		result[len(result)-1] = group
	}
	return result
}
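
// A worked example: with Granularity = 30 and day = 64, the day++ above gives
// 65 observed days, so groupStatus returns 3 bands:
//
//	result[0] = sum of status[0..29]
//	result[1] = sum of status[30..59]
//	result[2] = sum of status[60..64] (the incomplete band)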
type sortableChange struct {
	change *object.Change
	hash   plumbing.Hash
}

type sortableChanges []sortableChange

// Less lexicographically compares the two 20-byte hashes.
func (change *sortableChange) Less(other *sortableChange) bool {
	for x := 0; x < 20; x++ {
		if change.hash[x] < other.hash[x] {
			return true
		}
		if change.hash[x] > other.hash[x] {
			return false
		}
	}
	return false
}

func (slice sortableChanges) Len() int {
	return len(slice)
}

func (slice sortableChanges) Less(i, j int) bool {
	return slice[i].Less(&slice[j])
}

func (slice sortableChanges) Swap(i, j int) {
	slice[i], slice[j] = slice[j], slice[i]
}
type sortableBlob struct {
	change *object.Change
	size   int64
}

type sortableBlobs []sortableBlob

func (change *sortableBlob) Less(other *sortableBlob) bool {
	return change.size < other.size
}

func (slice sortableBlobs) Len() int {
	return len(slice)
}

func (slice sortableBlobs) Less(i, j int) bool {
	return slice[i].Less(&slice[j])
}

func (slice sortableBlobs) Swap(i, j int) {
	slice[i], slice[j] = slice[j], slice[i]
}
// sizesAreClose reports whether the two byte sizes differ by no more than
// (100 - SimilarityThreshold) percent of the smaller one.
func (analyser *Analyser) sizesAreClose(size1 int64, size2 int64) bool {
	return abs64(size1-size2)*100/max64(1, min64(size1, size2)) <=
		int64(100-analyser.SimilarityThreshold)
}
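
// A worked example: with SimilarityThreshold = 90 the allowed difference is
// 10%. For sizes 1000 and 1080: |1000-1080|*100/1000 = 8 <= 10, so the sizes
// are close; for 1000 and 1150: 15 > 10, so they are not.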
// blobsAreClose reports whether the two blobs share at least
// SimilarityThreshold percent of the lines of the smaller one.
func (analyser *Analyser) blobsAreClose(
	blob1 *object.Blob, blob2 *object.Blob) bool {
	strFrom := str(blob1)
	strTo := str(blob2)
	dmp := diffmatchpatch.New()
	src, dst, _ := dmp.DiffLinesToRunes(strFrom, strTo)
	diffs := dmp.DiffMainRunes(src, dst, false)
	common := 0
	for _, edit := range diffs {
		if edit.Type == diffmatchpatch.DiffEqual {
			common += utf8.RuneCountInString(edit.Text)
		}
	}
	return common*100/max(1, min(len(src), len(dst))) >=
		analyser.SimilarityThreshold
}
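
// A worked example: if the old file has 100 lines, the new one has 95, and the
// diff marks 90 lines as equal, then 90*100/95 = 94 >= 90, so with
// SimilarityThreshold = 90 the blobs are considered the same file.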
func (analyser *Analyser) getBlob(entry *object.ChangeEntry, commit *object.Commit) (
	*object.Blob, error) {
	blob, err := analyser.Repository.BlobObject(entry.TreeEntry.Hash)
	if err != nil {
		if err.Error() != git.ErrObjectNotFound.Error() {
			fmt.Fprintf(os.Stderr, "getBlob(%s)\n", entry.TreeEntry.Hash.String())
			return nil, err
		}
		// the object is missing from the repository - it may be a submodule;
		// consult .gitmodules to find out
		file, errModules := commit.File(".gitmodules")
		if errModules != nil {
			return nil, err
		}
		contents, errModules := file.Contents()
		if errModules != nil {
			return nil, err
		}
		modules := config.NewModules()
		errModules = modules.Unmarshal([]byte(contents))
		if errModules != nil {
			return nil, err
		}
		_, exists := modules.Submodules[entry.Name]
		if exists {
			// we found that this is a submodule - substitute an empty blob
			return object.DecodeBlob(DummyEncodedObject{entry.TreeEntry.Hash})
		}
		return nil, err
	}
	return blob, nil
}
func (analyser *Analyser) cacheBlobs(changes *object.Changes, commit *object.Commit) (
	map[plumbing.Hash]*object.Blob, error) {
	cache := make(map[plumbing.Hash]*object.Blob)
	for _, change := range *changes {
		action, err := change.Action()
		if err != nil {
			return nil, err
		}
		switch action {
		case merkletrie.Insert:
			cache[change.To.TreeEntry.Hash], err = analyser.getBlob(&change.To, commit)
			if err != nil {
				fmt.Fprintf(os.Stderr, "file to %s\n", change.To.Name)
			}
		case merkletrie.Delete:
			cache[change.From.TreeEntry.Hash], err = analyser.getBlob(&change.From, commit)
			if err != nil {
				fmt.Fprintf(os.Stderr, "file from %s\n", change.From.Name)
			}
		case merkletrie.Modify:
			cache[change.To.TreeEntry.Hash], err = analyser.getBlob(&change.To, commit)
			if err != nil {
				fmt.Fprintf(os.Stderr, "file to %s\n", change.To.Name)
			}
			cache[change.From.TreeEntry.Hash], err = analyser.getBlob(&change.From, commit)
			if err != nil {
				fmt.Fprintf(os.Stderr, "file from %s\n", change.From.Name)
			}
		default:
			panic(fmt.Sprintf("unsupported action: %d", action))
		}
		if err != nil {
			return nil, err
		}
	}
	return cache, nil
}
func (analyser *Analyser) detectRenames(
	changes *object.Changes, cache map[plumbing.Hash]*object.Blob) object.Changes {
	reducedChanges := make(object.Changes, 0, changes.Len())

	// Stage 1 - find renames by matching the hashes
	// n log(n)
	// We sort additions and deletions by hash and then do a single scan along
	// both slices.
	deleted := make(sortableChanges, 0, changes.Len())
	added := make(sortableChanges, 0, changes.Len())
	for _, change := range *changes {
		action, err := change.Action()
		if err != nil {
			panic(err)
		}
		switch action {
		case merkletrie.Insert:
			added = append(added, sortableChange{change, change.To.TreeEntry.Hash})
		case merkletrie.Delete:
			deleted = append(deleted, sortableChange{change, change.From.TreeEntry.Hash})
		case merkletrie.Modify:
			reducedChanges = append(reducedChanges, change)
		default:
			panic(fmt.Sprintf("unsupported action: %d", action))
		}
	}
	sort.Sort(deleted)
	sort.Sort(added)
	a := 0
	d := 0
	stillDeleted := make(object.Changes, 0, deleted.Len())
	stillAdded := make(object.Changes, 0, added.Len())
	for a < added.Len() && d < deleted.Len() {
		if added[a].hash == deleted[d].hash {
			reducedChanges = append(
				reducedChanges,
				&object.Change{From: deleted[d].change.From, To: added[a].change.To})
			a++
			d++
		} else if added[a].Less(&deleted[d]) {
			stillAdded = append(stillAdded, added[a].change)
			a++
		} else {
			stillDeleted = append(stillDeleted, deleted[d].change)
			d++
		}
	}
	for ; a < added.Len(); a++ {
		stillAdded = append(stillAdded, added[a].change)
	}
	for ; d < deleted.Len(); d++ {
		stillDeleted = append(stillDeleted, deleted[d].change)
	}

	// Stage 2 - apply the similarity threshold
	// n^2 in the worst case, but effectively linear
	// We sort the blobs by size and do a single linear scan.
	addedBlobs := make(sortableBlobs, 0, stillAdded.Len())
	deletedBlobs := make(sortableBlobs, 0, stillDeleted.Len())
	for _, change := range stillAdded {
		blob := cache[change.To.TreeEntry.Hash]
		addedBlobs = append(
			addedBlobs, sortableBlob{change: change, size: blob.Size})
	}
	for _, change := range stillDeleted {
		blob := cache[change.From.TreeEntry.Hash]
		deletedBlobs = append(
			deletedBlobs, sortableBlob{change: change, size: blob.Size})
	}
	sort.Sort(addedBlobs)
	sort.Sort(deletedBlobs)
	dStart := 0
	for a = 0; a < addedBlobs.Len(); a++ {
		myBlob := cache[addedBlobs[a].change.To.TreeEntry.Hash]
		mySize := addedBlobs[a].size
		// skip the deleted blobs which are too small to match
		for d = dStart; d < deletedBlobs.Len() &&
			!analyser.sizesAreClose(mySize, deletedBlobs[d].size); d++ {
		}
		dStart = d
		foundMatch := false
		for d = dStart; d < deletedBlobs.Len() &&
			analyser.sizesAreClose(mySize, deletedBlobs[d].size); d++ {
			if analyser.blobsAreClose(
				myBlob, cache[deletedBlobs[d].change.From.TreeEntry.Hash]) {
				foundMatch = true
				reducedChanges = append(
					reducedChanges,
					&object.Change{From: deletedBlobs[d].change.From,
						To: addedBlobs[a].change.To})
				break
			}
		}
		if foundMatch {
			addedBlobs = append(addedBlobs[:a], addedBlobs[a+1:]...)
			a--
			deletedBlobs = append(deletedBlobs[:d], deletedBlobs[d+1:]...)
		}
	}

	// Stage 3 - we give up; everything left is an independent addition or deletion
	for _, blob := range addedBlobs {
		reducedChanges = append(reducedChanges, blob.change)
	}
	for _, blob := range deletedBlobs {
		reducedChanges = append(reducedChanges, blob.change)
	}
	return reducedChanges
}
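
// An illustrative scenario (hypothetical file names): suppose a commit deletes
// old.c and adds new.c.
//
//   - If the two blobs are byte-identical, the hashes match in Stage 1 and the
//     pair collapses into a single Modify with From = old.c, To = new.c.
//   - If new.c was also slightly edited, Stage 1 misses it, but Stage 2 finds
//     the pair because the sizes are close and blobsAreClose() reaches the
//     similarity threshold.
//   - Otherwise Stage 3 reports a plain deletion of old.c and addition of new.c.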
// Analyse calculates the line burndown statistics over the given sequence of
// commits. The result contains one snapshot per Sampling days, each produced
// by groupStatus() and thus aggregated into bands of Granularity days.
func (analyser *Analyser) Analyse(commits []*object.Commit) [][]int64 {
	sampling := analyser.Sampling
	if sampling == 0 {
		sampling = 1
	}
	onProgress := analyser.OnProgress
	if onProgress == nil {
		onProgress = func(int, int) {}
	}
	if analyser.SimilarityThreshold < 0 || analyser.SimilarityThreshold > 100 {
		panic("hercules.Analyser: an invalid SimilarityThreshold was specified")
	}

	// current daily alive number of lines; key is the number of days from the
	// beginning of the history
	status := map[int]int64{}
	// periodic snapshots of status, one per sampling interval
	statuses := [][]int64{}
	// mapping <file path> -> hercules.File
	files := map[string]*File{}
	var day0 time.Time // will be initialized in the first iteration
	var prevTree *object.Tree
	prevDay := 0
	for index, commit := range commits {
		onProgress(index, len(commits))
		tree, err := commit.Tree()
		if err != nil {
			panic(err)
		}
		if index == 0 {
			// first iteration - initialize the file objects from the tree
			day0 = commit.Author.When
			func() {
				fileIter := tree.Files()
				defer fileIter.Close()
				for {
					file, err := fileIter.Next()
					if err != nil {
						if err == io.EOF {
							break
						}
						panic(err)
					}
					lines, err := loc(&file.Blob)
					if err == nil {
						files[file.Name] = NewFile(0, lines, status)
					}
				}
			}()
		} else {
			day := int(commit.Author.When.Sub(day0).Hours() / 24)
			delta := (day / sampling) - (prevDay / sampling)
			if delta > 0 {
				prevDay = day
				gs := analyser.groupStatus(status, day)
				for i := 0; i < delta; i++ {
					statuses = append(statuses, gs)
				}
			}
			treeDiff, err := object.DiffTree(prevTree, tree)
			if err != nil {
				fmt.Fprintf(os.Stderr, "commit #%d %s\n", index, commit.Hash.String())
				panic(err)
			}
			cache, err := analyser.cacheBlobs(&treeDiff, commit)
			if err != nil {
				fmt.Fprintf(os.Stderr, "commit #%d %s\n", index, commit.Hash.String())
				panic(err)
			}
			treeDiff = analyser.detectRenames(&treeDiff, cache)
			for _, change := range treeDiff {
				action, err := change.Action()
				if err != nil {
					fmt.Fprintf(os.Stderr, "commit #%d %s\n", index, commit.Hash.String())
					panic(err)
				}
				switch action {
				case merkletrie.Insert:
					analyser.handleInsertion(change, day, status, files, cache)
				case merkletrie.Delete:
					analyser.handleDeletion(change, day, status, files, cache)
				case merkletrie.Modify:
					func() {
						defer func() {
							r := recover()
							if r != nil {
								fmt.Fprintf(os.Stderr, "#%d - %s: modification error\n",
									index, commit.Hash.String())
								panic(r)
							}
						}()
						analyser.handleModification(change, day, status, files, cache)
					}()
				}
			}
		}
		prevTree = tree
	}
	return statuses
}
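
// The function below is an illustrative usage sketch, not part of the original
// API: it wires the pieces above together end to end. The repository path and
// the parameter values are assumptions for the sake of the example.
func exampleUsage() {
	// open an existing on-disk repository (hypothetical path)
	repository, err := git.PlainOpen("/path/to/repo")
	if err != nil {
		panic(err)
	}
	analyser := &Analyser{
		Repository:          repository,
		Granularity:         30, // 30-day bands
		Sampling:            30, // snapshot every 30 days
		SimilarityThreshold: 90, // >= 90% common lines counts as a rename
		OnProgress: func(commit, length int) {
			fmt.Fprintf(os.Stderr, "%d / %d\r", commit, length)
		},
	}
	statuses := analyser.Analyse(analyser.Commits())
	// each row is one sampling period; each column is one granularity band
	for sample, bands := range statuses {
		fmt.Println(sample, bands)
	}
}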