renames.go

package hercules

import (
	"fmt"
	"sort"
	"unicode/utf8"

	"github.com/sergi/go-diff/diffmatchpatch"
	"gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
	"gopkg.in/src-d/go-git.v4/utils/merkletrie"
)

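// RenameAnalysis matches the blobs deleted in a commit with the blobs added in
// the same commit and merges sufficiently similar pairs into single rename
// changes, so that consumers of "renamed_changes" see a continuous file
// history instead of an unrelated deletion and addition.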
type RenameAnalysis struct {
	// SimilarityThreshold adjusts the heuristic to determine file renames.
	// It has the same units as cgit's -X rename-threshold or -M. Better to
	// set it to the default value of 90 (90%).
	SimilarityThreshold int
	repository          *git.Repository
}

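// Name returns the name of this pipeline item.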
func (ra *RenameAnalysis) Name() string {
	return "RenameAnalysis"
}

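// Provides lists the entities this item produces for downstream consumers:
// "renamed_changes" - the tree changes with rename pairs merged together.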
func (ra *RenameAnalysis) Provides() []string {
	arr := [...]string{"renamed_changes"}
	return arr[:]
}

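// Requires lists the entities this item needs from upstream: "blob_cache" -
// the pre-loaded blobs keyed by hash, and "changes" - the raw tree changes.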
func (ra *RenameAnalysis) Requires() []string {
	arr := [...]string{"blob_cache", "changes"}
	return arr[:]
}

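// Initialize validates SimilarityThreshold and remembers the repository.
// It panics if the threshold lies outside of the [0, 100] range.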
func (ra *RenameAnalysis) Initialize(repository *git.Repository) {
	if ra.SimilarityThreshold < 0 || ra.SimilarityThreshold > 100 {
		panic("hercules.RenameAnalysis: an invalid SimilarityThreshold was specified")
	}
	ra.repository = repository
}

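// Consume reduces the incoming changes in three stages:
//  1. exact renames - an added blob and a deleted blob share the same hash;
//  2. fuzzy renames - the blob sizes and the line-level diffs are within
//     SimilarityThreshold of each other;
//  3. whatever is left stays as independent additions and deletions.
//
// The result is returned under the "renamed_changes" key.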
func (ra *RenameAnalysis) Consume(deps map[string]interface{}) (map[string]interface{}, error) {
	changes := deps["changes"].(object.Changes)
	cache := deps["blob_cache"].(map[plumbing.Hash]*object.Blob)
	reduced_changes := make(object.Changes, 0, changes.Len())

	// Stage 1 - find renames by matching the hashes
	// n log(n)
	// We sort additions and deletions by hash and then do the single scan along
	// both slices.
	deleted := make(sortableChanges, 0, changes.Len())
	added := make(sortableChanges, 0, changes.Len())
	for _, change := range changes {
		action, err := change.Action()
		if err != nil {
			return nil, err
		}
		switch action {
		case merkletrie.Insert:
			added = append(added, sortableChange{change, change.To.TreeEntry.Hash})
		case merkletrie.Delete:
			deleted = append(deleted, sortableChange{change, change.From.TreeEntry.Hash})
		case merkletrie.Modify:
			reduced_changes = append(reduced_changes, change)
		default:
			panic(fmt.Sprintf("unsupported action: %d", action))
		}
	}
	sort.Sort(deleted)
	sort.Sort(added)
	a := 0
	d := 0
	still_deleted := make(object.Changes, 0, deleted.Len())
	still_added := make(object.Changes, 0, added.Len())
	for a < added.Len() && d < deleted.Len() {
		if added[a].hash == deleted[d].hash {
			reduced_changes = append(
				reduced_changes,
				&object.Change{From: deleted[d].change.From, To: added[a].change.To})
			a++
			d++
		} else if added[a].Less(&deleted[d]) {
			still_added = append(still_added, added[a].change)
			a++
		} else {
			still_deleted = append(still_deleted, deleted[d].change)
			d++
		}
	}
	for ; a < added.Len(); a++ {
		still_added = append(still_added, added[a].change)
	}
	for ; d < deleted.Len(); d++ {
		still_deleted = append(still_deleted, deleted[d].change)
	}

	// Stage 2 - apply the similarity threshold
	// n^2 but actually linear
	// We sort the blobs by size and do the single linear scan.
	added_blobs := make(sortableBlobs, 0, still_added.Len())
	deleted_blobs := make(sortableBlobs, 0, still_deleted.Len())
	for _, change := range still_added {
		blob := cache[change.To.TreeEntry.Hash]
		added_blobs = append(
			added_blobs, sortableBlob{change: change, size: blob.Size})
	}
	for _, change := range still_deleted {
		blob := cache[change.From.TreeEntry.Hash]
		deleted_blobs = append(
			deleted_blobs, sortableBlob{change: change, size: blob.Size})
	}
	sort.Sort(added_blobs)
	sort.Sort(deleted_blobs)
	d_start := 0
	for a = 0; a < added_blobs.Len(); a++ {
		my_blob := cache[added_blobs[a].change.To.TreeEntry.Hash]
		my_size := added_blobs[a].size
		for d = d_start; d < deleted_blobs.Len() && !ra.sizesAreClose(my_size, deleted_blobs[d].size); d++ {
		}
		d_start = d
		found_match := false
		for d = d_start; d < deleted_blobs.Len() && ra.sizesAreClose(my_size, deleted_blobs[d].size); d++ {
			blobsAreClose, err := ra.blobsAreClose(
				my_blob, cache[deleted_blobs[d].change.From.TreeEntry.Hash])
			if err != nil {
				return nil, err
			}
			if blobsAreClose {
				found_match = true
				reduced_changes = append(
					reduced_changes,
					&object.Change{From: deleted_blobs[d].change.From,
						To: added_blobs[a].change.To})
				break
			}
		}
		if found_match {
			added_blobs = append(added_blobs[:a], added_blobs[a+1:]...)
			a--
			deleted_blobs = append(deleted_blobs[:d], deleted_blobs[d+1:]...)
		}
	}

	// Stage 3 - we give up, everything left are independent additions and deletions
	for _, blob := range added_blobs {
		reduced_changes = append(reduced_changes, blob.change)
	}
	for _, blob := range deleted_blobs {
		reduced_changes = append(reduced_changes, blob.change)
	}
	return map[string]interface{}{"renamed_changes": reduced_changes}, nil
}

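// Finalize returns the result of the analysis. RenameAnalysis only transforms
// the change stream, so there is nothing to report.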
func (ra *RenameAnalysis) Finalize() interface{} {
	return nil
}

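// sizesAreClose reports whether the relative difference between two blob sizes
// fits into (100 - SimilarityThreshold) percent of the smaller blob. For
// example, with the default threshold of 90, sizes 1000 and 1080 are close
// (8% <= 10%) while 1000 and 1200 are not. abs64, min64 and max64 are
// presumably package-level helpers defined in another file of hercules.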
func (ra *RenameAnalysis) sizesAreClose(size1 int64, size2 int64) bool {
	return abs64(size1-size2)*100/max64(1, min64(size1, size2)) <=
		int64(100-ra.SimilarityThreshold)
}

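// blobsAreClose runs a line-based diff between two blobs (each distinct line
// is mapped to a rune by diffmatchpatch) and reports whether the share of
// unchanged lines, measured against the shorter blob, reaches
// SimilarityThreshold. blobToString is presumably a helper defined elsewhere
// in the package.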
func (ra *RenameAnalysis) blobsAreClose(
	blob1 *object.Blob, blob2 *object.Blob) (bool, error) {
	str_from, err := blobToString(blob1)
	if err != nil {
		return false, err
	}
	str_to, err := blobToString(blob2)
	if err != nil {
		return false, err
	}
	dmp := diffmatchpatch.New()
	src, dst, _ := dmp.DiffLinesToRunes(str_from, str_to)
	diffs := dmp.DiffMainRunes(src, dst, false)
	common := 0
	for _, edit := range diffs {
		if edit.Type == diffmatchpatch.DiffEqual {
			common += utf8.RuneCountInString(edit.Text)
		}
	}
	return common*100/max(1, min(len(src), len(dst))) >= ra.SimilarityThreshold, nil
}

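// sortableChange pairs a tree change with the blob hash it is sorted by;
// sortableChanges implements sort.Interface over these pairs.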
type sortableChange struct {
	change *object.Change
	hash   plumbing.Hash
}

type sortableChanges []sortableChange

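// Less orders the changes lexicographically by the 20 bytes of their hashes.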
func (change *sortableChange) Less(other *sortableChange) bool {
	for x := 0; x < 20; x++ {
		if change.hash[x] < other.hash[x] {
			return true
		}
		if change.hash[x] > other.hash[x] {
			return false
		}
	}
	return false
}

func (slice sortableChanges) Len() int {
	return len(slice)
}

func (slice sortableChanges) Less(i, j int) bool {
	return slice[i].Less(&slice[j])
}

func (slice sortableChanges) Swap(i, j int) {
	slice[i], slice[j] = slice[j], slice[i]
}

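// sortableBlob pairs a tree change with the blob size; sortableBlobs
// implements sort.Interface and orders the blobs by increasing size.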
type sortableBlob struct {
	change *object.Change
	size   int64
}

type sortableBlobs []sortableBlob

func (change *sortableBlob) Less(other *sortableBlob) bool {
	return change.size < other.size
}

func (slice sortableBlobs) Len() int {
	return len(slice)
}

func (slice sortableBlobs) Less(i, j int) bool {
	return slice[i].Less(&slice[j])
}

func (slice sortableBlobs) Swap(i, j int) {
	slice[i], slice[j] = slice[j], slice[i]
}