package hercules

import (
	"fmt"
	"os"
	"sort"
	"unicode/utf8"

	"github.com/sergi/go-diff/diffmatchpatch"
	"gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
	"gopkg.in/src-d/go-git.v4/utils/merkletrie"
)
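
// RenameAnalysis detects renamed files in a commit: it matches the deleted
// files against the added ones, first by identical blob hashes and then by
// content similarity, and merges every matched pair into a single change.
// The rewritten list replaces the incoming "changes" dependency.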
type RenameAnalysis struct {
	// SimilarityThreshold adjusts the heuristic to determine file renames.
	// It has the same units as cgit's -X rename-threshold or -M. Better to
	// set it to the default value of 90 (90%).
	SimilarityThreshold int

	repository *git.Repository
}
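
// RENAME_ANALYSIS_DEFAULT_THRESHOLD is the default value of SimilarityThreshold;
// ConfigRenameAnalysisSimilarityThreshold is the name of the fact which overrides it.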
const (
	RENAME_ANALYSIS_DEFAULT_THRESHOLD       = 90
	ConfigRenameAnalysisSimilarityThreshold = "RenameAnalysis.SimilarityThreshold"
)

func (ra *RenameAnalysis) Name() string {
	return "RenameAnalysis"
}
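
// Provides lists the entities produced by this item: the reduced "changes"
// collection with the detected renames merged in.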
func (ra *RenameAnalysis) Provides() []string {
	arr := [...]string{"changes"}
	return arr[:]
}
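
// Requires lists the entities consumed by this item: the upstream "changes"
// and the "blob_cache" map keyed by blob hash.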
func (ra *RenameAnalysis) Requires() []string {
	arr := [...]string{"blob_cache", "changes"}
	return arr[:]
}
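
// ListConfigurationOptions exposes the single supported option, the similarity
// threshold, through the "M" flag.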
func (ra *RenameAnalysis) ListConfigurationOptions() []ConfigurationOption {
	options := [...]ConfigurationOption{{
		Name:        ConfigRenameAnalysisSimilarityThreshold,
		Description: "The threshold on the similarity index used to detect renames.",
		Flag:        "M",
		Type:        IntConfigurationOption,
		Default:     RENAME_ANALYSIS_DEFAULT_THRESHOLD},
	}
	return options[:]
}
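
// Configure applies the parameters stored in the facts map; only
// ConfigRenameAnalysisSimilarityThreshold is recognized.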
func (ra *RenameAnalysis) Configure(facts map[string]interface{}) {
	if val, exists := facts[ConfigRenameAnalysisSimilarityThreshold].(int); exists {
		ra.SimilarityThreshold = val
	}
}
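
// Initialize prepares the item for a run: it falls back to the default
// threshold when the configured value is outside [0, 100] and stores the
// repository reference.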
func (ra *RenameAnalysis) Initialize(repository *git.Repository) {
	if ra.SimilarityThreshold < 0 || ra.SimilarityThreshold > 100 {
		fmt.Fprintf(os.Stderr, "Warning: adjusted the similarity threshold to %d\n",
			RENAME_ANALYSIS_DEFAULT_THRESHOLD)
		ra.SimilarityThreshold = RENAME_ANALYSIS_DEFAULT_THRESHOLD
	}
	ra.repository = repository
}
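
// Consume reduces the incoming "changes" by merging matching deletions and
// additions into renames. Matching happens in three stages: exact blob hash
// equality, a similarity comparison of blobs with close sizes, and a final
// pass that keeps everything left as independent additions and deletions.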
func (ra *RenameAnalysis) Consume(deps map[string]interface{}) (map[string]interface{}, error) {
	changes := deps["changes"].(object.Changes)
	cache := deps["blob_cache"].(map[plumbing.Hash]*object.Blob)
	reduced_changes := make(object.Changes, 0, changes.Len())
	// Stage 1 - find renames by matching the hashes
	// n log(n)
	// We sort additions and deletions by hash and then do the single scan along
	// both slices.
	deleted := make(sortableChanges, 0, changes.Len())
	added := make(sortableChanges, 0, changes.Len())
	for _, change := range changes {
		action, err := change.Action()
		if err != nil {
			return nil, err
		}
		switch action {
		case merkletrie.Insert:
			added = append(added, sortableChange{change, change.To.TreeEntry.Hash})
		case merkletrie.Delete:
			deleted = append(deleted, sortableChange{change, change.From.TreeEntry.Hash})
		case merkletrie.Modify:
			reduced_changes = append(reduced_changes, change)
		}
	}
	sort.Sort(deleted)
	sort.Sort(added)
	a := 0
	d := 0
	still_deleted := make(object.Changes, 0, deleted.Len())
	still_added := make(object.Changes, 0, added.Len())
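	// Merge-like pass over both sorted slices: an equal hash is an exact rename;
	// otherwise the entry with the smaller hash cannot match anything further and
	// is kept for stage 2.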
	for a < added.Len() && d < deleted.Len() {
		if added[a].hash == deleted[d].hash {
			reduced_changes = append(
				reduced_changes,
				&object.Change{From: deleted[d].change.From, To: added[a].change.To})
			a++
			d++
		} else if added[a].Less(&deleted[d]) {
			still_added = append(still_added, added[a].change)
			a++
		} else {
			still_deleted = append(still_deleted, deleted[d].change)
			d++
		}
	}
	for ; a < added.Len(); a++ {
		still_added = append(still_added, added[a].change)
	}
	for ; d < deleted.Len(); d++ {
		still_deleted = append(still_deleted, deleted[d].change)
	}
	// Stage 2 - apply the similarity threshold
	// n^2 but actually linear
	// We sort the blobs by size and do the single linear scan.
	added_blobs := make(sortableBlobs, 0, still_added.Len())
	deleted_blobs := make(sortableBlobs, 0, still_deleted.Len())
	for _, change := range still_added {
		blob := cache[change.To.TreeEntry.Hash]
		added_blobs = append(
			added_blobs, sortableBlob{change: change, size: blob.Size})
	}
	for _, change := range still_deleted {
		blob := cache[change.From.TreeEntry.Hash]
		deleted_blobs = append(
			deleted_blobs, sortableBlob{change: change, size: blob.Size})
	}
	sort.Sort(added_blobs)
	sort.Sort(deleted_blobs)
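	// Both slices are sorted by size, so for each added blob we only scan the
	// window of deleted blobs whose sizes are close enough; d_start advances
	// monotonically, which keeps the overall scan roughly linear.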
	d_start := 0
	for a = 0; a < added_blobs.Len(); a++ {
		my_blob := cache[added_blobs[a].change.To.TreeEntry.Hash]
		my_size := added_blobs[a].size
		for d = d_start; d < deleted_blobs.Len() && !ra.sizesAreClose(my_size, deleted_blobs[d].size); d++ {
		}
		d_start = d
		found_match := false
		for d = d_start; d < deleted_blobs.Len() && ra.sizesAreClose(my_size, deleted_blobs[d].size); d++ {
			blobsAreClose, err := ra.blobsAreClose(
				my_blob, cache[deleted_blobs[d].change.From.TreeEntry.Hash])
			if err != nil {
				return nil, err
			}
			if blobsAreClose {
				found_match = true
				reduced_changes = append(
					reduced_changes,
					&object.Change{From: deleted_blobs[d].change.From,
						To: added_blobs[a].change.To})
				break
			}
		}
		if found_match {
			added_blobs = append(added_blobs[:a], added_blobs[a+1:]...)
			a--
			deleted_blobs = append(deleted_blobs[:d], deleted_blobs[d+1:]...)
		}
	}
	// Stage 3 - we give up; everything that is left consists of independent
	// additions and deletions.
	for _, blob := range added_blobs {
		reduced_changes = append(reduced_changes, blob.change)
	}
	for _, blob := range deleted_blobs {
		reduced_changes = append(reduced_changes, blob.change)
	}
	return map[string]interface{}{"changes": reduced_changes}, nil
}
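
// sizesAreClose reports whether the size difference between the two blobs,
// relative to the smaller one, does not exceed (100 - SimilarityThreshold) percent.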
func (ra *RenameAnalysis) sizesAreClose(size1 int64, size2 int64) bool {
	return abs64(size1-size2)*100/max64(1, min64(size1, size2)) <=
		int64(100-ra.SimilarityThreshold)
}
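
// blobsAreClose converts both blobs to text, diffs them line by line and
// reports whether the number of common lines is at least SimilarityThreshold
// percent of the smaller blob's line count.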
func (ra *RenameAnalysis) blobsAreClose(
	blob1 *object.Blob, blob2 *object.Blob) (bool, error) {
	str_from, err := BlobToString(blob1)
	if err != nil {
		return false, err
	}
	str_to, err := BlobToString(blob2)
	if err != nil {
		return false, err
	}
	dmp := diffmatchpatch.New()
	src, dst, _ := dmp.DiffLinesToRunes(str_from, str_to)
	diffs := dmp.DiffMainRunes(src, dst, false)
	common := 0
	for _, edit := range diffs {
		if edit.Type == diffmatchpatch.DiffEqual {
			common += utf8.RuneCountInString(edit.Text)
		}
	}
	return common*100/max(1, min(len(src), len(dst))) >= ra.SimilarityThreshold, nil
}

type sortableChange struct {
	change *object.Change
	hash   plumbing.Hash
}

type sortableChanges []sortableChange
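
// Less lexicographically compares the two 20-byte blob hashes.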
func (change *sortableChange) Less(other *sortableChange) bool {
	for x := 0; x < 20; x++ {
		if change.hash[x] != other.hash[x] {
			return change.hash[x] < other.hash[x]
		}
	}
	return false
}
func (slice sortableChanges) Len() int {
	return len(slice)
}

func (slice sortableChanges) Less(i, j int) bool {
	return slice[i].Less(&slice[j])
}

func (slice sortableChanges) Swap(i, j int) {
	slice[i], slice[j] = slice[j], slice[i]
}

type sortableBlob struct {
	change *object.Change
	size   int64
}

type sortableBlobs []sortableBlob

func (change *sortableBlob) Less(other *sortableBlob) bool {
	return change.size < other.size
}

func (slice sortableBlobs) Len() int {
	return len(slice)
}

func (slice sortableBlobs) Less(i, j int) bool {
	return slice[i].Less(&slice[j])
}

func (slice sortableBlobs) Swap(i, j int) {
	slice[i], slice[j] = slice[j], slice[i]
}
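
// Register the analysis in the package-level Registry at package load time
// (presumably so the pipeline can instantiate it by name).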
func init() {
	Registry.Register(&RenameAnalysis{})
}