renames.go

package plumbing

import (
	"log"
	"sort"
	"unicode/utf8"

	"github.com/sergi/go-diff/diffmatchpatch"
	"gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
	"gopkg.in/src-d/go-git.v4/utils/merkletrie"

	"gopkg.in/src-d/hercules.v5/internal"
	"gopkg.in/src-d/hercules.v5/internal/core"
)
// RenameAnalysis improves TreeDiff's results by searching for changed blobs under different
// paths which are likely to be the result of a rename with subsequent edits.
// RenameAnalysis is a PipelineItem.
type RenameAnalysis struct {
	core.NoopMerger
	// SimilarityThreshold adjusts the heuristic used to detect file renames.
	// It has the same units as cgit's -X rename-threshold or -M. It is best
	// left at the default value of 80 (80%).
	SimilarityThreshold int

	repository *git.Repository
}
const (
	// RenameAnalysisDefaultThreshold specifies the default percentage of common lines in a pair
	// of files to consider them linked. The exact code of the decision is sizesAreClose().
	// CGit's default is 50%. Ours is 80% because 50% can be too computationally expensive.
	RenameAnalysisDefaultThreshold = 80

	// ConfigRenameAnalysisSimilarityThreshold is the name of the configuration option
	// (RenameAnalysis.Configure()) which sets the similarity threshold.
	ConfigRenameAnalysisSimilarityThreshold = "RenameAnalysis.SimilarityThreshold"
)
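// With the default threshold of 80, a pair of blobs is only compared content-wise
// when their sizes differ by no more than 20% of the larger one (see sizesAreClose()),
// and a rename is reported only when at least 80% of the content is estimated to be
// common (see blobsAreClose()).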
// Name of this PipelineItem. Uniquely identifies the type, used for mapping keys, etc.
func (ra *RenameAnalysis) Name() string {
	return "RenameAnalysis"
}
// Provides returns the list of names of entities which are produced by this PipelineItem.
// Each produced entity will be inserted into `deps` of dependent Consume()-s according
// to this list. Also used by core.Registry to build the global map of providers.
func (ra *RenameAnalysis) Provides() []string {
	arr := [...]string{DependencyTreeChanges}
	return arr[:]
}
// Requires returns the list of names of entities which are needed by this PipelineItem.
// Each requested entity will be inserted into `deps` of Consume(). In turn, those
// entities are produced by the upstream PipelineItem-s' Provides().
func (ra *RenameAnalysis) Requires() []string {
	arr := [...]string{DependencyBlobCache, DependencyTreeChanges}
	return arr[:]
}
// ListConfigurationOptions returns the list of changeable public properties of this PipelineItem.
func (ra *RenameAnalysis) ListConfigurationOptions() []core.ConfigurationOption {
	options := [...]core.ConfigurationOption{{
		Name:        ConfigRenameAnalysisSimilarityThreshold,
		Description: "The threshold on the similarity index used to detect renames.",
		Flag:        "M",
		Type:        core.IntConfigurationOption,
		Default:     RenameAnalysisDefaultThreshold},
	}
	return options[:]
}
// Configure sets the properties previously published by ListConfigurationOptions().
func (ra *RenameAnalysis) Configure(facts map[string]interface{}) {
	if val, exists := facts[ConfigRenameAnalysisSimilarityThreshold].(int); exists {
		ra.SimilarityThreshold = val
	}
}
// Initialize resets the temporary caches and prepares this PipelineItem for a series of Consume()
// calls. The repository which is going to be analysed is supplied as an argument.
func (ra *RenameAnalysis) Initialize(repository *git.Repository) {
	if ra.SimilarityThreshold < 0 || ra.SimilarityThreshold > 100 {
		log.Printf("Warning: adjusted the similarity threshold to %d\n",
			RenameAnalysisDefaultThreshold)
		ra.SimilarityThreshold = RenameAnalysisDefaultThreshold
	}
	ra.repository = repository
}
// Consume runs this PipelineItem on the next commit data.
// `deps` contain all the results from upstream PipelineItem-s as requested by Requires().
// Additionally, DependencyCommit is always present there and represents the analysed *object.Commit.
// This function returns the mapping with analysis results. The keys must be the same as
// in Provides(). If there was an error, nil is returned.
func (ra *RenameAnalysis) Consume(deps map[string]interface{}) (map[string]interface{}, error) {
	changes := deps[DependencyTreeChanges].(object.Changes)
	cache := deps[DependencyBlobCache].(map[plumbing.Hash]*CachedBlob)
	reducedChanges := make(object.Changes, 0, changes.Len())

	// Stage 1 - find renames by matching the hashes
	// n log(n)
	// We sort additions and deletions by hash and then do a single scan along
	// both slices.
	deleted := make(sortableChanges, 0, changes.Len())
	added := make(sortableChanges, 0, changes.Len())
	for _, change := range changes {
		action, err := change.Action()
		if err != nil {
			return nil, err
		}
		switch action {
		case merkletrie.Insert:
			added = append(added, sortableChange{change, change.To.TreeEntry.Hash})
		case merkletrie.Delete:
			deleted = append(deleted, sortableChange{change, change.From.TreeEntry.Hash})
		case merkletrie.Modify:
			reducedChanges = append(reducedChanges, change)
		}
	}
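	// Merge the two hash-sorted slices: an equal pair of hashes is an exact rename
	// (identical content under a different path). Everything else goes to the
	// stillAdded/stillDeleted leftovers and is retried in Stage 2.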
	sort.Sort(deleted)
	sort.Sort(added)
	a := 0
	d := 0
	stillDeleted := make(object.Changes, 0, deleted.Len())
	stillAdded := make(object.Changes, 0, added.Len())
	for a < added.Len() && d < deleted.Len() {
		if added[a].hash == deleted[d].hash {
			reducedChanges = append(
				reducedChanges,
				&object.Change{From: deleted[d].change.From, To: added[a].change.To})
			a++
			d++
		} else if added[a].Less(&deleted[d]) {
			stillAdded = append(stillAdded, added[a].change)
			a++
		} else {
			stillDeleted = append(stillDeleted, deleted[d].change)
			d++
		}
	}
	for ; a < added.Len(); a++ {
		stillAdded = append(stillAdded, added[a].change)
	}
	for ; d < deleted.Len(); d++ {
		stillDeleted = append(stillDeleted, deleted[d].change)
	}
	// Stage 2 - apply the similarity threshold
	// n^2 but actually linear
	// We sort the blobs by size and do a single linear scan.
	addedBlobs := make(sortableBlobs, 0, stillAdded.Len())
	deletedBlobs := make(sortableBlobs, 0, stillDeleted.Len())
	for _, change := range stillAdded {
		blob := cache[change.To.TreeEntry.Hash]
		addedBlobs = append(
			addedBlobs, sortableBlob{change: change, size: blob.Size})
	}
	for _, change := range stillDeleted {
		blob := cache[change.From.TreeEntry.Hash]
		deletedBlobs = append(
			deletedBlobs, sortableBlob{change: change, size: blob.Size})
	}
	sort.Sort(addedBlobs)
	sort.Sort(deletedBlobs)
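	// Both slices are sorted by size, so for each added blob the candidate deleted
	// blobs form a contiguous window of close sizes. The window start (dStart) only
	// moves forward, which keeps the scan effectively linear.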
	dStart := 0
	for a = 0; a < addedBlobs.Len(); a++ {
		myBlob := cache[addedBlobs[a].change.To.TreeEntry.Hash]
		mySize := addedBlobs[a].size
		// skip the deleted blobs which are too small to match
		for d = dStart; d < deletedBlobs.Len() && !ra.sizesAreClose(mySize, deletedBlobs[d].size); d++ {
		}
		dStart = d
		foundMatch := false
		for d = dStart; d < deletedBlobs.Len() && ra.sizesAreClose(mySize, deletedBlobs[d].size); d++ {
			blobsAreClose, err := ra.blobsAreClose(
				myBlob, cache[deletedBlobs[d].change.From.TreeEntry.Hash])
			if err != nil {
				return nil, err
			}
			if blobsAreClose {
				foundMatch = true
				reducedChanges = append(
					reducedChanges,
					&object.Change{From: deletedBlobs[d].change.From,
						To: addedBlobs[a].change.To})
				break
			}
		}
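		// A matched pair is consumed: remove both blobs from their slices and
		// re-examine the current index, which now holds the next added blob.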
		if foundMatch {
			addedBlobs = append(addedBlobs[:a], addedBlobs[a+1:]...)
			a--
			deletedBlobs = append(deletedBlobs[:d], deletedBlobs[d+1:]...)
		}
	}
	// Stage 3 - we give up; everything left is independent additions and deletions
	for _, blob := range addedBlobs {
		reducedChanges = append(reducedChanges, blob.change)
	}
	for _, blob := range deletedBlobs {
		reducedChanges = append(reducedChanges, blob.change)
	}
	return map[string]interface{}{DependencyTreeChanges: reducedChanges}, nil
}
// Fork clones this PipelineItem.
func (ra *RenameAnalysis) Fork(n int) []core.PipelineItem {
	return core.ForkSamePipelineItem(ra, n)
}
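// sizesAreClose reports whether two byte sizes differ by no more than
// (100 - SimilarityThreshold) percent of the larger one. For example, with the
// default threshold of 80, sizesAreClose(100, 120) is true (20*100/120 == 16 <= 20)
// while sizesAreClose(100, 130) is false (30*100/130 == 23 > 20).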
func (ra *RenameAnalysis) sizesAreClose(size1 int64, size2 int64) bool {
	size := internal.Max64(1, internal.Max64(size1, size2))
	return (internal.Abs64(size1-size2)*100)/size <= int64(100-ra.SimilarityThreshold)
}
func (ra *RenameAnalysis) blobsAreClose(
	blob1 *CachedBlob, blob2 *CachedBlob) (bool, error) {
	src, dst := string(blob1.Data), string(blob2.Data)
	maxSize := internal.Max(1, internal.Max(utf8.RuneCountInString(src), utf8.RuneCountInString(dst)))
	// compute the line-by-line diff, then the char-level diffs of the del-ins blocks
	// yes, this algorithm is greedy and not exact
	dmp := diffmatchpatch.New()
	srcLines, dstLines, lines := dmp.DiffLinesToRunes(src, dst)
	diffs := dmp.DiffMainRunes(srcLines, dstLines, false)
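	// DiffLinesToRunes encodes every distinct line as a single rune, so the diff
	// above operates on whole lines; each edit.Text below is a sequence of rune
	// indices into `lines`.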
	var common, posSrc, prevPosSrc, posDst int
	possibleDelInsBlock := false
	for _, edit := range diffs {
		switch edit.Type {
		case diffmatchpatch.DiffDelete:
			possibleDelInsBlock = true
			prevPosSrc = posSrc
			for _, lineno := range edit.Text {
				posSrc += len(lines[lineno])
			}
		case diffmatchpatch.DiffInsert:
			nextPosDst := posDst
			for _, lineno := range edit.Text {
				nextPosDst += len(lines[lineno])
			}
			if possibleDelInsBlock {
				possibleDelInsBlock = false
				// a deletion immediately followed by an insertion is a candidate
				// rewrite: diff the two regions at the character level and count
				// the equal runes as common content
				localDmp := diffmatchpatch.New()
				localSrc := src[prevPosSrc:posSrc]
				localDst := dst[posDst:nextPosDst]
				localDiffs := localDmp.DiffMainRunes([]rune(localSrc), []rune(localDst), false)
				for _, localEdit := range localDiffs {
					if localEdit.Type == diffmatchpatch.DiffEqual {
						common += utf8.RuneCountInString(localEdit.Text)
					}
				}
			}
			posDst = nextPosDst
		case diffmatchpatch.DiffEqual:
			possibleDelInsBlock = false
			for _, lineno := range edit.Text {
				common += utf8.RuneCountInString(lines[lineno])
				step := len(lines[lineno])
				posSrc += step
				posDst += step
			}
		}
		// Supposing that the rest of the lines are the same (they are not - this is
		// too optimistic), estimate the maximum possible similarity and exit the
		// loop early if even that is lower than our threshold.
		maxCommon := common + internal.Min(
			utf8.RuneCountInString(src[posSrc:]),
			utf8.RuneCountInString(dst[posDst:]))
		similarity := (maxCommon * 100) / maxSize
		if similarity < ra.SimilarityThreshold {
			return false, nil
		}
	}
	// The very last "overly optimistic" estimate was actually precise, so since we
	// are still here, the blobs are similar.
	return true, nil
}
type sortableChange struct {
	change *object.Change
	hash   plumbing.Hash
}

type sortableChanges []sortableChange
// Less lexicographically compares the 20-byte hashes; it is equivalent to
// bytes.Compare(change.hash[:], other.hash[:]) < 0.
func (change *sortableChange) Less(other *sortableChange) bool {
	for x := 0; x < 20; x++ {
		if change.hash[x] < other.hash[x] {
			return true
		}
		if change.hash[x] > other.hash[x] {
			return false
		}
	}
	return false
}
func (slice sortableChanges) Len() int {
	return len(slice)
}

func (slice sortableChanges) Less(i, j int) bool {
	return slice[i].Less(&slice[j])
}

func (slice sortableChanges) Swap(i, j int) {
	slice[i], slice[j] = slice[j], slice[i]
}
type sortableBlob struct {
	change *object.Change
	size   int64
}

type sortableBlobs []sortableBlob

func (blob *sortableBlob) Less(other *sortableBlob) bool {
	return blob.size < other.size
}
func (slice sortableBlobs) Len() int {
	return len(slice)
}

func (slice sortableBlobs) Less(i, j int) bool {
	return slice[i].Less(&slice[j])
}

func (slice sortableBlobs) Swap(i, j int) {
	slice[i], slice[j] = slice[j], slice[i]
}
func init() {
	core.Registry.Register(&RenameAnalysis{})
}
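// A minimal usage sketch, assuming the hercules core API: RenameAnalysis is not
// normally instantiated directly but deployed into a pipeline, e.g.
//
//	pipeline := core.NewPipeline(repository)
//	pipeline.DeployItem(&RenameAnalysis{SimilarityThreshold: 90})
//
// after which its refined tree changes flow to every downstream item that
// requires DependencyTreeChanges.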