pipeline.go

package hercules

import (
	"bufio"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"sort"
	"time"

	"gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
	"gopkg.in/src-d/hercules.v3/pb"
	"gopkg.in/src-d/hercules.v3/toposort"
)
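
// ConfigurationOptionType represents the possible kinds of a ConfigurationOption's value.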
type ConfigurationOptionType int

const (
	// Boolean value type.
	BoolConfigurationOption ConfigurationOptionType = iota
	// Integer value type.
	IntConfigurationOption
	// String value type.
	StringConfigurationOption
)
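
// String returns an empty string for BoolConfigurationOption, "int" for
// IntConfigurationOption and "string" for StringConfigurationOption.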
func (opt ConfigurationOptionType) String() string {
	switch opt {
	case BoolConfigurationOption:
		return ""
	case IntConfigurationOption:
		return "int"
	case StringConfigurationOption:
		return "string"
	}
	panic(fmt.Sprintf("Invalid ConfigurationOptionType value %d", opt))
}

// ConfigurationOption allows for the unified, retroactive way to set up PipelineItem-s.
type ConfigurationOption struct {
	// Name identifies the configuration option in facts.
	Name string
	// Description represents the help text about the configuration option.
	Description string
	// Flag corresponds to the CLI token with "-" prepended.
	Flag string
	// Type specifies the kind of the configuration option's value.
	Type ConfigurationOptionType
	// Default is the initial value of the configuration option.
	Default interface{}
}
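
// FormatDefault converts the default value of the option to a string. String values are
// wrapped in double quotes.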
func (opt ConfigurationOption) FormatDefault() string {
	if opt.Type != StringConfigurationOption {
		return fmt.Sprint(opt.Default)
	}
	return fmt.Sprintf("\"%s\"", opt.Default)
}

// PipelineItem is the interface for all the units of the Git commit analysis pipeline.
type PipelineItem interface {
	// Name returns the name of the analysis.
	Name() string
	// Provides returns the list of keys of reusable calculated entities.
	// Other items may depend on them.
	Provides() []string
	// Requires returns the list of keys of needed entities which must be supplied in Consume().
	Requires() []string
	// ListConfigurationOptions returns the list of available options which can be consumed by Configure().
	ListConfigurationOptions() []ConfigurationOption
	// Configure performs the initial setup of the object by applying parameters from facts.
	// It allows creating PipelineItems in a universal way.
	Configure(facts map[string]interface{})
	// Initialize prepares and resets the item. Consume() requires Initialize()
	// to be called at least once beforehand.
	Initialize(*git.Repository)
	// Consume processes the next commit.
	// deps contains the required entities which match Requires(). Besides, it always includes
	// "commit" and "index".
	// Returns the calculated entities which match Provides().
	Consume(deps map[string]interface{}) (map[string]interface{}, error)
}

// FeaturedPipelineItem enables switching the automatic insertion of pipeline items on or off.
type FeaturedPipelineItem interface {
	PipelineItem
	// Features returns the list of names which enable this item to be automatically inserted
	// in Pipeline.DeployItem().
	Features() []string
}

// LeafPipelineItem corresponds to the top level pipeline items which produce the end results.
type LeafPipelineItem interface {
	PipelineItem
	// Flag returns the cmdline name of the item.
	Flag() string
	// Finalize returns the result of the analysis.
	Finalize() interface{}
	// Serialize encodes the object returned by Finalize() to YAML or Protocol Buffers.
	Serialize(result interface{}, binary bool, writer io.Writer) error
}

// MergeablePipelineItem specifies the methods to combine several analysis results together.
type MergeablePipelineItem interface {
	LeafPipelineItem
	// Deserialize loads the result from a Protocol Buffers blob.
	Deserialize(pbmessage []byte) (interface{}, error)
	// MergeResults joins two results together. Common-s are specified as the global state.
	MergeResults(r1, r2 interface{}, c1, c2 *CommonAnalysisResult) interface{}
}

// CommonAnalysisResult holds the information which is always extracted at Pipeline.Run().
type CommonAnalysisResult struct {
	// Time of the first commit in the analysed sequence.
	BeginTime int64
	// Time of the last commit in the analysed sequence.
	EndTime int64
	// The number of commits in the analysed sequence.
	CommitsNumber int
	// The duration of Pipeline.Run().
	RunTime time.Duration
}
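
// BeginTimeAsTime converts the UNIX timestamp of the first commit to time.Time.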
func (car *CommonAnalysisResult) BeginTimeAsTime() time.Time {
	return time.Unix(car.BeginTime, 0)
}
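
// EndTimeAsTime converts the UNIX timestamp of the last commit to time.Time.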
func (car *CommonAnalysisResult) EndTimeAsTime() time.Time {
	return time.Unix(car.EndTime, 0)
}
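
// Merge combines the CommonAnalysisResult with another one: the time span is widened and
// the commit counts and run times are added up. The other result is left unchanged.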
func (car *CommonAnalysisResult) Merge(other *CommonAnalysisResult) {
	if car.EndTime == 0 || other.BeginTime == 0 {
		panic("Merging with an uninitialized CommonAnalysisResult")
	}
	if other.BeginTime < car.BeginTime {
		car.BeginTime = other.BeginTime
	}
	if other.EndTime > car.EndTime {
		car.EndTime = other.EndTime
	}
	car.CommitsNumber += other.CommitsNumber
	car.RunTime += other.RunTime
}
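
// FillMetadata copies the fields into the given pb.Metadata message; the run time is stored
// in milliseconds. It returns the same message for chaining.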
func (car *CommonAnalysisResult) FillMetadata(meta *pb.Metadata) *pb.Metadata {
	meta.BeginUnixTime = car.BeginTime
	meta.EndUnixTime = car.EndTime
	meta.Commits = int32(car.CommitsNumber)
	meta.RunTime = car.RunTime.Nanoseconds() / 1e6
	return meta
}
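
// MetadataToCommonAnalysisResult restores a CommonAnalysisResult from a pb.Metadata message.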
func MetadataToCommonAnalysisResult(meta *pb.Metadata) *CommonAnalysisResult {
	return &CommonAnalysisResult{
		BeginTime:     meta.BeginUnixTime,
		EndTime:       meta.EndUnixTime,
		CommitsNumber: int(meta.Commits),
		RunTime:       time.Duration(meta.RunTime * 1e6),
	}
}
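
// Pipeline is the core Hercules entity which carries the dependency graph of PipelineItems
// and executes them over the commit history.
//
// A minimal usage sketch; SomeAnalysis below is not a real type, it stands for any
// registered LeafPipelineItem:
//
//	pipeline := NewPipeline(repository)
//	leaf := pipeline.DeployItem(&SomeAnalysis{}).(LeafPipelineItem)
//	pipeline.Initialize(nil)
//	results, err := pipeline.Run(pipeline.Commits())
//	if err == nil {
//		leaf.Serialize(results[leaf], false, os.Stdout)
//	}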
type Pipeline struct {
	// OnProgress is the callback which is invoked in Run() to report the progress.
	// The first argument is the number of processed commits and the second is the
	// total number of commits.
	OnProgress func(int, int)
	// repository points to the analysed Git repository struct from go-git.
	repository *git.Repository
	// items are the registered building blocks in the pipeline. The order defines the
	// execution sequence.
	items []PipelineItem
	// facts is the collection of parameters to create items.
	facts map[string]interface{}
	// features are the feature flags which enable the corresponding items.
	features map[string]bool
}

const (
	// ConfigPipelineDumpPath is the name of the Pipeline.Initialize() fact which makes
	// resolve() write the items DAG to the specified file.
	ConfigPipelineDumpPath = "Pipeline.DumpPath"
	// ConfigPipelineDryRun is the name of the Pipeline.Initialize() fact which skips
	// Configure() and Initialize() on the items after the execution order is resolved.
	ConfigPipelineDryRun = "Pipeline.DryRun"
	// FactPipelineCommits is the name of the fact which holds the commits to be analysed.
	FactPipelineCommits = "commits"
)
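
// NewPipeline initializes a new instance of the Pipeline struct.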
func NewPipeline(repository *git.Repository) *Pipeline {
	return &Pipeline{
		repository: repository,
		items:      []PipelineItem{},
		facts:      map[string]interface{}{},
		features:   map[string]bool{},
	}
}
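
// GetFact returns the value of the fact with the specified name, or nil if it is not set.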
func (pipeline *Pipeline) GetFact(name string) interface{} {
	return pipeline.facts[name]
}
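
// SetFact sets the value of the fact with the specified name.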
func (pipeline *Pipeline) SetFact(name string, value interface{}) {
	pipeline.facts[name] = value
}
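
// GetFeature returns the state of the feature with the specified name (the first return value)
// and whether that feature is registered at all (the second return value).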
func (pipeline *Pipeline) GetFeature(name string) (bool, bool) {
	val, exists := pipeline.features[name]
	return val, exists
}
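
// SetFeature enables the feature with the specified name.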
func (pipeline *Pipeline) SetFeature(name string) {
	pipeline.features[name] = true
}
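
// SetFeaturesFromFlags enables the features listed in the global featureFlags registry,
// which is normally filled from the command line.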
func (pipeline *Pipeline) SetFeaturesFromFlags() {
	for _, feature := range featureFlags.Flags {
		pipeline.SetFeature(feature)
	}
}
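
// DeployItem inserts the item into the pipeline together with all of its unsatisfied
// dependencies, which are summoned from the Registry. It returns the item itself.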
func (pipeline *Pipeline) DeployItem(item PipelineItem) PipelineItem {
	fpi, ok := item.(FeaturedPipelineItem)
	if ok {
		for _, f := range fpi.Features() {
			pipeline.SetFeature(f)
		}
	}
	queue := []PipelineItem{}
	queue = append(queue, item)
	added := map[string]PipelineItem{}
	for _, item := range pipeline.items {
		added[item.Name()] = item
	}
	added[item.Name()] = item
	pipeline.AddItem(item)
	for len(queue) > 0 {
		head := queue[0]
		queue = queue[1:]
		for _, dep := range head.Requires() {
			for _, sibling := range Registry.Summon(dep) {
				if _, exists := added[sibling.Name()]; !exists {
					disabled := false
					// If this item supports features, check them against those activated in pipeline.features.
					if fpi, matches := sibling.(FeaturedPipelineItem); matches {
						for _, feature := range fpi.Features() {
							if !pipeline.features[feature] {
								disabled = true
								break
							}
						}
					}
					if disabled {
						continue
					}
					added[sibling.Name()] = sibling
					queue = append(queue, sibling)
					pipeline.AddItem(sibling)
				}
			}
		}
	}
	return item
}
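
// AddItem appends the item to the pipeline without resolving its dependencies.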
func (pipeline *Pipeline) AddItem(item PipelineItem) PipelineItem {
	pipeline.items = append(pipeline.items, item)
	return item
}
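
// RemoveItem deletes the item from the pipeline if it is present.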
func (pipeline *Pipeline) RemoveItem(item PipelineItem) {
	for i, reg := range pipeline.items {
		if reg == item {
			pipeline.items = append(pipeline.items[:i], pipeline.items[i+1:]...)
			return
		}
	}
}
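
// Len returns the number of items in the pipeline.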
func (pipeline *Pipeline) Len() int {
	return len(pipeline.items)
}

// Commits returns the critical path in the repository's history. It starts
// from HEAD and traces commits backwards until the root. When it encounters
// a merge (more than one parent), it always chooses the first parent.
func (pipeline *Pipeline) Commits() []*object.Commit {
	result := []*object.Commit{}
	repository := pipeline.repository
	head, err := repository.Head()
	if err != nil {
		panic(err)
	}
	commit, err := repository.CommitObject(head.Hash())
	if err != nil {
		panic(err)
	}
	// the first iteration processes the commit pointed to by HEAD,
	// each subsequent one follows the first parent
	for ; err != io.EOF; commit, err = commit.Parents().Next() {
		if err != nil {
			panic(err)
		}
		result = append(result, commit)
	}
	// reverse the order so that the root comes first
	for i, j := 0, len(result)-1; i < j; i, j = i+1, j-1 {
		result[i], result[j] = result[j], result[i]
	}
	return result
}

type sortablePipelineItems []PipelineItem

func (items sortablePipelineItems) Len() int {
	return len(items)
}

func (items sortablePipelineItems) Less(i, j int) bool {
	return items[i].Name() < items[j].Name()
}

func (items sortablePipelineItems) Swap(i, j int) {
	items[i], items[j] = items[j], items[i]
}
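
// resolve builds the dependency graph from the items' Provides()/Requires() declarations,
// breaks the ambiguities, topologically sorts the graph and reorders pipeline.items to match
// the resulting plan. If dumpPath is not empty, the graph is also written to that file.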
func (pipeline *Pipeline) resolve(dumpPath string) {
	graph := toposort.NewGraph()
	sort.Sort(sortablePipelineItems(pipeline.items))
	name2item := map[string]PipelineItem{}
	ambiguousMap := map[string][]string{}
	nameUsages := map[string]int{}
	for _, item := range pipeline.items {
		nameUsages[item.Name()]++
	}
	counters := map[string]int{}
	for _, item := range pipeline.items {
		name := item.Name()
		if nameUsages[name] > 1 {
			index := counters[item.Name()] + 1
			counters[item.Name()] = index
			name = fmt.Sprintf("%s_%d", item.Name(), index)
		}
		graph.AddNode(name)
		name2item[name] = item
		for _, key := range item.Provides() {
			key = "[" + key + "]"
			graph.AddNode(key)
			if graph.AddEdge(name, key) > 1 {
				if ambiguousMap[key] != nil {
					fmt.Fprintln(os.Stderr, "Pipeline:")
					for _, item2 := range pipeline.items {
						if item2 == item {
							fmt.Fprint(os.Stderr, "> ")
						}
						fmt.Fprint(os.Stderr, item2.Name(), " [")
						for i, key2 := range item2.Provides() {
							fmt.Fprint(os.Stderr, key2)
							if i < len(item2.Provides())-1 {
								fmt.Fprint(os.Stderr, ", ")
							}
						}
						fmt.Fprintln(os.Stderr, "]")
					}
					panic("Failed to resolve pipeline dependencies: ambiguous graph.")
				}
				ambiguousMap[key] = graph.FindParents(key)
			}
		}
	}
	counters = map[string]int{}
	for _, item := range pipeline.items {
		name := item.Name()
		if nameUsages[name] > 1 {
			index := counters[item.Name()] + 1
			counters[item.Name()] = index
			name = fmt.Sprintf("%s_%d", item.Name(), index)
		}
		for _, key := range item.Requires() {
			key = "[" + key + "]"
			if graph.AddEdge(key, name) == 0 {
				panic(fmt.Sprintf("Unsatisfied dependency: %s -> %s", key, item.Name()))
			}
		}
	}
	if len(ambiguousMap) > 0 {
		ambiguous := []string{}
		for key := range ambiguousMap {
			ambiguous = append(ambiguous, key)
		}
		sort.Strings(ambiguous)
		bfsorder := graph.BreadthSort()
		bfsindex := map[string]int{}
		for i, s := range bfsorder {
			bfsindex[s] = i
		}
		for len(ambiguous) > 0 {
			key := ambiguous[0]
			ambiguous = ambiguous[1:]
			pair := ambiguousMap[key]
			inheritor := pair[1]
			if bfsindex[pair[1]] < bfsindex[pair[0]] {
				inheritor = pair[0]
			}
			removed := graph.RemoveEdge(key, inheritor)
			cycle := map[string]bool{}
			for _, node := range graph.FindCycle(key) {
				cycle[node] = true
			}
			if len(cycle) == 0 {
				cycle[inheritor] = true
			}
			if removed {
				graph.AddEdge(key, inheritor)
			}
			graph.RemoveEdge(inheritor, key)
			graph.ReindexNode(inheritor)
			// for all the nodes which key links to, except those in the cycle,
			// move the link so that it originates from inheritor instead
			for _, node := range graph.FindChildren(key) {
				if _, exists := cycle[node]; !exists {
					graph.AddEdge(inheritor, node)
					graph.RemoveEdge(key, node)
				}
			}
			graph.ReindexNode(key)
		}
	}
	var graphCopy *toposort.Graph
	if dumpPath != "" {
		graphCopy = graph.Copy()
	}
	strplan, ok := graph.Toposort()
	if !ok {
		panic("Failed to resolve pipeline dependencies: unable to topologically sort the items.")
	}
	pipeline.items = make([]PipelineItem, 0, len(pipeline.items))
	for _, key := range strplan {
		if item, ok := name2item[key]; ok {
			pipeline.items = append(pipeline.items, item)
		}
	}
	if dumpPath != "" {
		// If there is a floating difference, uncomment this:
		// fmt.Fprint(os.Stderr, graphCopy.DebugDump())
		ioutil.WriteFile(dumpPath, []byte(graphCopy.Serialize(strplan)), 0666)
		absPath, _ := filepath.Abs(dumpPath)
		fmt.Fprintf(os.Stderr, "Wrote the DAG to %s\n", absPath)
	}
}
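
// Initialize prepares the pipeline for Run(): it resolves the execution order of the items
// and then calls Configure() and Initialize() on each of them with the given facts.
// If the ConfigPipelineDryRun fact is true, the items are ordered but not configured or initialized.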
func (pipeline *Pipeline) Initialize(facts map[string]interface{}) {
	if facts == nil {
		facts = map[string]interface{}{}
	}
	if _, exists := facts[FactPipelineCommits]; !exists {
		facts[FactPipelineCommits] = pipeline.Commits()
	}
	dumpPath, _ := facts[ConfigPipelineDumpPath].(string)
	pipeline.resolve(dumpPath)
	if dryRun, _ := facts[ConfigPipelineDryRun].(bool); dryRun {
		return
	}
	for _, item := range pipeline.items {
		item.Configure(facts)
	}
	for _, item := range pipeline.items {
		item.Initialize(pipeline.repository)
	}
}

// Run method executes the pipeline.
//
// commits is a slice with the sequential commit history. It shall start from
// the root (ascending order).
//
// Returns the mapping from each LeafPipelineItem to the corresponding analysis result.
// There is always a "nil" record with CommonAnalysisResult.
func (pipeline *Pipeline) Run(commits []*object.Commit) (map[LeafPipelineItem]interface{}, error) {
	startRunTime := time.Now()
	onProgress := pipeline.OnProgress
	if onProgress == nil {
		onProgress = func(int, int) {}
	}
	for index, commit := range commits {
		onProgress(index, len(commits))
		state := map[string]interface{}{"commit": commit, "index": index}
		for _, item := range pipeline.items {
			update, err := item.Consume(state)
			if err != nil {
				fmt.Fprintf(os.Stderr, "%s failed on commit #%d %s\n",
					item.Name(), index, commit.Hash.String())
				return nil, err
			}
			for _, key := range item.Provides() {
				val, ok := update[key]
				if !ok {
					panic(fmt.Sprintf("%s: Consume() did not return %s", item.Name(), key))
				}
				state[key] = val
			}
		}
	}
	onProgress(len(commits), len(commits))
	result := map[LeafPipelineItem]interface{}{}
	for _, item := range pipeline.items {
		if casted, ok := item.(LeafPipelineItem); ok {
			result[casted] = casted.Finalize()
		}
	}
	result[nil] = &CommonAnalysisResult{
		BeginTime:     commits[0].Author.When.Unix(),
		EndTime:       commits[len(commits)-1].Author.When.Unix(),
		CommitsNumber: len(commits),
		RunTime:       time.Since(startRunTime),
	}
	return result, nil
}
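
// LoadCommitsFromFile reads commit hashes from the specified text file, one hash per line,
// and resolves them in the repository. Passing "-" as the path reads from standard input.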
func LoadCommitsFromFile(path string, repository *git.Repository) ([]*object.Commit, error) {
	var file io.ReadCloser
	if path != "-" {
		var err error
		file, err = os.Open(path)
		if err != nil {
			return nil, err
		}
		defer file.Close()
	} else {
		file = os.Stdin
	}
	scanner := bufio.NewScanner(file)
	commits := []*object.Commit{}
	for scanner.Scan() {
		text := scanner.Text()
		// a full SHA-1 hash is 40 hexadecimal characters
		if len(text) != 40 {
			return nil, errors.New("invalid commit hash " + text)
		}
		hash := plumbing.NewHash(text)
		commit, err := repository.CommitObject(hash)
		if err != nil {
			return nil, err
		}
		commits = append(commits, commit)
	}
	return commits, nil
}