pipeline.go 19 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602
  1. package hercules
import (
	"bufio"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"sort"
	"time"

	"gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
	"gopkg.in/src-d/hercules.v3/pb"
	"gopkg.in/src-d/hercules.v3/toposort"
)
  18. // ConfigurationOptionType represents the possible types of a ConfigurationOption's value.
  19. type ConfigurationOptionType int
  20. const (
  21. // BoolConfigurationOption reflects the boolean value type.
  22. BoolConfigurationOption ConfigurationOptionType = iota
  23. // IntConfigurationOption reflects the integer value type.
  24. IntConfigurationOption
  25. // StringConfigurationOption reflects the string value type.
  26. StringConfigurationOption
  27. )
  28. // String() returns an empty string for the boolean type, "int" for integers and "string" for
  29. // strings. It is used in the command line interface to show the argument's type.
  30. func (opt ConfigurationOptionType) String() string {
  31. switch opt {
  32. case BoolConfigurationOption:
  33. return ""
  34. case IntConfigurationOption:
  35. return "int"
  36. case StringConfigurationOption:
  37. return "string"
  38. }
  39. panic(fmt.Sprintf("Invalid ConfigurationOptionType value %d", opt))
  40. }
  41. // ConfigurationOption allows for the unified, retrospective way to setup PipelineItem-s.
  42. type ConfigurationOption struct {
  43. // Name identifies the configuration option in facts.
  44. Name string
  45. // Description represents the help text about the configuration option.
  46. Description string
  47. // Flag corresponds to the CLI token with "--" prepended.
  48. Flag string
  49. // Type specifies the kind of the configuration option's value.
  50. Type ConfigurationOptionType
  51. // Default is the initial value of the configuration option.
  52. Default interface{}
  53. }
  54. // FormatDefault converts the default value of ConfigurationOption to string.
  55. // Used in the command line interface to show the argument's default value.
  56. func (opt ConfigurationOption) FormatDefault() string {
  57. if opt.Type != StringConfigurationOption {
  58. return fmt.Sprint(opt.Default)
  59. }
  60. return fmt.Sprintf("\"%s\"", opt.Default)
  61. }
// PipelineItem is the interface for all the units in the Git commits analysis pipeline.
type PipelineItem interface {
	// Name returns the name of the analysis.
	Name() string
	// Provides returns the list of keys of reusable calculated entities.
	// Other items may depend on them.
	Provides() []string
	// Requires returns the list of keys of needed entities which must be supplied in Consume().
	Requires() []string
	// ListConfigurationOptions returns the list of available options which can be consumed by Configure().
	ListConfigurationOptions() []ConfigurationOption
	// Configure performs the initial setup of the object by applying parameters from facts.
	// It allows to create PipelineItems in a universal way.
	Configure(facts map[string]interface{})
	// Initialize prepares and resets the item. Consume() requires Initialize()
	// to be called at least once beforehand.
	Initialize(*git.Repository)
	// Consume processes the next commit.
	// deps contains the required entities which match Requires(). Besides, it always includes
	// "commit" and "index".
	// Returns the calculated entities which match Provides().
	Consume(deps map[string]interface{}) (map[string]interface{}, error)
}
// FeaturedPipelineItem enables switching the automatic insertion of pipeline items on or off.
type FeaturedPipelineItem interface {
	PipelineItem
	// Features returns the list of names which enable this item to be automatically inserted
	// in Pipeline.DeployItem().
	Features() []string
}
// LeafPipelineItem corresponds to the top level pipeline items which produce the end results.
type LeafPipelineItem interface {
	PipelineItem
	// Flag returns the cmdline switch name of the item.
	Flag() string
	// Finalize returns the result of the analysis.
	Finalize() interface{}
	// Serialize encodes the object returned by Finalize() to YAML or Protocol Buffers
	// (binary selects the format) and writes it to writer.
	Serialize(result interface{}, binary bool, writer io.Writer) error
}
// MergeablePipelineItem specifies the methods to combine several analysis results together.
type MergeablePipelineItem interface {
	LeafPipelineItem
	// Deserialize loads the result from a Protocol Buffers blob.
	Deserialize(pbmessage []byte) (interface{}, error)
	// MergeResults joins two results together. Common-s are specified as the global state.
	MergeResults(r1, r2 interface{}, c1, c2 *CommonAnalysisResult) interface{}
}
  110. // CommonAnalysisResult holds the information which is always extracted at Pipeline.Run().
  111. type CommonAnalysisResult struct {
  112. // Time of the first commit in the analysed sequence.
  113. BeginTime int64
  114. // Time of the last commit in the analysed sequence.
  115. EndTime int64
  116. // The number of commits in the analysed sequence.
  117. CommitsNumber int
  118. // The duration of Pipeline.Run().
  119. RunTime time.Duration
  120. }
  121. // BeginTimeAsTime converts the UNIX timestamp of the beginning to Go time.
  122. func (car *CommonAnalysisResult) BeginTimeAsTime() time.Time {
  123. return time.Unix(car.BeginTime, 0)
  124. }
  125. // EndTimeAsTime converts the UNIX timestamp of the ending to Go time.
  126. func (car *CommonAnalysisResult) EndTimeAsTime() time.Time {
  127. return time.Unix(car.EndTime, 0)
  128. }
  129. // Merge combines the CommonAnalysisResult with an other one.
  130. // We choose the earlier BeginTime, the later EndTime, sum the number of commits and the
  131. // elapsed run times.
  132. func (car *CommonAnalysisResult) Merge(other *CommonAnalysisResult) {
  133. if car.EndTime == 0 || other.BeginTime == 0 {
  134. panic("Merging with an uninitialized CommonAnalysisResult")
  135. }
  136. if other.BeginTime < car.BeginTime {
  137. car.BeginTime = other.BeginTime
  138. }
  139. if other.EndTime > car.EndTime {
  140. car.EndTime = other.EndTime
  141. }
  142. car.CommitsNumber += other.CommitsNumber
  143. car.RunTime += other.RunTime
  144. }
// FillMetadata copies the data to a Protobuf message and returns the same message.
func (car *CommonAnalysisResult) FillMetadata(meta *pb.Metadata) *pb.Metadata {
	meta.BeginUnixTime = car.BeginTime
	meta.EndUnixTime = car.EndTime
	meta.Commits = int32(car.CommitsNumber)
	// RunTime is serialized in milliseconds.
	meta.RunTime = car.RunTime.Nanoseconds() / 1e6
	return meta
}
// MetadataToCommonAnalysisResult copies the data from a Protobuf message.
// It is the inverse of CommonAnalysisResult.FillMetadata().
func MetadataToCommonAnalysisResult(meta *pb.Metadata) *CommonAnalysisResult {
	return &CommonAnalysisResult{
		BeginTime:     meta.BeginUnixTime,
		EndTime:       meta.EndUnixTime,
		CommitsNumber: int(meta.Commits),
		// The wire format stores milliseconds; convert back to nanoseconds.
		RunTime: time.Duration(meta.RunTime * 1e6),
	}
}
// Pipeline is the core Hercules entity which carries several PipelineItems and executes them.
// See the extended example of how a Pipeline works in doc.go
type Pipeline struct {
	// OnProgress is the callback which is invoked in Analyse() to output its
	// progress. The first argument is the number of processed commits and the
	// second is the total number of commits.
	OnProgress func(int, int)

	// repository points to the analysed Git repository struct from go-git.
	repository *git.Repository

	// items are the registered building blocks in the pipeline. The order defines the
	// execution sequence (rewritten topologically by resolve()).
	items []PipelineItem

	// facts is the collection of parameters used to create and Configure() items.
	facts map[string]interface{}

	// features is the set of flags which enable the corresponding optional items.
	features map[string]bool
}
const (
	// ConfigPipelineDumpPath is the name of the Pipeline configuration option (Pipeline.Initialize())
	// which enables saving the items DAG to the specified file.
	ConfigPipelineDumpPath = "Pipeline.DumpPath"
	// ConfigPipelineDryRun is the name of the Pipeline configuration option (Pipeline.Initialize())
	// which disables Configure() and Initialize() invocation on each PipelineItem during the
	// Pipeline initialization.
	// Subsequent Run() calls are going to fail. Useful with ConfigPipelineDumpPath=true.
	ConfigPipelineDryRun = "Pipeline.DryRun"
	// ConfigPipelineCommits is the name of the Pipeline configuration option (Pipeline.Initialize())
	// which allows to specify the custom commit sequence. By default, Pipeline.Commits() is used.
	ConfigPipelineCommits = "commits"
)
  192. // NewPipeline initializes a new instance of Pipeline struct.
  193. func NewPipeline(repository *git.Repository) *Pipeline {
  194. return &Pipeline{
  195. repository: repository,
  196. items: []PipelineItem{},
  197. facts: map[string]interface{}{},
  198. features: map[string]bool{},
  199. }
  200. }
  201. // GetFact returns the value of the fact with the specified name.
  202. func (pipeline *Pipeline) GetFact(name string) interface{} {
  203. return pipeline.facts[name]
  204. }
// SetFact sets the value of the fact with the specified name.
// Facts are later passed to every item's Configure() by Initialize().
func (pipeline *Pipeline) SetFact(name string, value interface{}) {
	pipeline.facts[name] = value
}
  209. // GetFeature returns the state of the feature with the specified name (enabled/disabled) and
  210. // whether it exists. See also: FeaturedPipelineItem.
  211. func (pipeline *Pipeline) GetFeature(name string) (bool, bool) {
  212. val, exists := pipeline.features[name]
  213. return val, exists
  214. }
// SetFeature sets the value of the feature with the specified name.
// Enabled features allow matching FeaturedPipelineItems to be deployed.
// See also: FeaturedPipelineItem.
func (pipeline *Pipeline) SetFeature(name string) {
	pipeline.features[name] = true
}
  220. // SetFeaturesFromFlags enables the features which were specified through the command line flags
  221. // which belong to the given PipelineItemRegistry instance.
  222. // See also: AddItem().
  223. func (pipeline *Pipeline) SetFeaturesFromFlags(registry ...*PipelineItemRegistry) {
  224. var ffr *PipelineItemRegistry
  225. if len(registry) == 0 {
  226. ffr = Registry
  227. } else if len(registry) == 1 {
  228. ffr = registry[0]
  229. } else {
  230. panic("Zero or one registry is allowed to be passed.")
  231. }
  232. for _, feature := range ffr.featureFlags.Flags {
  233. pipeline.SetFeature(feature)
  234. }
  235. }
// DeployItem inserts a PipelineItem into the pipeline. It also recursively creates all of its
// dependencies (PipelineItem.Requires()). Returns the same item as specified in the arguments.
func (pipeline *Pipeline) DeployItem(item PipelineItem) PipelineItem {
	fpi, ok := item.(FeaturedPipelineItem)
	if ok {
		// An explicitly deployed item switches on all of the features it declares.
		for _, f := range fpi.Features() {
			pipeline.SetFeature(f)
		}
	}
	queue := []PipelineItem{}
	queue = append(queue, item)
	// added tracks which item names are already in the pipeline to avoid duplicates.
	added := map[string]PipelineItem{}
	for _, item := range pipeline.items {
		added[item.Name()] = item
	}
	added[item.Name()] = item
	pipeline.AddItem(item)
	// Breadth-first traversal over the transitive dependency closure.
	for len(queue) > 0 {
		head := queue[0]
		queue = queue[1:]
		for _, dep := range head.Requires() {
			// Registry.Summon returns the registered items which provide "dep".
			for _, sibling := range Registry.Summon(dep) {
				if _, exists := added[sibling.Name()]; !exists {
					disabled := false
					// If this item supports features, check them against the activated in pipeline.features
					if fpi, matches := sibling.(FeaturedPipelineItem); matches {
						for _, feature := range fpi.Features() {
							if !pipeline.features[feature] {
								disabled = true
								break
							}
						}
					}
					if disabled {
						continue
					}
					added[sibling.Name()] = sibling
					queue = append(queue, sibling)
					pipeline.AddItem(sibling)
				}
			}
		}
	}
	return item
}
// AddItem inserts a PipelineItem into the pipeline. It does not check any dependencies.
// Returns the same item as specified in the arguments.
// See also: DeployItem().
func (pipeline *Pipeline) AddItem(item PipelineItem) PipelineItem {
	pipeline.items = append(pipeline.items, item)
	return item
}
  287. // RemoveItem deletes a PipelineItem from the pipeline. It leaves all the rest of the items intact.
  288. func (pipeline *Pipeline) RemoveItem(item PipelineItem) {
  289. for i, reg := range pipeline.items {
  290. if reg == item {
  291. pipeline.items = append(pipeline.items[:i], pipeline.items[i+1:]...)
  292. return
  293. }
  294. }
  295. }
// Len returns the number of items in the pipeline.
func (pipeline *Pipeline) Len() int {
	return len(pipeline.items)
}
// Commits returns the critical path in the repository's history. It starts
// from HEAD and traces commits backwards till the root. When it encounters
// a merge (more than one parent), it always chooses the first parent.
// The result is ordered root-first (ascending). Panics on repository errors.
func (pipeline *Pipeline) Commits() []*object.Commit {
	result := []*object.Commit{}
	repository := pipeline.repository
	head, err := repository.Head()
	if err != nil {
		panic(err)
	}
	commit, err := repository.CommitObject(head.Hash())
	if err != nil {
		panic(err)
	}
	// the first parent matches the head:
	// Parents() yields a fresh iterator on each call, so Next() always returns
	// the first parent; io.EOF signals the root commit and terminates the loop.
	for ; err != io.EOF; commit, err = commit.Parents().Next() {
		if err != nil {
			panic(err)
		}
		result = append(result, commit)
	}
	// reverse the order in place so the sequence goes from the root to HEAD
	for i, j := 0, len(result)-1; i < j; i, j = i+1, j-1 {
		result[i], result[j] = result[j], result[i]
	}
	return result
}
// sortablePipelineItems implements sort.Interface to order PipelineItems by Name().
type sortablePipelineItems []PipelineItem

// Len returns the number of items.
func (items sortablePipelineItems) Len() int {
	return len(items)
}

// Less orders the items alphabetically by their Name().
func (items sortablePipelineItems) Less(i, j int) bool {
	return items[i].Name() < items[j].Name()
}

// Swap exchanges the items at positions i and j.
func (items sortablePipelineItems) Swap(i, j int) {
	items[i], items[j] = items[j], items[i]
}
  337. func (pipeline *Pipeline) resolve(dumpPath string) {
  338. graph := toposort.NewGraph()
  339. sort.Sort(sortablePipelineItems(pipeline.items))
  340. name2item := map[string]PipelineItem{}
  341. ambiguousMap := map[string][]string{}
  342. nameUsages := map[string]int{}
  343. for _, item := range pipeline.items {
  344. nameUsages[item.Name()]++
  345. }
  346. counters := map[string]int{}
  347. for _, item := range pipeline.items {
  348. name := item.Name()
  349. if nameUsages[name] > 1 {
  350. index := counters[item.Name()] + 1
  351. counters[item.Name()] = index
  352. name = fmt.Sprintf("%s_%d", item.Name(), index)
  353. }
  354. graph.AddNode(name)
  355. name2item[name] = item
  356. for _, key := range item.Provides() {
  357. key = "[" + key + "]"
  358. graph.AddNode(key)
  359. if graph.AddEdge(name, key) > 1 {
  360. if ambiguousMap[key] != nil {
  361. fmt.Fprintln(os.Stderr, "Pipeline:")
  362. for _, item2 := range pipeline.items {
  363. if item2 == item {
  364. fmt.Fprint(os.Stderr, "> ")
  365. }
  366. fmt.Fprint(os.Stderr, item2.Name(), " [")
  367. for i, key2 := range item2.Provides() {
  368. fmt.Fprint(os.Stderr, key2)
  369. if i < len(item.Provides())-1 {
  370. fmt.Fprint(os.Stderr, ", ")
  371. }
  372. }
  373. fmt.Fprintln(os.Stderr, "]")
  374. }
  375. panic("Failed to resolve pipeline dependencies: ambiguous graph.")
  376. }
  377. ambiguousMap[key] = graph.FindParents(key)
  378. }
  379. }
  380. }
  381. counters = map[string]int{}
  382. for _, item := range pipeline.items {
  383. name := item.Name()
  384. if nameUsages[name] > 1 {
  385. index := counters[item.Name()] + 1
  386. counters[item.Name()] = index
  387. name = fmt.Sprintf("%s_%d", item.Name(), index)
  388. }
  389. for _, key := range item.Requires() {
  390. key = "[" + key + "]"
  391. if graph.AddEdge(key, name) == 0 {
  392. panic(fmt.Sprintf("Unsatisfied dependency: %s -> %s", key, item.Name()))
  393. }
  394. }
  395. }
  396. // Try to break the cycles in some known scenarios.
  397. if len(ambiguousMap) > 0 {
  398. ambiguous := []string{}
  399. for key := range ambiguousMap {
  400. ambiguous = append(ambiguous, key)
  401. }
  402. sort.Strings(ambiguous)
  403. bfsorder := graph.BreadthSort()
  404. bfsindex := map[string]int{}
  405. for i, s := range bfsorder {
  406. bfsindex[s] = i
  407. }
  408. for len(ambiguous) > 0 {
  409. key := ambiguous[0]
  410. ambiguous = ambiguous[1:]
  411. pair := ambiguousMap[key]
  412. inheritor := pair[1]
  413. if bfsindex[pair[1]] < bfsindex[pair[0]] {
  414. inheritor = pair[0]
  415. }
  416. removed := graph.RemoveEdge(key, inheritor)
  417. cycle := map[string]bool{}
  418. for _, node := range graph.FindCycle(key) {
  419. cycle[node] = true
  420. }
  421. if len(cycle) == 0 {
  422. cycle[inheritor] = true
  423. }
  424. if removed {
  425. graph.AddEdge(key, inheritor)
  426. }
  427. graph.RemoveEdge(inheritor, key)
  428. graph.ReindexNode(inheritor)
  429. // for all nodes key links to except those in cycle, put the link from inheritor
  430. for _, node := range graph.FindChildren(key) {
  431. if _, exists := cycle[node]; !exists {
  432. graph.AddEdge(inheritor, node)
  433. graph.RemoveEdge(key, node)
  434. }
  435. }
  436. graph.ReindexNode(key)
  437. }
  438. }
  439. var graphCopy *toposort.Graph
  440. if dumpPath != "" {
  441. graphCopy = graph.Copy()
  442. }
  443. strplan, ok := graph.Toposort()
  444. if !ok {
  445. panic("Failed to resolve pipeline dependencies: unable to topologically sort the items.")
  446. }
  447. pipeline.items = make([]PipelineItem, 0, len(pipeline.items))
  448. for _, key := range strplan {
  449. if item, ok := name2item[key]; ok {
  450. pipeline.items = append(pipeline.items, item)
  451. }
  452. }
  453. if dumpPath != "" {
  454. // If there is a floating difference, uncomment this:
  455. // fmt.Fprint(os.Stderr, graphCopy.DebugDump())
  456. ioutil.WriteFile(dumpPath, []byte(graphCopy.Serialize(strplan)), 0666)
  457. absPath, _ := filepath.Abs(dumpPath)
  458. fmt.Fprintf(os.Stderr, "Wrote the DAG to %s\n", absPath)
  459. }
  460. }
  461. // Initialize prepares the pipeline for the execution (Run()). This function
  462. // resolves the execution DAG, Configure()-s and Initialize()-s the items in it in the
  463. // topological dependency order. `facts` are passed inside Configure(). They are mutable.
  464. func (pipeline *Pipeline) Initialize(facts map[string]interface{}) {
  465. if facts == nil {
  466. facts = map[string]interface{}{}
  467. }
  468. if _, exists := facts[ConfigPipelineCommits]; !exists {
  469. facts[ConfigPipelineCommits] = pipeline.Commits()
  470. }
  471. dumpPath, _ := facts[ConfigPipelineDumpPath].(string)
  472. pipeline.resolve(dumpPath)
  473. if dryRun, _ := facts[ConfigPipelineDryRun].(bool); dryRun {
  474. return
  475. }
  476. for _, item := range pipeline.items {
  477. item.Configure(facts)
  478. }
  479. for _, item := range pipeline.items {
  480. item.Initialize(pipeline.repository)
  481. }
  482. }
  483. // Run method executes the pipeline.
  484. //
  485. // commits is a slice with the sequential commit history. It shall start from
  486. // the root (ascending order).
  487. //
  488. // Returns the mapping from each LeafPipelineItem to the corresponding analysis result.
  489. // There is always a "nil" record with CommonAnalysisResult.
  490. func (pipeline *Pipeline) Run(commits []*object.Commit) (map[LeafPipelineItem]interface{}, error) {
  491. startRunTime := time.Now()
  492. onProgress := pipeline.OnProgress
  493. if onProgress == nil {
  494. onProgress = func(int, int) {}
  495. }
  496. for index, commit := range commits {
  497. onProgress(index, len(commits))
  498. state := map[string]interface{}{"commit": commit, "index": index}
  499. for _, item := range pipeline.items {
  500. update, err := item.Consume(state)
  501. if err != nil {
  502. fmt.Fprintf(os.Stderr, "%s failed on commit #%d %s\n",
  503. item.Name(), index, commit.Hash.String())
  504. return nil, err
  505. }
  506. for _, key := range item.Provides() {
  507. val, ok := update[key]
  508. if !ok {
  509. panic(fmt.Sprintf("%s: Consume() did not return %s", item.Name(), key))
  510. }
  511. state[key] = val
  512. }
  513. }
  514. }
  515. onProgress(len(commits), len(commits))
  516. result := map[LeafPipelineItem]interface{}{}
  517. for _, item := range pipeline.items {
  518. if casted, ok := item.(LeafPipelineItem); ok {
  519. result[casted] = casted.Finalize()
  520. }
  521. }
  522. result[nil] = &CommonAnalysisResult{
  523. BeginTime: commits[0].Author.When.Unix(),
  524. EndTime: commits[len(commits)-1].Author.When.Unix(),
  525. CommitsNumber: len(commits),
  526. RunTime: time.Since(startRunTime),
  527. }
  528. return result, nil
  529. }
  530. // LoadCommitsFromFile reads the file by the specified FS path and generates the sequence of commits
  531. // by interpreting each line as a Git commit hash.
  532. func LoadCommitsFromFile(path string, repository *git.Repository) ([]*object.Commit, error) {
  533. var file io.ReadCloser
  534. if path != "-" {
  535. var err error
  536. file, err = os.Open(path)
  537. if err != nil {
  538. return nil, err
  539. }
  540. defer file.Close()
  541. } else {
  542. file = os.Stdin
  543. }
  544. scanner := bufio.NewScanner(file)
  545. commits := []*object.Commit{}
  546. for scanner.Scan() {
  547. hash := plumbing.NewHash(scanner.Text())
  548. if len(hash) != 20 {
  549. return nil, errors.New("invalid commit hash " + scanner.Text())
  550. }
  551. commit, err := repository.CommitObject(hash)
  552. if err != nil {
  553. return nil, err
  554. }
  555. commits = append(commits, commit)
  556. }
  557. return commits, nil
  558. }