pipeline.go

package hercules

import (
	"bufio"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"sort"
	"time"

	"gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
	"gopkg.in/src-d/hercules.v3/pb"
	"gopkg.in/src-d/hercules.v3/toposort"
)

// ConfigurationOptionType represents the possible types of a ConfigurationOption's value.
type ConfigurationOptionType int

const (
	// Boolean value type.
	BoolConfigurationOption ConfigurationOptionType = iota
	// Integer value type.
	IntConfigurationOption
	// String value type.
	StringConfigurationOption
)

// String() returns an empty string for the boolean type, "int" for integers and "string" for
// strings. It is used in the command line interface to show the argument's type.
func (opt ConfigurationOptionType) String() string {
	switch opt {
	case BoolConfigurationOption:
		return ""
	case IntConfigurationOption:
		return "int"
	case StringConfigurationOption:
		return "string"
	}
	panic(fmt.Sprintf("Invalid ConfigurationOptionType value %d", opt))
}

// ConfigurationOption allows for the unified, retrospective way to set up PipelineItem-s.
type ConfigurationOption struct {
	// Name identifies the configuration option in facts.
	Name string
	// Description represents the help text about the configuration option.
	Description string
	// Flag corresponds to the CLI token with "--" prepended.
	Flag string
	// Type specifies the kind of the configuration option's value.
	Type ConfigurationOptionType
	// Default is the initial value of the configuration option.
	Default interface{}
}

// FormatDefault() converts the default value of ConfigurationOption to string.
// Used in the command line interface to show the argument's default value.
func (opt ConfigurationOption) FormatDefault() string {
	if opt.Type != StringConfigurationOption {
		return fmt.Sprint(opt.Default)
	}
	return fmt.Sprintf("\"%s\"", opt.Default)
}

// PipelineItem is the interface for all the units in the Git commits analysis pipeline.
type PipelineItem interface {
	// Name returns the name of the analysis.
	Name() string
	// Provides returns the list of keys of reusable calculated entities.
	// Other items may depend on them.
	Provides() []string
	// Requires returns the list of keys of needed entities which must be supplied in Consume().
	Requires() []string
	// ListConfigurationOptions returns the list of available options which can be consumed by Configure().
	ListConfigurationOptions() []ConfigurationOption
	// Configure performs the initial setup of the object by applying parameters from facts.
	// It allows creating PipelineItems in a universal way.
	Configure(facts map[string]interface{})
	// Initialize prepares and resets the item. Consume() requires Initialize()
	// to be called at least once beforehand.
	Initialize(*git.Repository)
	// Consume processes the next commit.
	// deps contains the required entities which match Requires(). Besides, it always includes
	// "commit" and "index".
	// Returns the calculated entities which match Provides().
	Consume(deps map[string]interface{}) (map[string]interface{}, error)
}

// FeaturedPipelineItem enables switching the automatic insertion of pipeline items on or off.
type FeaturedPipelineItem interface {
	PipelineItem
	// Features returns the list of names which enable this item to be automatically inserted
	// in Pipeline.DeployItem().
	Features() []string
}

// LeafPipelineItem corresponds to the top level pipeline items which produce the end results.
type LeafPipelineItem interface {
	PipelineItem
	// Flag returns the cmdline name of the item.
	Flag() string
	// Finalize returns the result of the analysis.
	Finalize() interface{}
	// Serialize encodes the object returned by Finalize() to YAML or Protocol Buffers.
	Serialize(result interface{}, binary bool, writer io.Writer) error
}

// MergeablePipelineItem specifies the methods to combine several analysis results together.
type MergeablePipelineItem interface {
	LeafPipelineItem
	// Deserialize loads the result from a Protocol Buffers blob.
	Deserialize(pbmessage []byte) (interface{}, error)
	// MergeResults joins two results together. Common-s are specified as the global state.
	MergeResults(r1, r2 interface{}, c1, c2 *CommonAnalysisResult) interface{}
}

// CommonAnalysisResult holds the information which is always extracted at Pipeline.Run().
type CommonAnalysisResult struct {
	// Time of the first commit in the analysed sequence.
	BeginTime int64
	// Time of the last commit in the analysed sequence.
	EndTime int64
	// The number of commits in the analysed sequence.
	CommitsNumber int
	// The duration of Pipeline.Run().
	RunTime time.Duration
}

// BeginTimeAsTime() converts the UNIX timestamp of the beginning to Go time.
func (car *CommonAnalysisResult) BeginTimeAsTime() time.Time {
	return time.Unix(car.BeginTime, 0)
}

// EndTimeAsTime() converts the UNIX timestamp of the ending to Go time.
func (car *CommonAnalysisResult) EndTimeAsTime() time.Time {
	return time.Unix(car.EndTime, 0)
}

// Merge() combines the CommonAnalysisResult with another one.
// We choose the earlier BeginTime, the later EndTime, sum the number of commits and the
// elapsed run times.
func (car *CommonAnalysisResult) Merge(other *CommonAnalysisResult) {
	if car.EndTime == 0 || other.BeginTime == 0 {
		panic("Merging with an uninitialized CommonAnalysisResult")
	}
	if other.BeginTime < car.BeginTime {
		car.BeginTime = other.BeginTime
	}
	if other.EndTime > car.EndTime {
		car.EndTime = other.EndTime
	}
	car.CommitsNumber += other.CommitsNumber
	car.RunTime += other.RunTime
}

// FillMetadata() copies the data to a Protobuf message.
func (car *CommonAnalysisResult) FillMetadata(meta *pb.Metadata) *pb.Metadata {
	meta.BeginUnixTime = car.BeginTime
	meta.EndUnixTime = car.EndTime
	meta.Commits = int32(car.CommitsNumber)
	meta.RunTime = car.RunTime.Nanoseconds() / 1e6
	return meta
}

// MetadataToCommonAnalysisResult() copies the data from a Protobuf message.
func MetadataToCommonAnalysisResult(meta *pb.Metadata) *CommonAnalysisResult {
	return &CommonAnalysisResult{
		BeginTime:     meta.BeginUnixTime,
		EndTime:       meta.EndUnixTime,
		CommitsNumber: int(meta.Commits),
		RunTime:       time.Duration(meta.RunTime * 1e6),
	}
}

// Pipeline is the core Hercules entity which carries several PipelineItems and executes them.
// See the extended example of how a Pipeline works in doc.go.
type Pipeline struct {
	// OnProgress is the callback which is invoked in Run() to report progress.
	// The first argument is the number of processed commits and the
	// second is the total number of commits.
	OnProgress func(int, int)
	// repository points to the analysed Git repository struct from go-git.
	repository *git.Repository
	// items are the registered building blocks in the pipeline. The order defines the
	// execution sequence.
	items []PipelineItem
	// facts is the collection of parameters used to create the items.
	facts map[string]interface{}
	// features are the flags which enable the corresponding items.
	features map[string]bool
}

const (
	// ConfigPipelineDumpPath makes Pipeline save the DAG to the specified file.
	ConfigPipelineDumpPath = "Pipeline.DumpPath"
	// ConfigPipelineDryRun disables the Configure() and Initialize() invocation on each
	// PipelineItem during the initialization. Subsequent Run() calls are going to fail.
	// Useful together with ConfigPipelineDumpPath.
	ConfigPipelineDryRun = "Pipeline.DryRun"
	// FactPipelineCommits allows specifying a custom commit sequence. By default,
	// Pipeline.Commits() is used.
	FactPipelineCommits = "commits"
)

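// A minimal usage sketch of the API defined in this file, assuming some registered
// LeafPipelineItem implementation (called SomeAnalysis here purely for illustration):
//
//	pipeline := NewPipeline(repository)
//	pipeline.DeployItem(&SomeAnalysis{})
//	pipeline.Initialize(nil)
//	results, err := pipeline.Run(pipeline.Commits())
//
// See doc.go for the full example.

// NewPipeline initializes a new instance of the Pipeline struct.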
func NewPipeline(repository *git.Repository) *Pipeline {
	return &Pipeline{
		repository: repository,
		items:      []PipelineItem{},
		facts:      map[string]interface{}{},
		features:   map[string]bool{},
	}
}

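// GetFact returns the value of the fact with the specified name, or nil if it is not set.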
func (pipeline *Pipeline) GetFact(name string) interface{} {
	return pipeline.facts[name]
}

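// SetFact sets the value of the fact with the specified name.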
func (pipeline *Pipeline) SetFact(name string, value interface{}) {
	pipeline.facts[name] = value
}

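// GetFeature returns the status of the feature with the specified name: the first return value
// indicates whether it is enabled, the second whether it was set at all.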
func (pipeline *Pipeline) GetFeature(name string) (bool, bool) {
	val, exists := pipeline.features[name]
	return val, exists
}

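// SetFeature enables the feature with the specified name.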
func (pipeline *Pipeline) SetFeature(name string) {
	pipeline.features[name] = true
}

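// SetFeaturesFromFlags enables the features requested through the command line flags which are
// recorded in the given PipelineItemRegistry. If no registry is passed, the global Registry is used.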
func (pipeline *Pipeline) SetFeaturesFromFlags(registry ...*PipelineItemRegistry) {
	var ffr *PipelineItemRegistry
	if len(registry) == 0 {
		ffr = Registry
	} else if len(registry) == 1 {
		ffr = registry[0]
	} else {
		panic("Zero or one registry is allowed to be passed.")
	}
	for _, feature := range ffr.featureFlags.Flags {
		pipeline.SetFeature(feature)
	}
}

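// DeployItem inserts a PipelineItem into the pipeline together with all the items it
// transitively requires (resolved through the Registry), skipping the dependencies whose
// features are not enabled.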
func (pipeline *Pipeline) DeployItem(item PipelineItem) PipelineItem {
	fpi, ok := item.(FeaturedPipelineItem)
	if ok {
		for _, f := range fpi.Features() {
			pipeline.SetFeature(f)
		}
	}
	queue := []PipelineItem{}
	queue = append(queue, item)
	added := map[string]PipelineItem{}
	for _, item := range pipeline.items {
		added[item.Name()] = item
	}
	added[item.Name()] = item
	pipeline.AddItem(item)
	for len(queue) > 0 {
		head := queue[0]
		queue = queue[1:]
		for _, dep := range head.Requires() {
			for _, sibling := range Registry.Summon(dep) {
				if _, exists := added[sibling.Name()]; !exists {
					disabled := false
					// If this item supports features, check them against the ones activated in pipeline.features
					if fpi, matches := sibling.(FeaturedPipelineItem); matches {
						for _, feature := range fpi.Features() {
							if !pipeline.features[feature] {
								disabled = true
								break
							}
						}
					}
					if disabled {
						continue
					}
					added[sibling.Name()] = sibling
					queue = append(queue, sibling)
					pipeline.AddItem(sibling)
				}
			}
		}
	}
	return item
}

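// AddItem appends a PipelineItem to the pipeline without resolving its dependencies.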
func (pipeline *Pipeline) AddItem(item PipelineItem) PipelineItem {
	pipeline.items = append(pipeline.items, item)
	return item
}

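// RemoveItem deletes the specified PipelineItem from the pipeline.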
func (pipeline *Pipeline) RemoveItem(item PipelineItem) {
	for i, reg := range pipeline.items {
		if reg == item {
			pipeline.items = append(pipeline.items[:i], pipeline.items[i+1:]...)
			return
		}
	}
}

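// Len returns the number of items in the pipeline.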
func (pipeline *Pipeline) Len() int {
	return len(pipeline.items)
}

// Commits returns the critical path in the repository's history. It starts
// from HEAD and traces commits backwards till the root. When it encounters
// a merge (more than one parent), it always chooses the first parent.
func (pipeline *Pipeline) Commits() []*object.Commit {
	result := []*object.Commit{}
	repository := pipeline.repository
	head, err := repository.Head()
	if err != nil {
		panic(err)
	}
	commit, err := repository.CommitObject(head.Hash())
	if err != nil {
		panic(err)
	}
	// the first parent matches the head
	for ; err != io.EOF; commit, err = commit.Parents().Next() {
		if err != nil {
			panic(err)
		}
		result = append(result, commit)
	}
	// reverse the order
	for i, j := 0, len(result)-1; i < j; i, j = i+1, j-1 {
		result[i], result[j] = result[j], result[i]
	}
	return result
}

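// sortablePipelineItems allows sorting PipelineItem-s by name with the standard sort package.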
type sortablePipelineItems []PipelineItem

func (items sortablePipelineItems) Len() int {
	return len(items)
}

func (items sortablePipelineItems) Less(i, j int) bool {
	return items[i].Name() < items[j].Name()
}

func (items sortablePipelineItems) Swap(i, j int) {
	items[i], items[j] = items[j], items[i]
}

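// resolve topologically sorts the pipeline items according to their Provides()/Requires()
// dependencies, disambiguating the keys which are provided by more than one item.
// If dumpPath is not empty, the resulting dependency graph is written to that file.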
func (pipeline *Pipeline) resolve(dumpPath string) {
	graph := toposort.NewGraph()
	sort.Sort(sortablePipelineItems(pipeline.items))
	name2item := map[string]PipelineItem{}
	ambiguousMap := map[string][]string{}
	nameUsages := map[string]int{}
	for _, item := range pipeline.items {
		nameUsages[item.Name()]++
	}
	counters := map[string]int{}
	for _, item := range pipeline.items {
		name := item.Name()
		if nameUsages[name] > 1 {
			index := counters[item.Name()] + 1
			counters[item.Name()] = index
			name = fmt.Sprintf("%s_%d", item.Name(), index)
		}
		graph.AddNode(name)
		name2item[name] = item
		for _, key := range item.Provides() {
			key = "[" + key + "]"
			graph.AddNode(key)
			if graph.AddEdge(name, key) > 1 {
				if ambiguousMap[key] != nil {
					fmt.Fprintln(os.Stderr, "Pipeline:")
					for _, item2 := range pipeline.items {
						if item2 == item {
							fmt.Fprint(os.Stderr, "> ")
						}
						fmt.Fprint(os.Stderr, item2.Name(), " [")
						for i, key2 := range item2.Provides() {
							fmt.Fprint(os.Stderr, key2)
							if i < len(item2.Provides())-1 {
								fmt.Fprint(os.Stderr, ", ")
							}
						}
						fmt.Fprintln(os.Stderr, "]")
					}
					panic("Failed to resolve pipeline dependencies: ambiguous graph.")
				}
				ambiguousMap[key] = graph.FindParents(key)
			}
		}
	}
	counters = map[string]int{}
	for _, item := range pipeline.items {
		name := item.Name()
		if nameUsages[name] > 1 {
			index := counters[item.Name()] + 1
			counters[item.Name()] = index
			name = fmt.Sprintf("%s_%d", item.Name(), index)
		}
		for _, key := range item.Requires() {
			key = "[" + key + "]"
			if graph.AddEdge(key, name) == 0 {
				panic(fmt.Sprintf("Unsatisfied dependency: %s -> %s", key, item.Name()))
			}
		}
	}
	// Each ambiguous key has two providers; choose the one which comes later in the BFS
	// order ("inheritor") and re-route the key's outgoing edges through it.
	if len(ambiguousMap) > 0 {
		ambiguous := []string{}
		for key := range ambiguousMap {
			ambiguous = append(ambiguous, key)
		}
		sort.Strings(ambiguous)
		bfsorder := graph.BreadthSort()
		bfsindex := map[string]int{}
		for i, s := range bfsorder {
			bfsindex[s] = i
		}
		for len(ambiguous) > 0 {
			key := ambiguous[0]
			ambiguous = ambiguous[1:]
			pair := ambiguousMap[key]
			inheritor := pair[1]
			if bfsindex[pair[1]] < bfsindex[pair[0]] {
				inheritor = pair[0]
			}
			removed := graph.RemoveEdge(key, inheritor)
			cycle := map[string]bool{}
			for _, node := range graph.FindCycle(key) {
				cycle[node] = true
			}
			if len(cycle) == 0 {
				cycle[inheritor] = true
			}
			if removed {
				graph.AddEdge(key, inheritor)
			}
			graph.RemoveEdge(inheritor, key)
			graph.ReindexNode(inheritor)
			// for all nodes key links to except those in cycle, put the link from inheritor
			for _, node := range graph.FindChildren(key) {
				if _, exists := cycle[node]; !exists {
					graph.AddEdge(inheritor, node)
					graph.RemoveEdge(key, node)
				}
			}
			graph.ReindexNode(key)
		}
	}
	var graphCopy *toposort.Graph
	if dumpPath != "" {
		graphCopy = graph.Copy()
	}
	strplan, ok := graph.Toposort()
	if !ok {
		panic("Failed to resolve pipeline dependencies: unable to topologically sort the items.")
	}
	pipeline.items = make([]PipelineItem, 0, len(pipeline.items))
	for _, key := range strplan {
		if item, ok := name2item[key]; ok {
			pipeline.items = append(pipeline.items, item)
		}
	}
	if dumpPath != "" {
		// If there is a floating difference, uncomment this:
		// fmt.Fprint(os.Stderr, graphCopy.DebugDump())
		ioutil.WriteFile(dumpPath, []byte(graphCopy.Serialize(strplan)), 0666)
		absPath, _ := filepath.Abs(dumpPath)
		fmt.Fprintf(os.Stderr, "Wrote the DAG to %s\n", absPath)
	}
}

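// Initialize prepares the pipeline for running: it resolves the execution order of the items,
// then calls Configure() and Initialize() on each of them with the given facts.
// The ConfigPipelineDryRun fact stops the process right after the resolution step.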
func (pipeline *Pipeline) Initialize(facts map[string]interface{}) {
	if facts == nil {
		facts = map[string]interface{}{}
	}
	if _, exists := facts[FactPipelineCommits]; !exists {
		facts[FactPipelineCommits] = pipeline.Commits()
	}
	dumpPath, _ := facts[ConfigPipelineDumpPath].(string)
	pipeline.resolve(dumpPath)
	if dryRun, _ := facts[ConfigPipelineDryRun].(bool); dryRun {
		return
	}
	for _, item := range pipeline.items {
		item.Configure(facts)
	}
	for _, item := range pipeline.items {
		item.Initialize(pipeline.repository)
	}
}

// Run method executes the pipeline.
//
// commits is a slice with the sequential commit history. It shall start from
// the root (ascending order).
//
// Returns the mapping from each LeafPipelineItem to the corresponding analysis result.
// There is always a "nil" record with CommonAnalysisResult.
func (pipeline *Pipeline) Run(commits []*object.Commit) (map[LeafPipelineItem]interface{}, error) {
	startRunTime := time.Now()
	onProgress := pipeline.OnProgress
	if onProgress == nil {
		onProgress = func(int, int) {}
	}
	for index, commit := range commits {
		onProgress(index, len(commits))
		state := map[string]interface{}{"commit": commit, "index": index}
		for _, item := range pipeline.items {
			update, err := item.Consume(state)
			if err != nil {
				fmt.Fprintf(os.Stderr, "%s failed on commit #%d %s\n",
					item.Name(), index, commit.Hash.String())
				return nil, err
			}
			for _, key := range item.Provides() {
				val, ok := update[key]
				if !ok {
					panic(fmt.Sprintf("%s: Consume() did not return %s", item.Name(), key))
				}
				state[key] = val
			}
		}
	}
	onProgress(len(commits), len(commits))
	result := map[LeafPipelineItem]interface{}{}
	for _, item := range pipeline.items {
		if casted, ok := item.(LeafPipelineItem); ok {
			result[casted] = casted.Finalize()
		}
	}
	result[nil] = &CommonAnalysisResult{
		BeginTime:     commits[0].Author.When.Unix(),
		EndTime:       commits[len(commits)-1].Author.When.Unix(),
		CommitsNumber: len(commits),
		RunTime:       time.Since(startRunTime),
	}
	return result, nil
}

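// LoadCommitsFromFile reads the file at the specified path ("-" means stdin), interprets each
// line as a Git commit hash and resolves the corresponding commits in the given repository.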
func LoadCommitsFromFile(path string, repository *git.Repository) ([]*object.Commit, error) {
	var file io.ReadCloser
	if path != "-" {
		var err error
		file, err = os.Open(path)
		if err != nil {
			return nil, err
		}
		defer file.Close()
	} else {
		file = os.Stdin
	}
	scanner := bufio.NewScanner(file)
	commits := []*object.Commit{}
	for scanner.Scan() {
		hash := plumbing.NewHash(scanner.Text())
		// plumbing.Hash is a fixed-size array, so checking its length is pointless;
		// NewHash returns the zero hash for malformed input, so check for that instead.
		if hash.IsZero() {
			return nil, errors.New("invalid commit hash " + scanner.Text())
		}
		commit, err := repository.CommitObject(hash)
		if err != nil {
			return nil, err
		}
		commits = append(commits, commit)
	}
	return commits, nil
}