pipeline.go

package hercules

import (
	"bufio"
	"errors"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"reflect"
	"sort"
	"strings"
	"time"
	"unsafe"

	"gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
	"gopkg.in/src-d/hercules.v3/pb"
	"gopkg.in/src-d/hercules.v3/toposort"
)

// ConfigurationOptionType is the type of the value of a ConfigurationOption.
type ConfigurationOptionType int

const (
	// BoolConfigurationOption is the boolean value type.
	BoolConfigurationOption ConfigurationOptionType = iota
	// IntConfigurationOption is the integer value type.
	IntConfigurationOption
	// StringConfigurationOption is the string value type.
	StringConfigurationOption
)

const (
	// ConfigPipelineDumpPath is the name of the Pipeline fact which holds the path of the
	// Graphviz dump of the dependency DAG ("-dump-dag" command line flag).
	ConfigPipelineDumpPath = "Pipeline.DumpPath"
	// ConfigPipelineDryRun is the name of the Pipeline fact which, when set to true, makes
	// Initialize() stop after resolving the DAG ("-dry-run" command line flag).
	ConfigPipelineDryRun = "Pipeline.DryRun"
)

// ConfigurationOption allows for the unified, retrospective way to set up PipelineItem-s.
type ConfigurationOption struct {
	// Name identifies the configuration option in facts.
	Name string
	// Description represents the help text about the configuration option.
	Description string
	// Flag corresponds to the CLI token with "-" prepended.
	Flag string
	// Type specifies the kind of the configuration option's value.
	Type ConfigurationOptionType
	// Default is the initial value of the configuration option.
	Default interface{}
}

// PipelineItem is the interface for all the units of the Git commit analysis pipeline.
type PipelineItem interface {
	// Name returns the name of the analysis.
	Name() string
	// Provides returns the list of keys of reusable calculated entities.
	// Other items may depend on them.
	Provides() []string
	// Requires returns the list of keys of needed entities which must be supplied in Consume().
	Requires() []string
	// ListConfigurationOptions returns the list of available options which can be consumed by Configure().
	ListConfigurationOptions() []ConfigurationOption
	// Configure performs the initial setup of the object by applying parameters from facts.
	// It allows creating PipelineItems in a universal way.
	Configure(facts map[string]interface{})
	// Initialize prepares and resets the item. Consume() requires Initialize()
	// to be called at least once beforehand.
	Initialize(*git.Repository)
	// Consume processes the next commit.
	// deps contains the required entities which match Requires(). Besides, it always includes
	// "commit" and "index".
	// Returns the calculated entities which match Provides().
	Consume(deps map[string]interface{}) (map[string]interface{}, error)
}

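// The following is an illustrative sketch and not part of the original file: a minimal
// no-op PipelineItem which requires nothing and provides a single "noop" entity. The
// type name exampleNoopItem and the "noop" key are invented for this example only.
type exampleNoopItem struct{}

func (item *exampleNoopItem) Name() string { return "ExampleNoop" }

func (item *exampleNoopItem) Provides() []string { return []string{"noop"} }

func (item *exampleNoopItem) Requires() []string { return []string{} }

func (item *exampleNoopItem) ListConfigurationOptions() []ConfigurationOption {
	return []ConfigurationOption{}
}

func (item *exampleNoopItem) Configure(facts map[string]interface{}) {}

func (item *exampleNoopItem) Initialize(repository *git.Repository) {}

func (item *exampleNoopItem) Consume(deps map[string]interface{}) (map[string]interface{}, error) {
	// "commit" and "index" are always present in deps; this item simply passes the index through.
	return map[string]interface{}{"noop": deps["index"]}, nil
}

// Compile-time check that exampleNoopItem satisfies PipelineItem.
var _ PipelineItem = (*exampleNoopItem)(nil)
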
// FeaturedPipelineItem enables switching the automatic insertion of pipeline items on or off.
type FeaturedPipelineItem interface {
	PipelineItem
	// Features returns the list of names which enable this item to be automatically inserted
	// in Pipeline.DeployItem().
	Features() []string
}

// LeafPipelineItem corresponds to the top level pipeline items which produce the end results.
type LeafPipelineItem interface {
	PipelineItem
	// Flag returns the cmdline name of the item.
	Flag() string
	// Finalize returns the result of the analysis.
	Finalize() interface{}
	// Serialize encodes the object returned by Finalize() to YAML or Protocol Buffers.
	Serialize(result interface{}, binary bool, writer io.Writer) error
}

// MergeablePipelineItem specifies the methods to combine several analysis results together.
type MergeablePipelineItem interface {
	LeafPipelineItem
	// Deserialize loads the result from Protocol Buffers blob.
	Deserialize(pbmessage []byte) (interface{}, error)
	// MergeResults joins two results together. Common-s are specified as the global state.
	MergeResults(r1, r2 interface{}, c1, c2 *CommonAnalysisResult) interface{}
}

// CommonAnalysisResult holds the information which is always extracted at Pipeline.Run().
type CommonAnalysisResult struct {
	// Time of the first commit in the analysed sequence.
	BeginTime int64
	// Time of the last commit in the analysed sequence.
	EndTime int64
	// The number of commits in the analysed sequence.
	CommitsNumber int
	// The duration of Pipeline.Run().
	RunTime time.Duration
}

// BeginTimeAsTime converts the UNIX timestamp of the beginning to time.Time.
func (car *CommonAnalysisResult) BeginTimeAsTime() time.Time {
	return time.Unix(car.BeginTime, 0)
}

// EndTimeAsTime converts the UNIX timestamp of the ending to time.Time.
func (car *CommonAnalysisResult) EndTimeAsTime() time.Time {
	return time.Unix(car.EndTime, 0)
}

// Merge combines the CommonAnalysisResult with another one: the time span is extended
// and the counters are summed.
func (car *CommonAnalysisResult) Merge(other *CommonAnalysisResult) {
	if car.EndTime == 0 || other.BeginTime == 0 {
		panic("Merging with an uninitialized CommonAnalysisResult")
	}
	if other.BeginTime < car.BeginTime {
		car.BeginTime = other.BeginTime
	}
	if other.EndTime > car.EndTime {
		car.EndTime = other.EndTime
	}
	car.CommitsNumber += other.CommitsNumber
	car.RunTime += other.RunTime
}

// FillMetadata copies the data into a pb.Metadata protobuf message and returns it.
func (car *CommonAnalysisResult) FillMetadata(meta *pb.Metadata) *pb.Metadata {
	meta.BeginUnixTime = car.BeginTime
	meta.EndUnixTime = car.EndTime
	meta.Commits = int32(car.CommitsNumber)
	meta.RunTime = car.RunTime.Nanoseconds() / 1e6
	return meta
}

// MetadataToCommonAnalysisResult builds a CommonAnalysisResult from a pb.Metadata message.
func MetadataToCommonAnalysisResult(meta *pb.Metadata) *CommonAnalysisResult {
	return &CommonAnalysisResult{
		BeginTime:     meta.BeginUnixTime,
		EndTime:       meta.EndUnixTime,
		CommitsNumber: int(meta.Commits),
		RunTime:       time.Duration(meta.RunTime * 1e6),
	}
}

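// A hypothetical example of combining the common part of two runs (a sketch, not part of
// the original file; the timestamps are invented):
//
//	left := &CommonAnalysisResult{BeginTime: 1400000000, EndTime: 1450000000, CommitsNumber: 100}
//	right := &CommonAnalysisResult{BeginTime: 1420000000, EndTime: 1500000000, CommitsNumber: 50}
//	left.Merge(right)
//	// left now spans 1400000000..1500000000 and counts 150 commits.
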
// PipelineItemRegistry contains all the known PipelineItem-s.
type PipelineItemRegistry struct {
	provided   map[string][]reflect.Type
	registered map[string]reflect.Type
	flags      map[string]reflect.Type
}

// Register adds another PipelineItem to the registry.
func (registry *PipelineItemRegistry) Register(example PipelineItem) {
	t := reflect.TypeOf(example)
	registry.registered[example.Name()] = t
	if fpi, ok := example.(LeafPipelineItem); ok {
		registry.flags[fpi.Flag()] = t
	}
	for _, dep := range example.Provides() {
		ts := registry.provided[dep]
		if ts == nil {
			ts = []reflect.Type{}
		}
		ts = append(ts, t)
		registry.provided[dep] = ts
	}
}

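// Registration is normally performed from an init() function of the file which defines
// the item, along these lines (a sketch; BurndownAnalysis stands for any concrete item):
//
//	func init() {
//		Registry.Register(&BurndownAnalysis{})
//	}
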
// Summon creates fresh instances of the PipelineItems which either provide the given
// entity name or are themselves registered under that name.
func (registry *PipelineItemRegistry) Summon(providesOrName string) []PipelineItem {
	if registry.provided == nil {
		return []PipelineItem{}
	}
	ts := registry.provided[providesOrName]
	items := []PipelineItem{}
	for _, t := range ts {
		items = append(items, reflect.New(t.Elem()).Interface().(PipelineItem))
	}
	if t, exists := registry.registered[providesOrName]; exists {
		items = append(items, reflect.New(t.Elem()).Interface().(PipelineItem))
	}
	return items
}

type arrayFeatureFlags struct {
	// Flags contains the features activated through the command line.
	Flags []string
	// Choices contains all registered features.
	Choices map[string]bool
}

func (acf *arrayFeatureFlags) String() string {
	return strings.Join([]string(acf.Flags), ", ")
}

func (acf *arrayFeatureFlags) Set(value string) error {
	if _, exists := acf.Choices[value]; !exists {
		return fmt.Errorf("feature \"%s\" is not registered", value)
	}
	acf.Flags = append(acf.Flags, value)
	return nil
}

var featureFlags = arrayFeatureFlags{Flags: []string{}, Choices: map[string]bool{}}

// AddFlags inserts the cmdline options from PipelineItem.ListConfigurationOptions(),
// FeaturedPipelineItem.Features() and LeafPipelineItem.Flag() into the global "flag" parser
// built into the Go runtime.
// Returns the "facts" which can be fed into PipelineItem.Configure() and the dictionary of
// runnable analysis (LeafPipelineItem) choices. E.g. if "BurndownAnalysis" was activated
// through the "-burndown" cmdline argument, this mapping would contain ["BurndownAnalysis"] = *true.
func (registry *PipelineItemRegistry) AddFlags() (map[string]interface{}, map[string]*bool) {
	flags := map[string]interface{}{}
	deployed := map[string]*bool{}
	for name, it := range registry.registered {
		formatHelp := func(desc string) string {
			return fmt.Sprintf("%s [%s]", desc, name)
		}
		itemIface := reflect.New(it.Elem()).Interface()
		for _, opt := range itemIface.(PipelineItem).ListConfigurationOptions() {
			// The unsafe trick below overwrites the data word of the interface{} value with
			// the *bool / *int / *string returned by the flag package, so that a later type
			// assertion on the stored fact reads the value filled in by flag.Parse().
			var iface interface{}
			switch opt.Type {
			case BoolConfigurationOption:
				iface = interface{}(true)
				ptr := (**bool)(unsafe.Pointer(uintptr(unsafe.Pointer(&iface)) + unsafe.Sizeof(&iface)))
				*ptr = flag.Bool(opt.Flag, opt.Default.(bool), formatHelp(opt.Description))
			case IntConfigurationOption:
				iface = interface{}(0)
				ptr := (**int)(unsafe.Pointer(uintptr(unsafe.Pointer(&iface)) + unsafe.Sizeof(&iface)))
				*ptr = flag.Int(opt.Flag, opt.Default.(int), formatHelp(opt.Description))
			case StringConfigurationOption:
				iface = interface{}("")
				ptr := (**string)(unsafe.Pointer(uintptr(unsafe.Pointer(&iface)) + unsafe.Sizeof(&iface)))
				*ptr = flag.String(opt.Flag, opt.Default.(string), formatHelp(opt.Description))
			}
			flags[opt.Name] = iface
		}
		if fpi, ok := itemIface.(FeaturedPipelineItem); ok {
			for _, f := range fpi.Features() {
				featureFlags.Choices[f] = true
			}
		}
		if fpi, ok := itemIface.(LeafPipelineItem); ok {
			deployed[fpi.Name()] = flag.Bool(
				fpi.Flag(), false, fmt.Sprintf("Runs %s analysis.", fpi.Name()))
		}
	}
	{
		// Pipeline flags
		iface := interface{}("")
		ptr1 := (**string)(unsafe.Pointer(uintptr(unsafe.Pointer(&iface)) + unsafe.Sizeof(&iface)))
		*ptr1 = flag.String("dump-dag", "", "Write the pipeline DAG to a Graphviz file.")
		flags[ConfigPipelineDumpPath] = iface
		iface = interface{}(true)
		ptr2 := (**bool)(unsafe.Pointer(uintptr(unsafe.Pointer(&iface)) + unsafe.Sizeof(&iface)))
		*ptr2 = flag.Bool("dry-run", false, "Do not run any analyses - only resolve the DAG. "+
			"Useful for -dump-dag.")
		flags[ConfigPipelineDryRun] = iface
	}
	features := []string{}
	for f := range featureFlags.Choices {
		features = append(features, f)
	}
	flag.Var(&featureFlags, "feature",
		fmt.Sprintf("Enables specific analysis features, can be specified "+
			"multiple times. Available features: [%s].", strings.Join(features, ", ")))
	return flags, deployed
}

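// Typical wiring in a command line tool looks roughly like this (a sketch, not part of
// the original file; error handling omitted):
//
//	facts, deployed := hercules.Registry.AddFlags()
//	flag.Parse()
//	pipeline := hercules.NewPipeline(repository)
//	pipeline.SetFeaturesFromFlags()
//	for name, enabled := range deployed {
//		if *enabled {
//			pipeline.DeployItem(hercules.Registry.Summon(name)[0])
//		}
//	}
//	pipeline.Initialize(facts)
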
// Registry contains all known pipeline item types.
var Registry = &PipelineItemRegistry{
	provided:   map[string][]reflect.Type{},
	registered: map[string]reflect.Type{},
	flags:      map[string]reflect.Type{},
}

// Pipeline is the ordered sequence of PipelineItems which Run() executes over the
// commit history of a Git repository.
type Pipeline struct {
	// OnProgress is the callback which is invoked in Run() to output its
	// progress. The first argument is the number of processed commits and the
	// second is the total number of commits.
	OnProgress func(int, int)
	// repository points to the analysed Git repository struct from go-git.
	repository *git.Repository
	// items are the registered building blocks in the pipeline. The order defines the
	// execution sequence.
	items []PipelineItem
	// facts is the collection of parameters to create items.
	facts map[string]interface{}
	// features are the feature flags which enable the corresponding items.
	features map[string]bool
}

// FactPipelineCommits is the name of the fact which holds the commit sequence to analyse.
const FactPipelineCommits = "commits"

// NewPipeline initializes a new instance of the Pipeline struct.
func NewPipeline(repository *git.Repository) *Pipeline {
	return &Pipeline{
		repository: repository,
		items:      []PipelineItem{},
		facts:      map[string]interface{}{},
		features:   map[string]bool{},
	}
}

// GetFact returns the value of the named fact.
func (pipeline *Pipeline) GetFact(name string) interface{} {
	return pipeline.facts[name]
}

// SetFact sets the value of the named fact.
func (pipeline *Pipeline) SetFact(name string, value interface{}) {
	pipeline.facts[name] = value
}

// GetFeature returns the state of the feature with the specified name. The first result
// is the state itself, the second tells whether the feature exists at all.
func (pipeline *Pipeline) GetFeature(name string) (bool, bool) {
	val, exists := pipeline.features[name]
	return val, exists
}

// SetFeature enables the feature with the specified name.
func (pipeline *Pipeline) SetFeature(name string) {
	pipeline.features[name] = true
}

// SetFeaturesFromFlags enables the features which were requested through the "-feature"
// command line flags.
func (pipeline *Pipeline) SetFeaturesFromFlags() {
	for _, feature := range featureFlags.Flags {
		pipeline.SetFeature(feature)
	}
}

// DeployItem inserts a PipelineItem into the pipeline together with all of its recursive
// dependencies, which are resolved through Registry.Summon(). Returns the deployed item.
func (pipeline *Pipeline) DeployItem(item PipelineItem) PipelineItem {
	queue := []PipelineItem{}
	queue = append(queue, item)
	added := map[string]PipelineItem{}
	for _, item := range pipeline.items {
		added[item.Name()] = item
	}
	added[item.Name()] = item
	pipeline.AddItem(item)
	for len(queue) > 0 {
		head := queue[0]
		queue = queue[1:]
		for _, dep := range head.Requires() {
			for _, sibling := range Registry.Summon(dep) {
				if _, exists := added[sibling.Name()]; !exists {
					disabled := false
					// If this item supports features, check them against those activated in pipeline.features.
					if fpi, matches := sibling.(FeaturedPipelineItem); matches {
						for _, feature := range fpi.Features() {
							if !pipeline.features[feature] {
								disabled = true
								break
							}
						}
					}
					if disabled {
						continue
					}
					added[sibling.Name()] = sibling
					queue = append(queue, sibling)
					pipeline.AddItem(sibling)
				}
			}
		}
	}
	return item
}

// AddItem appends a PipelineItem to the pipeline without resolving its dependencies.
// Returns the added item.
func (pipeline *Pipeline) AddItem(item PipelineItem) PipelineItem {
	pipeline.items = append(pipeline.items, item)
	return item
}

// RemoveItem deletes a PipelineItem from the pipeline. The item is matched by pointer equality.
func (pipeline *Pipeline) RemoveItem(item PipelineItem) {
	for i, reg := range pipeline.items {
		if reg == item {
			pipeline.items = append(pipeline.items[:i], pipeline.items[i+1:]...)
			return
		}
	}
}

// Len returns the number of items in the pipeline.
func (pipeline *Pipeline) Len() int {
	return len(pipeline.items)
}

// Commits returns the critical path in the repository's history. It starts
// from HEAD and traces commits backwards till the root. When it encounters
// a merge (more than one parent), it always chooses the first parent.
func (pipeline *Pipeline) Commits() []*object.Commit {
	result := []*object.Commit{}
	repository := pipeline.repository
	head, err := repository.Head()
	if err != nil {
		panic(err)
	}
	commit, err := repository.CommitObject(head.Hash())
	if err != nil {
		panic(err)
	}
	// the first parent matches the head
	for ; err != io.EOF; commit, err = commit.Parents().Next() {
		if err != nil {
			panic(err)
		}
		result = append(result, commit)
	}
	// reverse the order
	for i, j := 0, len(result)-1; i < j; i, j = i+1, j-1 {
		result[i], result[j] = result[j], result[i]
	}
	return result
}

type sortablePipelineItems []PipelineItem

func (items sortablePipelineItems) Len() int {
	return len(items)
}

func (items sortablePipelineItems) Less(i, j int) bool {
	return items[i].Name() < items[j].Name()
}

func (items sortablePipelineItems) Swap(i, j int) {
	items[i], items[j] = items[j], items[i]
}

// resolve topologically sorts the items into the execution order and optionally dumps
// the resulting DAG in Graphviz format to dumpPath.
func (pipeline *Pipeline) resolve(dumpPath string) {
	graph := toposort.NewGraph()
	sort.Sort(sortablePipelineItems(pipeline.items))
	name2item := map[string]PipelineItem{}
	ambiguousMap := map[string][]string{}
	nameUsages := map[string]int{}
	for _, item := range pipeline.items {
		nameUsages[item.Name()]++
	}
	counters := map[string]int{}
	for _, item := range pipeline.items {
		name := item.Name()
		if nameUsages[name] > 1 {
			index := counters[item.Name()] + 1
			counters[item.Name()] = index
			name = fmt.Sprintf("%s_%d", item.Name(), index)
		}
		graph.AddNode(name)
		name2item[name] = item
		for _, key := range item.Provides() {
			key = "[" + key + "]"
			graph.AddNode(key)
			if graph.AddEdge(name, key) > 1 {
				if ambiguousMap[key] != nil {
					fmt.Fprintln(os.Stderr, "Pipeline:")
					for _, item2 := range pipeline.items {
						if item2 == item {
							fmt.Fprint(os.Stderr, "> ")
						}
						fmt.Fprint(os.Stderr, item2.Name(), " [")
						for i, key2 := range item2.Provides() {
							fmt.Fprint(os.Stderr, key2)
							if i < len(item2.Provides())-1 {
								fmt.Fprint(os.Stderr, ", ")
							}
						}
						fmt.Fprintln(os.Stderr, "]")
					}
					panic("Failed to resolve pipeline dependencies: ambiguous graph.")
				}
				ambiguousMap[key] = graph.FindParents(key)
			}
		}
	}
	counters = map[string]int{}
	for _, item := range pipeline.items {
		name := item.Name()
		if nameUsages[name] > 1 {
			index := counters[item.Name()] + 1
			counters[item.Name()] = index
			name = fmt.Sprintf("%s_%d", item.Name(), index)
		}
		for _, key := range item.Requires() {
			key = "[" + key + "]"
			if graph.AddEdge(key, name) == 0 {
				panic(fmt.Sprintf("Unsatisfied dependency: %s -> %s", key, item.Name()))
			}
		}
	}
	if len(ambiguousMap) > 0 {
		ambiguous := []string{}
		for key := range ambiguousMap {
			ambiguous = append(ambiguous, key)
		}
		sort.Strings(ambiguous)
		bfsorder := graph.BreadthSort()
		bfsindex := map[string]int{}
		for i, s := range bfsorder {
			bfsindex[s] = i
		}
		for len(ambiguous) > 0 {
			key := ambiguous[0]
			ambiguous = ambiguous[1:]
			pair := ambiguousMap[key]
			inheritor := pair[1]
			if bfsindex[pair[1]] < bfsindex[pair[0]] {
				inheritor = pair[0]
			}
			removed := graph.RemoveEdge(key, inheritor)
			cycle := map[string]bool{}
			for _, node := range graph.FindCycle(key) {
				cycle[node] = true
			}
			if len(cycle) == 0 {
				cycle[inheritor] = true
			}
			if removed {
				graph.AddEdge(key, inheritor)
			}
			graph.RemoveEdge(inheritor, key)
			graph.ReindexNode(inheritor)
			// for all the nodes key links to, except those in the cycle, put the link from the inheritor
			for _, node := range graph.FindChildren(key) {
				if _, exists := cycle[node]; !exists {
					graph.AddEdge(inheritor, node)
					graph.RemoveEdge(key, node)
				}
			}
			graph.ReindexNode(key)
		}
	}
	var graphCopy *toposort.Graph
	if dumpPath != "" {
		graphCopy = graph.Copy()
	}
	strplan, ok := graph.Toposort()
	if !ok {
		panic("Failed to resolve pipeline dependencies: unable to topologically sort the items.")
	}
	pipeline.items = make([]PipelineItem, 0, len(pipeline.items))
	for _, key := range strplan {
		if item, ok := name2item[key]; ok {
			pipeline.items = append(pipeline.items, item)
		}
	}
	if dumpPath != "" {
		// If there is a floating difference, uncomment this:
		// fmt.Fprint(os.Stderr, graphCopy.DebugDump())
		ioutil.WriteFile(dumpPath, []byte(graphCopy.Serialize(strplan)), 0666)
		absPath, _ := filepath.Abs(dumpPath)
		fmt.Fprintf(os.Stderr, "Wrote the DAG to %s\n", absPath)
	}
}

// Initialize prepares the pipeline for Run(): it schedules the items by resolving their
// dependencies and calls Configure() and Initialize() on each of them with the given facts.
func (pipeline *Pipeline) Initialize(facts map[string]interface{}) {
	if facts == nil {
		facts = map[string]interface{}{}
	}
	if _, exists := facts[FactPipelineCommits]; !exists {
		facts[FactPipelineCommits] = pipeline.Commits()
	}
	dumpPath, _ := facts[ConfigPipelineDumpPath].(string)
	pipeline.resolve(dumpPath)
	if dryRun, _ := facts[ConfigPipelineDryRun].(bool); dryRun {
		return
	}
	for _, item := range pipeline.items {
		item.Configure(facts)
	}
	for _, item := range pipeline.items {
		item.Initialize(pipeline.repository)
	}
}

// Run executes the pipeline.
//
// commits is a slice with the sequential commit history. It shall start from
// the root (ascending order).
//
// Returns the mapping from each LeafPipelineItem to the corresponding analysis result.
// There is always a "nil" record with the overall CommonAnalysisResult.
func (pipeline *Pipeline) Run(commits []*object.Commit) (map[LeafPipelineItem]interface{}, error) {
	startRunTime := time.Now()
	onProgress := pipeline.OnProgress
	if onProgress == nil {
		onProgress = func(int, int) {}
	}
	for index, commit := range commits {
		onProgress(index, len(commits))
		state := map[string]interface{}{"commit": commit, "index": index}
		for _, item := range pipeline.items {
			update, err := item.Consume(state)
			if err != nil {
				fmt.Fprintf(os.Stderr, "%s failed on commit #%d %s\n",
					item.Name(), index, commit.Hash.String())
				return nil, err
			}
			for _, key := range item.Provides() {
				val, ok := update[key]
				if !ok {
					panic(fmt.Sprintf("%s: Consume() did not return %s", item.Name(), key))
				}
				state[key] = val
			}
		}
	}
	onProgress(len(commits), len(commits))
	result := map[LeafPipelineItem]interface{}{}
	for _, item := range pipeline.items {
		if casted, ok := item.(LeafPipelineItem); ok {
			result[casted] = casted.Finalize()
		}
	}
	result[nil] = &CommonAnalysisResult{
		BeginTime:     commits[0].Author.When.Unix(),
		EndTime:       commits[len(commits)-1].Author.When.Unix(),
		CommitsNumber: len(commits),
		RunTime:       time.Since(startRunTime),
	}
	return result, nil
}

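// A rough end-to-end usage sketch (not part of the original file; BurndownAnalysis stands
// for any LeafPipelineItem and error handling is omitted):
//
//	pipeline := hercules.NewPipeline(repository)
//	burndown := pipeline.DeployItem(&hercules.BurndownAnalysis{}).(hercules.LeafPipelineItem)
//	commits := pipeline.Commits()
//	pipeline.Initialize(nil)
//	results, err := pipeline.Run(commits)
//	common := results[nil].(*hercules.CommonAnalysisResult)
//	fmt.Println("commits analysed:", common.CommitsNumber)
//	burndown.Serialize(results[burndown], false, os.Stdout)
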
// LoadCommitsFromFile reads the commit hashes from the specified file, or from stdin
// when path is "-", one hash per line, and resolves them in the repository.
func LoadCommitsFromFile(path string, repository *git.Repository) ([]*object.Commit, error) {
	var file io.ReadCloser
	if path != "-" {
		var err error
		file, err = os.Open(path)
		if err != nil {
			return nil, err
		}
		defer file.Close()
	} else {
		file = os.Stdin
	}
	scanner := bufio.NewScanner(file)
	commits := []*object.Commit{}
	for scanner.Scan() {
		hash := plumbing.NewHash(scanner.Text())
		if len(hash) != 20 {
			return nil, errors.New("invalid commit hash " + scanner.Text())
		}
		commit, err := repository.CommitObject(hash)
		if err != nil {
			return nil, err
		}
		commits = append(commits, commit)
	}
	return commits, nil
}

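// For example, to analyse only a hand-picked list of commits instead of the first-parent
// history (a sketch, assuming "commits.txt" holds one full 40-character hash per line):
//
//	commits, err := hercules.LoadCommitsFromFile("commits.txt", repository)
//	if err != nil {
//		panic(err)
//	}
//	results, err := pipeline.Run(commits)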