pipeline.go

package hercules

import (
    "bufio"
    "errors"
    "flag"
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "path/filepath"
    "reflect"
    "sort"
    "strings"
    "time"
    "unsafe"

    "gopkg.in/src-d/go-git.v4"
    "gopkg.in/src-d/go-git.v4/plumbing"
    "gopkg.in/src-d/go-git.v4/plumbing/object"
    "gopkg.in/src-d/hercules.v3/pb"
    "gopkg.in/src-d/hercules.v3/toposort"
)
// ConfigurationOptionType identifies the type of a ConfigurationOption's value.
type ConfigurationOptionType int

const (
    // BoolConfigurationOption reflects the boolean value type.
    BoolConfigurationOption ConfigurationOptionType = iota
    // IntConfigurationOption reflects the integer value type.
    IntConfigurationOption
    // StringConfigurationOption reflects the string value type.
    StringConfigurationOption
)
const (
    // ConfigPipelineDumpPath is the name of the fact which holds the path to write the DAG in Graphviz format.
    ConfigPipelineDumpPath = "Pipeline.DumpPath"
    // ConfigPipelineDryRun is the name of the fact which, when set to true, resolves the DAG without running any analyses.
    ConfigPipelineDryRun = "Pipeline.DryRun"
)
// ConfigurationOption allows for the unified, retrospective way to set up PipelineItem-s.
type ConfigurationOption struct {
    // Name identifies the configuration option in facts.
    Name string
    // Description represents the help text about the configuration option.
    Description string
    // Flag corresponds to the CLI token with "-" prepended.
    Flag string
    // Type specifies the kind of the configuration option's value.
    Type ConfigurationOptionType
    // Default is the initial value of the configuration option.
    Default interface{}
}
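
// A minimal sketch of declaring an option inside ListConfigurationOptions();
// the name, flag and default below are hypothetical:
//
//    return []ConfigurationOption{{
//        Name:        "MyAnalysis.Granularity",
//        Description: "How many days of history are aggregated into a single band.",
//        Flag:        "granularity",
//        Type:        IntConfigurationOption,
//        Default:     30,
//    }}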
// PipelineItem is the interface for all the units of the Git commit analysis pipeline.
type PipelineItem interface {
    // Name returns the name of the analysis.
    Name() string
    // Provides returns the list of keys of reusable calculated entities.
    // Other items may depend on them.
    Provides() []string
    // Requires returns the list of keys of needed entities which must be supplied in Consume().
    Requires() []string
    // ListConfigurationOptions returns the list of available options which can be consumed by Configure().
    ListConfigurationOptions() []ConfigurationOption
    // Configure performs the initial setup of the object by applying parameters from facts.
    // It allows creating PipelineItems in a uniform way.
    Configure(facts map[string]interface{})
    // Initialize prepares and resets the item. Consume() requires Initialize()
    // to be called at least once beforehand.
    Initialize(*git.Repository)
    // Consume processes the next commit.
    // deps contains the required entities which match Requires(). Besides, it always includes
    // "commit" and "index".
    // Returns the calculated entities which match Provides().
    Consume(deps map[string]interface{}) (map[string]interface{}, error)
}
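
// A minimal sketch of implementing the interface: a hypothetical item which only
// counts the commits it consumes and neither requires nor provides any entities.
//
//    type CommitCounter struct{ commits int }
//
//    func (cc *CommitCounter) Name() string       { return "CommitCounter" }
//    func (cc *CommitCounter) Provides() []string { return []string{} }
//    func (cc *CommitCounter) Requires() []string { return []string{} }
//    func (cc *CommitCounter) ListConfigurationOptions() []ConfigurationOption {
//        return []ConfigurationOption{}
//    }
//    func (cc *CommitCounter) Configure(facts map[string]interface{}) {}
//    func (cc *CommitCounter) Initialize(repository *git.Repository)  { cc.commits = 0 }
//    func (cc *CommitCounter) Consume(deps map[string]interface{}) (map[string]interface{}, error) {
//        cc.commits++
//        return map[string]interface{}{}, nil
//    }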
// FeaturedPipelineItem enables switching the automatic insertion of pipeline items on or off.
type FeaturedPipelineItem interface {
    PipelineItem
    // Features returns the list of names which enable this item to be automatically inserted
    // in Pipeline.DeployItem().
    Features() []string
}

// LeafPipelineItem corresponds to the top level pipeline items which produce the end results.
type LeafPipelineItem interface {
    PipelineItem
    // Flag returns the cmdline name of the item.
    Flag() string
    // Finalize returns the result of the analysis.
    Finalize() interface{}
    // Serialize encodes the object returned by Finalize() to YAML or Protocol Buffers.
    Serialize(result interface{}, binary bool, writer io.Writer) error
}
// MergeablePipelineItem specifies the methods to combine several analysis results together.
type MergeablePipelineItem interface {
    LeafPipelineItem
    // Deserialize loads the result from a Protocol Buffers blob.
    Deserialize(pbmessage []byte) (interface{}, error)
    // MergeResults joins two results together; the corresponding CommonAnalysisResult-s
    // carry the shared state of each run.
    MergeResults(r1, r2 interface{}, c1, c2 *CommonAnalysisResult) interface{}
}
// CommonAnalysisResult holds the information which is always extracted at Pipeline.Run().
type CommonAnalysisResult struct {
    // Time of the first commit in the analysed sequence.
    BeginTime int64
    // Time of the last commit in the analysed sequence.
    EndTime int64
    // The number of commits in the analysed sequence.
    CommitsNumber int
    // The duration of Pipeline.Run().
    RunTime time.Duration
}
// BeginTimeAsTime converts the UNIX timestamp of the first commit to time.Time.
func (car *CommonAnalysisResult) BeginTimeAsTime() time.Time {
    return time.Unix(car.BeginTime, 0)
}

// EndTimeAsTime converts the UNIX timestamp of the last commit to time.Time.
func (car *CommonAnalysisResult) EndTimeAsTime() time.Time {
    return time.Unix(car.EndTime, 0)
}
// Merge combines the CommonAnalysisResult with another one, widening the time span
// and summing the commit counts and run times.
func (car *CommonAnalysisResult) Merge(other *CommonAnalysisResult) {
    if other.BeginTime < car.BeginTime {
        car.BeginTime = other.BeginTime
    }
    if other.EndTime > car.EndTime {
        car.EndTime = other.EndTime
    }
    car.CommitsNumber += other.CommitsNumber
    car.RunTime += other.RunTime
}
// FillMetadata copies the fields of CommonAnalysisResult to the Protocol Buffers metadata.
func (car *CommonAnalysisResult) FillMetadata(meta *pb.Metadata) *pb.Metadata {
    meta.BeginUnixTime = car.BeginTime
    meta.EndUnixTime = car.EndTime
    meta.Commits = int32(car.CommitsNumber)
    meta.RunTime = car.RunTime.Nanoseconds() / 1e6 // milliseconds
    return meta
}

// MetadataToCommonAnalysisResult is the inverse of FillMetadata().
func MetadataToCommonAnalysisResult(meta *pb.Metadata) *CommonAnalysisResult {
    return &CommonAnalysisResult{
        BeginTime:     meta.BeginUnixTime,
        EndTime:       meta.EndUnixTime,
        CommitsNumber: int(meta.Commits),
        RunTime:       time.Duration(meta.RunTime * 1e6), // milliseconds -> nanoseconds
    }
}
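
// A minimal sketch of combining two partial results; the numbers are illustrative:
//
//    left := &CommonAnalysisResult{BeginTime: 100, EndTime: 200, CommitsNumber: 10}
//    right := &CommonAnalysisResult{BeginTime: 50, EndTime: 250, CommitsNumber: 5}
//    left.Merge(right)
//    // left now spans [50, 250] and counts 15 commits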
// PipelineItemRegistry contains all the known PipelineItem-s.
type PipelineItemRegistry struct {
    provided   map[string][]reflect.Type
    registered map[string]reflect.Type
    flags      map[string]reflect.Type
}

// Register adds another PipelineItem to the registry.
func (registry *PipelineItemRegistry) Register(example PipelineItem) {
    t := reflect.TypeOf(example)
    registry.registered[example.Name()] = t
    if fpi, ok := example.(LeafPipelineItem); ok {
        registry.flags[fpi.Flag()] = t
    }
    for _, dep := range example.Provides() {
        ts := registry.provided[dep]
        if ts == nil {
            ts = []reflect.Type{}
        }
        ts = append(ts, t)
        registry.provided[dep] = ts
    }
}
// Summon instantiates all the registered items which either provide the given entity
// or match the given name.
func (registry *PipelineItemRegistry) Summon(providesOrName string) []PipelineItem {
    if registry.provided == nil {
        return []PipelineItem{}
    }
    ts := registry.provided[providesOrName]
    items := []PipelineItem{}
    for _, t := range ts {
        items = append(items, reflect.New(t.Elem()).Interface().(PipelineItem))
    }
    if t, exists := registry.registered[providesOrName]; exists {
        items = append(items, reflect.New(t.Elem()).Interface().(PipelineItem))
    }
    return items
}
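
// A minimal usage sketch; CommitCounter is the hypothetical item from the sketch above.
// Registration normally happens in an init() of the item's own source file:
//
//    func init() {
//        Registry.Register(&CommitCounter{})
//    }
//
//    // Later, instantiate everything which either provides the given entity
//    // or is literally named "CommitCounter":
//    items := Registry.Summon("CommitCounter")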
type arrayFeatureFlags struct {
    // Flags contains the features activated through the command line.
    Flags []string
    // Choices contains all registered features.
    Choices map[string]bool
}

func (acf *arrayFeatureFlags) String() string {
    return strings.Join([]string(acf.Flags), ", ")
}

func (acf *arrayFeatureFlags) Set(value string) error {
    if _, exists := acf.Choices[value]; !exists {
        return fmt.Errorf("feature \"%s\" is not registered", value)
    }
    acf.Flags = append(acf.Flags, value)
    return nil
}

var featureFlags = arrayFeatureFlags{Flags: []string{}, Choices: map[string]bool{}}
// AddFlags inserts the cmdline options from PipelineItem.ListConfigurationOptions(),
// FeaturedPipelineItem.Features() and LeafPipelineItem.Flag() into the global "flag" parser
// built into the Go runtime.
// Returns the "facts" which can be fed into PipelineItem.Configure() and the dictionary of
// runnable analysis (LeafPipelineItem) choices. E.g. if "BurndownAnalysis" was activated
// through the "-burndown" cmdline argument, this mapping would contain ["BurndownAnalysis"] = *true.
func (registry *PipelineItemRegistry) AddFlags() (map[string]interface{}, map[string]*bool) {
    flags := map[string]interface{}{}
    deployed := map[string]*bool{}
    for name, it := range registry.registered {
        formatHelp := func(desc string) string {
            return fmt.Sprintf("%s [%s]", desc, name)
        }
        itemIface := reflect.New(it.Elem()).Interface()
        for _, opt := range itemIface.(PipelineItem).ListConfigurationOptions() {
            // The unsafe pointer arithmetic below overwrites the data word of the
            // interface{} with the pointer returned by the flag package, so that
            // the values stored in "flags" reflect the command line after flag.Parse().
            var iface interface{}
            switch opt.Type {
            case BoolConfigurationOption:
                iface = interface{}(true)
                ptr := (**bool)(unsafe.Pointer(uintptr(unsafe.Pointer(&iface)) + unsafe.Sizeof(&iface)))
                *ptr = flag.Bool(opt.Flag, opt.Default.(bool), formatHelp(opt.Description))
            case IntConfigurationOption:
                iface = interface{}(0)
                ptr := (**int)(unsafe.Pointer(uintptr(unsafe.Pointer(&iface)) + unsafe.Sizeof(&iface)))
                *ptr = flag.Int(opt.Flag, opt.Default.(int), formatHelp(opt.Description))
            case StringConfigurationOption:
                iface = interface{}("")
                ptr := (**string)(unsafe.Pointer(uintptr(unsafe.Pointer(&iface)) + unsafe.Sizeof(&iface)))
                *ptr = flag.String(opt.Flag, opt.Default.(string), formatHelp(opt.Description))
            }
            flags[opt.Name] = iface
        }
        if fpi, ok := itemIface.(FeaturedPipelineItem); ok {
            for _, f := range fpi.Features() {
                featureFlags.Choices[f] = true
            }
        }
        if fpi, ok := itemIface.(LeafPipelineItem); ok {
            deployed[fpi.Name()] = flag.Bool(
                fpi.Flag(), false, fmt.Sprintf("Runs %s analysis.", fpi.Name()))
        }
    }
    {
        // Pipeline flags
        iface := interface{}("")
        ptr1 := (**string)(unsafe.Pointer(uintptr(unsafe.Pointer(&iface)) + unsafe.Sizeof(&iface)))
        *ptr1 = flag.String("dump-dag", "", "Write the pipeline DAG to a Graphviz file.")
        flags[ConfigPipelineDumpPath] = iface
        iface = interface{}(true)
        ptr2 := (**bool)(unsafe.Pointer(uintptr(unsafe.Pointer(&iface)) + unsafe.Sizeof(&iface)))
        *ptr2 = flag.Bool("dry-run", false, "Do not run any analyses - only resolve the DAG. "+
            "Useful for -dump-dag.")
        flags[ConfigPipelineDryRun] = iface
    }
    features := []string{}
    for f := range featureFlags.Choices {
        features = append(features, f)
    }
    flag.Var(&featureFlags, "feature",
        fmt.Sprintf("Enables specific analysis features, can be specified "+
            "multiple times. Available features: [%s].", strings.Join(features, ", ")))
    return flags, deployed
}
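
// A minimal sketch of a command line entry point built on AddFlags(); error handling
// and the repository setup are elided, and the overall flow is an assumption rather
// than the exact Hercules CLI:
//
//    facts, deployed := Registry.AddFlags()
//    flag.Parse()
//    pipeline := NewPipeline(repository)
//    pipeline.SetFeaturesFromFlags()
//    for name, enabled := range deployed {
//        if *enabled {
//            pipeline.DeployItem(Registry.Summon(name)[0])
//        }
//    }
//    pipeline.Initialize(facts)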
// Registry contains all known pipeline item types.
var Registry = &PipelineItemRegistry{
    provided:   map[string][]reflect.Type{},
    registered: map[string]reflect.Type{},
    flags:      map[string]reflect.Type{},
}
// Pipeline is the central entity which carries the dependency-resolved sequence of
// PipelineItem-s and runs them over the commit history.
type Pipeline struct {
    // OnProgress is the callback which is invoked in Run() to report its
    // progress. The first argument is the number of processed commits and the
    // second is the total number of commits.
    OnProgress func(int, int)
    // repository points to the analysed Git repository struct from go-git.
    repository *git.Repository
    // items are the registered building blocks in the pipeline. The order defines the
    // execution sequence.
    items []PipelineItem
    // facts is the collection of parameters to create items.
    facts map[string]interface{}
    // features are the flags which enable the corresponding items.
    features map[string]bool
}
// FactPipelineCommits is the name of the fact which holds the commit sequence to analyse.
const FactPipelineCommits = "commits"

// NewPipeline initializes a new instance of the Pipeline struct.
func NewPipeline(repository *git.Repository) *Pipeline {
    return &Pipeline{
        repository: repository,
        items:      []PipelineItem{},
        facts:      map[string]interface{}{},
        features:   map[string]bool{},
    }
}
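
// A minimal end-to-end sketch; error handling is elided and the leaf analysis
// type is hypothetical:
//
//    repository, _ := git.PlainOpen("/path/to/repo")
//    pipeline := NewPipeline(repository)
//    leaf := pipeline.DeployItem(&SomeLeafAnalysis{}).(LeafPipelineItem)
//    pipeline.Initialize(nil)
//    results, _ := pipeline.Run(pipeline.Commits())
//    fmt.Println(results[leaf], results[nil].(*CommonAnalysisResult).CommitsNumber)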
// GetFact returns the value of the fact with the specified name.
func (pipeline *Pipeline) GetFact(name string) interface{} {
    return pipeline.facts[name]
}

// SetFact sets the value of the fact with the specified name.
func (pipeline *Pipeline) SetFact(name string, value interface{}) {
    pipeline.facts[name] = value
}

// GetFeature returns the state of the feature with the specified name (enabled, exists).
func (pipeline *Pipeline) GetFeature(name string) (bool, bool) {
    val, exists := pipeline.features[name]
    return val, exists
}

// SetFeature enables the feature with the specified name.
func (pipeline *Pipeline) SetFeature(name string) {
    pipeline.features[name] = true
}

// SetFeaturesFromFlags enables the features which were activated through the -feature
// command line arguments.
func (pipeline *Pipeline) SetFeaturesFromFlags() {
    for _, feature := range featureFlags.Flags {
        pipeline.SetFeature(feature)
    }
}
// DeployItem adds a PipelineItem to the pipeline together with all the items it
// transitively requires, summoned from the Registry. Dependencies which are gated by
// inactive features are skipped.
func (pipeline *Pipeline) DeployItem(item PipelineItem) PipelineItem {
    queue := []PipelineItem{}
    queue = append(queue, item)
    added := map[string]PipelineItem{}
    for _, item := range pipeline.items {
        added[item.Name()] = item
    }
    added[item.Name()] = item
    pipeline.AddItem(item)
    for len(queue) > 0 {
        head := queue[0]
        queue = queue[1:]
        for _, dep := range head.Requires() {
            for _, sibling := range Registry.Summon(dep) {
                if _, exists := added[sibling.Name()]; !exists {
                    disabled := false
                    // If this item supports features, check them against those activated in pipeline.features.
                    if fpi, matches := sibling.(FeaturedPipelineItem); matches {
                        for _, feature := range fpi.Features() {
                            if !pipeline.features[feature] {
                                disabled = true
                                break
                            }
                        }
                    }
                    if disabled {
                        continue
                    }
                    added[sibling.Name()] = sibling
                    queue = append(queue, sibling)
                    pipeline.AddItem(sibling)
                }
            }
        }
    }
    return item
}
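
// A minimal sketch; the item type is hypothetical. Deploying an analysis whose
// Requires() lists some entity also deploys the registered provider of that entity:
//
//    pipeline.DeployItem(&SomeLeafAnalysis{})
//    fmt.Println(pipeline.Len()) // the analysis plus its transitive dependencies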
// AddItem appends a PipelineItem to the pipeline without resolving its dependencies.
func (pipeline *Pipeline) AddItem(item PipelineItem) PipelineItem {
    pipeline.items = append(pipeline.items, item)
    return item
}

// RemoveItem deletes a PipelineItem from the pipeline.
func (pipeline *Pipeline) RemoveItem(item PipelineItem) {
    for i, reg := range pipeline.items {
        if reg == item {
            pipeline.items = append(pipeline.items[:i], pipeline.items[i+1:]...)
            return
        }
    }
}

// Len returns the number of items in the pipeline.
func (pipeline *Pipeline) Len() int {
    return len(pipeline.items)
}
// Commits returns the critical path in the repository's history. It starts
// from HEAD and traces commits backwards till the root. When it encounters
// a merge (more than one parent), it always chooses the first parent.
func (pipeline *Pipeline) Commits() []*object.Commit {
    result := []*object.Commit{}
    repository := pipeline.repository
    head, err := repository.Head()
    if err != nil {
        panic(err)
    }
    commit, err := repository.CommitObject(head.Hash())
    if err != nil {
        panic(err)
    }
    // the first parent matches the head
    for ; err != io.EOF; commit, err = commit.Parents().Next() {
        if err != nil {
            panic(err)
        }
        result = append(result, commit)
    }
    // reverse the order
    for i, j := 0, len(result)-1; i < j; i, j = i+1, j-1 {
        result[i], result[j] = result[j], result[i]
    }
    return result
}
type sortablePipelineItems []PipelineItem

func (items sortablePipelineItems) Len() int {
    return len(items)
}

func (items sortablePipelineItems) Less(i, j int) bool {
    return items[i].Name() < items[j].Name()
}

func (items sortablePipelineItems) Swap(i, j int) {
    items[i], items[j] = items[j], items[i]
}
func (pipeline *Pipeline) resolve(dumpPath string) {
    graph := toposort.NewGraph()
    sort.Sort(sortablePipelineItems(pipeline.items))
    name2item := map[string]PipelineItem{}
    ambiguousMap := map[string][]string{}
    nameUsages := map[string]int{}
    for _, item := range pipeline.items {
        nameUsages[item.Name()]++
    }
    counters := map[string]int{}
    // The first pass adds the nodes and the "item -> provided entity" edges.
    for _, item := range pipeline.items {
        name := item.Name()
        if nameUsages[name] > 1 {
            index := counters[item.Name()] + 1
            counters[item.Name()] = index
            name = fmt.Sprintf("%s_%d", item.Name(), index)
        }
        graph.AddNode(name)
        name2item[name] = item
        for _, key := range item.Provides() {
            key = "[" + key + "]"
            graph.AddNode(key)
            if graph.AddEdge(name, key) > 1 {
                if ambiguousMap[key] != nil {
                    fmt.Fprintln(os.Stderr, "Pipeline:")
                    for _, item2 := range pipeline.items {
                        if item2 == item {
                            fmt.Fprint(os.Stderr, "> ")
                        }
                        fmt.Fprint(os.Stderr, item2.Name(), " [")
                        for i, key2 := range item2.Provides() {
                            fmt.Fprint(os.Stderr, key2)
                            if i < len(item2.Provides())-1 {
                                fmt.Fprint(os.Stderr, ", ")
                            }
                        }
                        fmt.Fprintln(os.Stderr, "]")
                    }
                    panic("Failed to resolve pipeline dependencies: ambiguous graph.")
                }
                ambiguousMap[key] = graph.FindParents(key)
            }
        }
    }
    counters = map[string]int{}
    // The second pass adds the "required entity -> item" edges.
    for _, item := range pipeline.items {
        name := item.Name()
        if nameUsages[name] > 1 {
            index := counters[item.Name()] + 1
            counters[item.Name()] = index
            name = fmt.Sprintf("%s_%d", item.Name(), index)
        }
        for _, key := range item.Requires() {
            key = "[" + key + "]"
            if graph.AddEdge(key, name) == 0 {
                panic(fmt.Sprintf("Unsatisfied dependency: %s -> %s", key, item.Name()))
            }
        }
    }
    if len(ambiguousMap) > 0 {
        ambiguous := []string{}
        for key := range ambiguousMap {
            ambiguous = append(ambiguous, key)
        }
        sort.Strings(ambiguous)
        bfsorder := graph.BreadthSort()
        bfsindex := map[string]int{}
        for i, s := range bfsorder {
            bfsindex[s] = i
        }
        for len(ambiguous) > 0 {
            key := ambiguous[0]
            ambiguous = ambiguous[1:]
            pair := ambiguousMap[key]
            inheritor := pair[1]
            if bfsindex[pair[1]] < bfsindex[pair[0]] {
                inheritor = pair[0]
            }
            removed := graph.RemoveEdge(key, inheritor)
            cycle := map[string]bool{}
            for _, node := range graph.FindCycle(key) {
                cycle[node] = true
            }
            if len(cycle) == 0 {
                cycle[inheritor] = true
            }
            if removed {
                graph.AddEdge(key, inheritor)
            }
            graph.RemoveEdge(inheritor, key)
            graph.ReindexNode(inheritor)
            // For all the nodes which key links to, except those in the cycle,
            // put the link from the inheritor instead.
            for _, node := range graph.FindChildren(key) {
                if _, exists := cycle[node]; !exists {
                    graph.AddEdge(inheritor, node)
                    graph.RemoveEdge(key, node)
                }
            }
            graph.ReindexNode(key)
        }
    }
    var graphCopy *toposort.Graph
    if dumpPath != "" {
        graphCopy = graph.Copy()
    }
    strplan, ok := graph.Toposort()
    if !ok {
        panic("Failed to resolve pipeline dependencies: unable to topologically sort the items.")
    }
    pipeline.items = make([]PipelineItem, 0, len(pipeline.items))
    for _, key := range strplan {
        if item, ok := name2item[key]; ok {
            pipeline.items = append(pipeline.items, item)
        }
    }
    if dumpPath != "" {
        // If there is a floating difference, uncomment this:
        // fmt.Fprint(os.Stderr, graphCopy.DebugDump())
        ioutil.WriteFile(dumpPath, []byte(graphCopy.Serialize(strplan)), 0666)
        absPath, _ := filepath.Abs(dumpPath)
        fmt.Fprintf(os.Stderr, "Wrote the DAG to %s\n", absPath)
    }
}
// Initialize prepares the pipeline for Run(): it resolves the dependency graph into the
// execution order and calls Configure() and Initialize() on every item.
func (pipeline *Pipeline) Initialize(facts map[string]interface{}) {
    if facts == nil {
        facts = map[string]interface{}{}
    }
    if _, exists := facts[FactPipelineCommits]; !exists {
        facts[FactPipelineCommits] = pipeline.Commits()
    }
    dumpPath, _ := facts[ConfigPipelineDumpPath].(string)
    pipeline.resolve(dumpPath)
    if dryRun, _ := facts[ConfigPipelineDryRun].(bool); dryRun {
        return
    }
    for _, item := range pipeline.items {
        item.Configure(facts)
    }
    for _, item := range pipeline.items {
        item.Initialize(pipeline.repository)
    }
}
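
// A minimal sketch of inspecting the resolved DAG without running any analyses;
// the output file name is arbitrary:
//
//    pipeline.Initialize(map[string]interface{}{
//        ConfigPipelineDumpPath: "pipeline.dot",
//        ConfigPipelineDryRun:   true,
//    })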
// Run method executes the pipeline.
//
// commits is a slice with the sequential commit history. It shall start from
// the root (ascending order).
//
// Returns the mapping from each LeafPipelineItem to the corresponding analysis result.
// There is always a "nil" record with CommonAnalysisResult.
func (pipeline *Pipeline) Run(commits []*object.Commit) (map[LeafPipelineItem]interface{}, error) {
    startRunTime := time.Now()
    onProgress := pipeline.OnProgress
    if onProgress == nil {
        onProgress = func(int, int) {}
    }
    for index, commit := range commits {
        onProgress(index, len(commits))
        state := map[string]interface{}{"commit": commit, "index": index}
        for _, item := range pipeline.items {
            update, err := item.Consume(state)
            if err != nil {
                fmt.Fprintf(os.Stderr, "%s failed on commit #%d %s\n",
                    item.Name(), index, commit.Hash.String())
                return nil, err
            }
            for _, key := range item.Provides() {
                val, ok := update[key]
                if !ok {
                    panic(fmt.Sprintf("%s: Consume() did not return %s", item.Name(), key))
                }
                state[key] = val
            }
        }
    }
    onProgress(len(commits), len(commits))
    result := map[LeafPipelineItem]interface{}{}
    for _, item := range pipeline.items {
        if casted, ok := item.(LeafPipelineItem); ok {
            result[casted] = casted.Finalize()
        }
    }
    result[nil] = &CommonAnalysisResult{
        BeginTime:     commits[0].Author.When.Unix(),
        EndTime:       commits[len(commits)-1].Author.When.Unix(),
        CommitsNumber: len(commits),
        RunTime:       time.Since(startRunTime),
    }
    return result, nil
}
// LoadCommitsFromFile reads the list of commit hashes (one hex hash per line) from the
// file at the given path, or from standard input if the path is "-", and resolves them
// in the repository.
func LoadCommitsFromFile(path string, repository *git.Repository) ([]*object.Commit, error) {
    var file io.ReadCloser
    if path != "-" {
        var err error
        file, err = os.Open(path)
        if err != nil {
            return nil, err
        }
        defer file.Close()
    } else {
        file = os.Stdin
    }
    scanner := bufio.NewScanner(file)
    commits := []*object.Commit{}
    for scanner.Scan() {
        hash := plumbing.NewHash(scanner.Text())
        if len(hash) != 20 {
            return nil, errors.New("invalid commit hash " + scanner.Text())
        }
        commit, err := repository.CommitObject(hash)
        if err != nil {
            return nil, err
        }
        commits = append(commits, commit)
    }
    return commits, nil
}
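
// A minimal usage sketch; "commits.txt" is a hypothetical file holding one full
// 40-character hash per line, in the order the commits should be analysed:
//
//    commits, err := LoadCommitsFromFile("commits.txt", repository)
//    if err == nil {
//        results, _ := pipeline.Run(commits)
//        _ = results
//    }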