uast.go 19 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583
  1. package uast
  2. import (
  3. "context"
  4. "errors"
  5. "fmt"
  6. "io"
  7. "io/ioutil"
  8. "os"
  9. "path"
  10. "runtime"
  11. "strings"
  12. "sync"
  13. "time"
  14. "github.com/gogo/protobuf/proto"
  15. "github.com/jeffail/tunny"
  16. "gopkg.in/bblfsh/client-go.v2"
  17. "gopkg.in/bblfsh/sdk.v1/protocol"
  18. "gopkg.in/bblfsh/sdk.v1/uast"
  19. "gopkg.in/src-d/go-git.v4"
  20. "gopkg.in/src-d/go-git.v4/plumbing"
  21. "gopkg.in/src-d/go-git.v4/plumbing/object"
  22. "gopkg.in/src-d/go-git.v4/utils/merkletrie"
  23. "gopkg.in/src-d/hercules.v5/internal/core"
  24. "gopkg.in/src-d/hercules.v5/internal/pb"
  25. items "gopkg.in/src-d/hercules.v5/internal/plumbing"
  26. )
// Extractor retrieves UASTs from Babelfish server which correspond to changed files in a commit.
// It is a PipelineItem.
type Extractor struct {
	core.NoopMerger
	// Endpoint is the Babelfish server address, e.g. "0.0.0.0:9432".
	Endpoint string
	// Context produces the context (and optional cancel func) used for each
	// parse request; Configure() installs a timeout-bearing variant.
	Context func() (context.Context, context.CancelFunc)
	// PoolSize is the number of worker goroutines; 0 means runtime.NumCPU().
	PoolSize int
	// FailOnErrors makes Consume() return an error on any parse failure
	// instead of logging to stderr and continuing.
	FailOnErrors bool
	// ProcessedFiles counts how many times each file name was submitted for parsing.
	ProcessedFiles map[string]int
	// clients holds one Babelfish client per pool worker.
	clients []*bblfsh.Client
	// pool is the tunny goroutine pool executing uastTask jobs.
	pool *tunny.Pool
}
const (
	// ConfigUASTEndpoint is the name of the configuration option (Extractor.Configure())
	// which sets the Babelfish server address.
	ConfigUASTEndpoint = "ConfigUASTEndpoint"
	// ConfigUASTTimeout is the name of the configuration option (Extractor.Configure())
	// which sets the maximum amount of time to wait for a Babelfish server response.
	ConfigUASTTimeout = "ConfigUASTTimeout"
	// ConfigUASTPoolSize is the name of the configuration option (Extractor.Configure())
	// which sets the number of goroutines to run for UAST parse queries.
	ConfigUASTPoolSize = "ConfigUASTPoolSize"
	// ConfigUASTFailOnErrors is the name of the configuration option (Extractor.Configure())
	// which enables early exit in case of any Babelfish UAST parsing errors.
	ConfigUASTFailOnErrors = "ConfigUASTFailOnErrors"
	// FeatureUast is the name of the Pipeline feature which activates all the items related to UAST.
	FeatureUast = "uast"
	// DependencyUasts is the name of the dependency provided by Extractor.
	DependencyUasts = "uasts"
)
// uastTask is one unit of work for the goroutine pool: parse Data (the
// contents of file Name at blob Hash) and store the resulting UAST in Dest.
type uastTask struct {
	Lock *sync.RWMutex // guards concurrent access to Dest and Errors
	Dest map[plumbing.Hash]*uast.Node
	Name string
	Hash plumbing.Hash
	Data []byte
	// Errors is a shared failure list; workers append to it under Lock.
	Errors *[]error
}
// worker is the tunny.Worker implementation: it pairs a dedicated Babelfish
// client with the owning Extractor.
type worker struct {
	Client *bblfsh.Client
	Extractor *Extractor
}
// Process will synchronously perform a job and return the result.
// It implements tunny.Worker; data is expected to be a uastTask.
func (w worker) Process(data interface{}) interface{} {
	return w.Extractor.extractTask(w.Client, data)
}
// BlockUntilReady implements tunny.Worker; this worker is always ready.
func (w worker) BlockUntilReady() {}

// Interrupt implements tunny.Worker; parse jobs are not interruptible here.
func (w worker) Interrupt() {}

// Terminate implements tunny.Worker; there is nothing to clean up per worker.
func (w worker) Terminate() {}
// Name of this PipelineItem. Uniquely identifies the type, used for mapping keys, etc.
func (exr *Extractor) Name() string {
	return "UAST"
}
  80. // Provides returns the list of names of entities which are produced by this PipelineItem.
  81. // Each produced entity will be inserted into `deps` of dependent Consume()-s according
  82. // to this list. Also used by core.Registry to build the global map of providers.
  83. func (exr *Extractor) Provides() []string {
  84. arr := [...]string{DependencyUasts}
  85. return arr[:]
  86. }
  87. // Requires returns the list of names of entities which are needed by this PipelineItem.
  88. // Each requested entity will be inserted into `deps` of Consume(). In turn, those
  89. // entities are Provides() upstream.
  90. func (exr *Extractor) Requires() []string {
  91. arr := [...]string{items.DependencyTreeChanges, items.DependencyBlobCache}
  92. return arr[:]
  93. }
  94. // Features which must be enabled for this PipelineItem to be automatically inserted into the DAG.
  95. func (exr *Extractor) Features() []string {
  96. arr := [...]string{FeatureUast}
  97. return arr[:]
  98. }
  99. // ListConfigurationOptions returns the list of changeable public properties of this PipelineItem.
  100. func (exr *Extractor) ListConfigurationOptions() []core.ConfigurationOption {
  101. options := [...]core.ConfigurationOption{{
  102. Name: ConfigUASTEndpoint,
  103. Description: "How many days there are in a single band.",
  104. Flag: "bblfsh",
  105. Type: core.StringConfigurationOption,
  106. Default: "0.0.0.0:9432"}, {
  107. Name: ConfigUASTTimeout,
  108. Description: "Babelfish's server timeout in seconds.",
  109. Flag: "bblfsh-timeout",
  110. Type: core.IntConfigurationOption,
  111. Default: 20}, {
  112. Name: ConfigUASTPoolSize,
  113. Description: "Number of goroutines to extract UASTs.",
  114. Flag: "bblfsh-pool-size",
  115. Type: core.IntConfigurationOption,
  116. Default: runtime.NumCPU() * 2}, {
  117. Name: ConfigUASTFailOnErrors,
  118. Description: "Panic if there is a UAST extraction error.",
  119. Flag: "bblfsh-fail-on-error",
  120. Type: core.BoolConfigurationOption,
  121. Default: false},
  122. }
  123. return options[:]
  124. }
  125. // Configure sets the properties previously published by ListConfigurationOptions().
  126. func (exr *Extractor) Configure(facts map[string]interface{}) {
  127. if val, exists := facts[ConfigUASTEndpoint].(string); exists {
  128. exr.Endpoint = val
  129. }
  130. if val, exists := facts[ConfigUASTTimeout].(int); exists {
  131. exr.Context = func() (context.Context, context.CancelFunc) {
  132. return context.WithTimeout(context.Background(),
  133. time.Duration(val)*time.Second)
  134. }
  135. }
  136. if val, exists := facts[ConfigUASTPoolSize].(int); exists {
  137. exr.PoolSize = val
  138. }
  139. if val, exists := facts[ConfigUASTFailOnErrors].(bool); exists {
  140. exr.FailOnErrors = val
  141. }
  142. }
// Initialize resets the temporary caches and prepares this PipelineItem for a series of Consume()
// calls. The repository which is going to be analysed is supplied as an argument.
func (exr *Extractor) Initialize(repository *git.Repository) {
	if exr.Context == nil {
		// No timeout was configured: fall back to a plain background context.
		exr.Context = func() (context.Context, context.CancelFunc) {
			return context.Background(), nil
		}
	}
	poolSize := exr.PoolSize
	if poolSize == 0 {
		poolSize = runtime.NumCPU()
	}
	// One gRPC client per worker goroutine. A connection failure is fatal:
	// nothing can be parsed without a reachable Babelfish server.
	exr.clients = make([]*bblfsh.Client, poolSize)
	for i := 0; i < poolSize; i++ {
		client, err := bblfsh.NewClient(exr.Endpoint)
		if err != nil {
			panic(err)
		}
		exr.clients[i] = client
	}
	if exr.pool != nil {
		exr.pool.Close()
	}
	{
		// The constructor closure is invoked once per worker; the captured
		// counter hands each worker its own dedicated client.
		// NOTE(review): this assumes tunny.New calls the constructor exactly
		// poolSize times and not concurrently — confirm against the tunny
		// version in use.
		i := 0
		exr.pool = tunny.New(poolSize, func() tunny.Worker {
			w := worker{Client: exr.clients[i], Extractor: exr}
			i++
			return w
		})
	}
	if exr.pool == nil {
		panic("UAST goroutine pool was not created")
	}
	exr.ProcessedFiles = map[string]int{}
}
// Consume runs this PipelineItem on the next commit data.
// `deps` contain all the results from upstream PipelineItem-s as requested by Requires().
// Additionally, DependencyCommit is always present there and represents the analysed *object.Commit.
// This function returns the mapping with analysis results. The keys must be the same as
// in Provides(). If there was an error, nil is returned.
func (exr *Extractor) Consume(deps map[string]interface{}) (map[string]interface{}, error) {
	cache := deps[items.DependencyBlobCache].(map[plumbing.Hash]*items.CachedBlob)
	treeDiffs := deps[items.DependencyTreeChanges].(object.Changes)
	uasts := map[plumbing.Hash]*uast.Node{}
	lock := sync.RWMutex{}
	errs := make([]error, 0)
	wg := sync.WaitGroup{}
	// submit schedules an asynchronous parse of the post-change blob; results
	// and failures are collected into uasts/errs by the workers under lock.
	submit := func(change *object.Change) {
		exr.ProcessedFiles[change.To.Name]++
		wg.Add(1)
		go func(task interface{}) {
			exr.pool.Process(task)
			wg.Done()
		}(uastTask{
			Lock:   &lock,
			Dest:   uasts,
			Name:   change.To.Name,
			Hash:   change.To.TreeEntry.Hash,
			Data:   cache[change.To.TreeEntry.Hash].Data,
			Errors: &errs,
		})
	}
	for _, change := range treeDiffs {
		action, err := change.Action()
		if err != nil {
			return nil, err
		}
		switch action {
		case merkletrie.Insert:
			submit(change)
		case merkletrie.Delete:
			// A deleted file has no "after" state — nothing to parse.
			continue
		case merkletrie.Modify:
			submit(change)
		}
	}
	// Block until every scheduled parse has finished before reading errs.
	wg.Wait()
	if len(errs) > 0 {
		msgs := make([]string, len(errs))
		for i, err := range errs {
			msgs[i] = err.Error()
		}
		joined := strings.Join(msgs, "\n")
		if exr.FailOnErrors {
			return nil, errors.New(joined)
		}
		// Best-effort mode: report the failures but keep the pipeline going.
		fmt.Fprintln(os.Stderr, joined)
	}
	return map[string]interface{}{DependencyUasts: uasts}, nil
}
// Fork clones this PipelineItem. The same instance is shared between all the
// branches because Extractor carries no per-branch state.
func (exr *Extractor) Fork(n int) []core.PipelineItem {
	return core.ForkSamePipelineItem(exr, n)
}
  238. func (exr *Extractor) extractUAST(
  239. client *bblfsh.Client, name string, data []byte) (*uast.Node, error) {
  240. request := client.NewParseRequest()
  241. request.Content(string(data))
  242. request.Filename(name)
  243. ctx, cancel := exr.Context()
  244. if cancel != nil {
  245. defer cancel()
  246. }
  247. response, err := request.DoWithContext(ctx)
  248. if err != nil {
  249. if strings.Contains("missing driver", err.Error()) {
  250. return nil, nil
  251. }
  252. return nil, err
  253. }
  254. if response.Status != protocol.Ok {
  255. return nil, errors.New(strings.Join(response.Errors, "\n"))
  256. }
  257. if err != nil {
  258. return nil, err
  259. }
  260. return response.UAST, nil
  261. }
  262. func (exr *Extractor) extractTask(client *bblfsh.Client, data interface{}) interface{} {
  263. task := data.(uastTask)
  264. node, err := exr.extractUAST(client, task.Name, task.Data)
  265. task.Lock.Lock()
  266. defer task.Lock.Unlock()
  267. if err != nil {
  268. *task.Errors = append(*task.Errors,
  269. fmt.Errorf("\nfile %s, blob %s: %v", task.Name, task.Hash.String(), err))
  270. return nil
  271. }
  272. if node != nil {
  273. task.Dest[task.Hash] = node
  274. }
  275. return nil
  276. }
// Change is the type of the items in the list of changes which is provided by Changes.
type Change struct {
	// Before is the UAST of the file prior to the change; nil for insertions.
	Before *uast.Node
	// After is the UAST of the file following the change; nil for deletions.
	After *uast.Node
	// Change is the underlying git tree diff entry.
	Change *object.Change
}
const (
	// DependencyUastChanges is the name of the dependency provided by Changes.
	DependencyUastChanges = "changed_uasts"
)
// Changes is a structured analog of TreeDiff: it provides UASTs for every logical change
// in a commit. It is a PipelineItem.
type Changes struct {
	core.NoopMerger
	// cache maps blob hashes to their parsed UASTs across commits, so that
	// the "before" side of a modification can be looked up later.
	cache map[plumbing.Hash]*uast.Node
}
// Name of this PipelineItem. Uniquely identifies the type, used for mapping keys, etc.
func (uc *Changes) Name() string {
	return "UASTChanges"
}
  297. // Provides returns the list of names of entities which are produced by this PipelineItem.
  298. // Each produced entity will be inserted into `deps` of dependent Consume()-s according
  299. // to this list. Also used by core.Registry to build the global map of providers.
  300. func (uc *Changes) Provides() []string {
  301. arr := [...]string{DependencyUastChanges}
  302. return arr[:]
  303. }
  304. // Requires returns the list of names of entities which are needed by this PipelineItem.
  305. // Each requested entity will be inserted into `deps` of Consume(). In turn, those
  306. // entities are Provides() upstream.
  307. func (uc *Changes) Requires() []string {
  308. arr := [...]string{DependencyUasts, items.DependencyTreeChanges}
  309. return arr[:]
  310. }
  311. // Features which must be enabled for this PipelineItem to be automatically inserted into the DAG.
  312. func (uc *Changes) Features() []string {
  313. arr := [...]string{FeatureUast}
  314. return arr[:]
  315. }
// ListConfigurationOptions returns the list of changeable public properties of this PipelineItem.
// Changes has no configurable properties; an empty (non-nil) slice is returned.
func (uc *Changes) ListConfigurationOptions() []core.ConfigurationOption {
	return []core.ConfigurationOption{}
}
// Configure sets the properties previously published by ListConfigurationOptions().
// Changes has nothing to configure, so this is a no-op.
func (uc *Changes) Configure(facts map[string]interface{}) {}
  322. // Initialize resets the temporary caches and prepares this PipelineItem for a series of Consume()
  323. // calls. The repository which is going to be analysed is supplied as an argument.
  324. func (uc *Changes) Initialize(repository *git.Repository) {
  325. uc.cache = map[plumbing.Hash]*uast.Node{}
  326. }
// Consume runs this PipelineItem on the next commit data.
// `deps` contain all the results from upstream PipelineItem-s as requested by Requires().
// Additionally, DependencyCommit is always present there and represents the analysed *object.Commit.
// This function returns the mapping with analysis results. The keys must be the same as
// in Provides(). If there was an error, nil is returned.
func (uc *Changes) Consume(deps map[string]interface{}) (map[string]interface{}, error) {
	uasts := deps[DependencyUasts].(map[plumbing.Hash]*uast.Node)
	treeDiffs := deps[items.DependencyTreeChanges].(object.Changes)
	commit := make([]Change, 0, len(treeDiffs))
	for _, change := range treeDiffs {
		action, err := change.Action()
		if err != nil {
			return nil, err
		}
		switch action {
		case merkletrie.Insert:
			// New file: remember its UAST for future "before" lookups.
			hashTo := change.To.TreeEntry.Hash
			uastTo := uasts[hashTo]
			commit = append(commit, Change{Before: nil, After: uastTo, Change: change})
			uc.cache[hashTo] = uastTo
		case merkletrie.Delete:
			// Removed file: emit its cached "before" UAST and evict it.
			hashFrom := change.From.TreeEntry.Hash
			commit = append(commit, Change{Before: uc.cache[hashFrom], After: nil, Change: change})
			delete(uc.cache, hashFrom)
		case merkletrie.Modify:
			hashFrom := change.From.TreeEntry.Hash
			hashTo := change.To.TreeEntry.Hash
			uastTo := uasts[hashTo]
			commit = append(commit, Change{Before: uc.cache[hashFrom], After: uastTo, Change: change})
			// Delete before insert: keeps the cache correct even if
			// hashFrom equals hashTo (e.g. a metadata-only modification).
			delete(uc.cache, hashFrom)
			uc.cache[hashTo] = uastTo
		}
	}
	return map[string]interface{}{DependencyUastChanges: commit}, nil
}
  362. // Fork clones this PipelineItem.
  363. func (uc *Changes) Fork(n int) []core.PipelineItem {
  364. ucs := make([]core.PipelineItem, n)
  365. for i := 0; i < n; i++ {
  366. clone := &Changes{
  367. cache: map[plumbing.Hash]*uast.Node{},
  368. }
  369. for key, val := range uc.cache {
  370. clone.cache[key] = val
  371. }
  372. ucs[i] = clone
  373. }
  374. return ucs
  375. }
// ChangesSaver dumps changed files and corresponding UASTs for every commit.
// it is a LeafPipelineItem.
type ChangesSaver struct {
	core.NoopMerger
	core.OneShotMergeProcessor
	// OutputPath points to the target directory with UASTs
	OutputPath string
	// repository is the analysed repository, used to read blob contents.
	repository *git.Repository
	// result accumulates the per-commit change lists collected by Consume().
	result [][]Change
}
const (
	// ConfigUASTChangesSaverOutputPath is the name of the configuration option
	// (ChangesSaver.Configure()) which sets the target directory where to save the files.
	ConfigUASTChangesSaverOutputPath = "ChangesSaver.OutputPath"
)
// Name of this PipelineItem. Uniquely identifies the type, used for mapping keys, etc.
func (saver *ChangesSaver) Name() string {
	return "UASTChangesSaver"
}
// Provides returns the list of names of entities which are produced by this PipelineItem.
// Each produced entity will be inserted into `deps` of dependent Consume()-s according
// to this list. Also used by core.Registry to build the global map of providers.
// ChangesSaver is a leaf item and provides nothing downstream.
func (saver *ChangesSaver) Provides() []string {
	return []string{}
}
  401. // Requires returns the list of names of entities which are needed by this PipelineItem.
  402. // Each requested entity will be inserted into `deps` of Consume(). In turn, those
  403. // entities are Provides() upstream.
  404. func (saver *ChangesSaver) Requires() []string {
  405. arr := [...]string{DependencyUastChanges}
  406. return arr[:]
  407. }
  408. // Features which must be enabled for this PipelineItem to be automatically inserted into the DAG.
  409. func (saver *ChangesSaver) Features() []string {
  410. arr := [...]string{FeatureUast}
  411. return arr[:]
  412. }
  413. // ListConfigurationOptions returns the list of changeable public properties of this PipelineItem.
  414. func (saver *ChangesSaver) ListConfigurationOptions() []core.ConfigurationOption {
  415. options := [...]core.ConfigurationOption{{
  416. Name: ConfigUASTChangesSaverOutputPath,
  417. Description: "The target directory where to store the changed UAST files.",
  418. Flag: "changed-uast-dir",
  419. Type: core.StringConfigurationOption,
  420. Default: "."},
  421. }
  422. return options[:]
  423. }
// Flag for the command line switch which enables this analysis.
func (saver *ChangesSaver) Flag() string {
	return "dump-uast-changes"
}
// Description returns the text which explains what the analysis is doing.
func (saver *ChangesSaver) Description() string {
	return "Saves UASTs and file contents on disk for each commit."
}
  432. // Configure sets the properties previously published by ListConfigurationOptions().
  433. func (saver *ChangesSaver) Configure(facts map[string]interface{}) {
  434. if val, exists := facts[ConfigUASTChangesSaverOutputPath]; exists {
  435. saver.OutputPath = val.(string)
  436. }
  437. }
  438. // Initialize resets the temporary caches and prepares this PipelineItem for a series of Consume()
  439. // calls. The repository which is going to be analysed is supplied as an argument.
  440. func (saver *ChangesSaver) Initialize(repository *git.Repository) {
  441. saver.repository = repository
  442. saver.result = [][]Change{}
  443. saver.OneShotMergeProcessor.Initialize()
  444. }
  445. // Consume runs this PipelineItem on the next commit data.
  446. // `deps` contain all the results from upstream PipelineItem-s as requested by Requires().
  447. // Additionally, DependencyCommit is always present there and represents the analysed *object.Commit.
  448. // This function returns the mapping with analysis results. The keys must be the same as
  449. // in Provides(). If there was an error, nil is returned.
  450. func (saver *ChangesSaver) Consume(deps map[string]interface{}) (map[string]interface{}, error) {
  451. if !saver.ShouldConsumeCommit(deps) {
  452. return nil, nil
  453. }
  454. changes := deps[DependencyUastChanges].([]Change)
  455. saver.result = append(saver.result, changes)
  456. return nil, nil
  457. }
// Finalize returns the result of the analysis. Further Consume() calls are not expected.
// The result is the accumulated [][]Change — one list per consumed commit.
func (saver *ChangesSaver) Finalize() interface{} {
	return saver.result
}
// Fork clones this PipelineItem. The same instance is shared between all the
// branches; merges are deduplicated by OneShotMergeProcessor.
func (saver *ChangesSaver) Fork(n int) []core.PipelineItem {
	return core.ForkSamePipelineItem(saver, n)
}
  466. // Serialize converts the analysis result as returned by Finalize() to text or bytes.
  467. // The text format is YAML and the bytes format is Protocol Buffers.
  468. func (saver *ChangesSaver) Serialize(result interface{}, binary bool, writer io.Writer) error {
  469. saverResult := result.([][]Change)
  470. fileNames := saver.dumpFiles(saverResult)
  471. if binary {
  472. return saver.serializeBinary(fileNames, writer)
  473. }
  474. saver.serializeText(fileNames, writer)
  475. return nil
  476. }
  477. func (saver *ChangesSaver) dumpFiles(result [][]Change) []*pb.UASTChange {
  478. fileNames := []*pb.UASTChange{}
  479. for i, changes := range result {
  480. for j, change := range changes {
  481. if change.Before == nil || change.After == nil {
  482. continue
  483. }
  484. record := &pb.UASTChange{FileName: change.Change.To.Name}
  485. bs, _ := change.Before.Marshal()
  486. record.UastBefore = path.Join(saver.OutputPath, fmt.Sprintf(
  487. "%d_%d_before_%s.pb", i, j, change.Change.From.TreeEntry.Hash.String()))
  488. ioutil.WriteFile(record.UastBefore, bs, 0666)
  489. blob, _ := saver.repository.BlobObject(change.Change.From.TreeEntry.Hash)
  490. s, _ := (&object.File{Blob: *blob}).Contents()
  491. record.SrcBefore = path.Join(saver.OutputPath, fmt.Sprintf(
  492. "%d_%d_before_%s.src", i, j, change.Change.From.TreeEntry.Hash.String()))
  493. ioutil.WriteFile(record.SrcBefore, []byte(s), 0666)
  494. bs, _ = change.After.Marshal()
  495. record.UastAfter = path.Join(saver.OutputPath, fmt.Sprintf(
  496. "%d_%d_after_%s.pb", i, j, change.Change.To.TreeEntry.Hash.String()))
  497. ioutil.WriteFile(record.UastAfter, bs, 0666)
  498. blob, _ = saver.repository.BlobObject(change.Change.To.TreeEntry.Hash)
  499. s, _ = (&object.File{Blob: *blob}).Contents()
  500. record.SrcAfter = path.Join(saver.OutputPath, fmt.Sprintf(
  501. "%d_%d_after_%s.src", i, j, change.Change.To.TreeEntry.Hash.String()))
  502. ioutil.WriteFile(record.SrcAfter, []byte(s), 0666)
  503. fileNames = append(fileNames, record)
  504. }
  505. }
  506. return fileNames
  507. }
  508. func (saver *ChangesSaver) serializeText(result []*pb.UASTChange, writer io.Writer) {
  509. for _, sc := range result {
  510. kv := [...]string{
  511. "file: " + sc.FileName,
  512. "src0: " + sc.SrcBefore, "src1: " + sc.SrcAfter,
  513. "uast0: " + sc.UastBefore, "uast1: " + sc.UastAfter,
  514. }
  515. fmt.Fprintf(writer, " - {%s}\n", strings.Join(kv[:], ", "))
  516. }
  517. }
  518. func (saver *ChangesSaver) serializeBinary(result []*pb.UASTChange, writer io.Writer) error {
  519. message := pb.UASTChangesSaverResults{Changes: result}
  520. serialized, err := proto.Marshal(&message)
  521. if err != nil {
  522. return err
  523. }
  524. writer.Write(serialized)
  525. return nil
  526. }
// init registers the three UAST pipeline items in the global registry so the
// pipeline builder can discover them by name.
func init() {
	core.Registry.Register(&Extractor{})
	core.Registry.Register(&Changes{})
	core.Registry.Register(&ChangesSaver{})
}