uast.go 20 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595
  1. package uast
  2. import (
  3. "context"
  4. "errors"
  5. "fmt"
  6. "io"
  7. "io/ioutil"
  8. "log"
  9. "os"
  10. "path"
  11. "runtime"
  12. "strings"
  13. "sync"
  14. "time"
  15. "github.com/gogo/protobuf/proto"
  16. "github.com/jeffail/tunny"
  17. "gopkg.in/bblfsh/client-go.v2"
  18. "gopkg.in/bblfsh/sdk.v1/protocol"
  19. "gopkg.in/bblfsh/sdk.v1/uast"
  20. "gopkg.in/src-d/go-git.v4"
  21. "gopkg.in/src-d/go-git.v4/plumbing"
  22. "gopkg.in/src-d/go-git.v4/plumbing/object"
  23. "gopkg.in/src-d/go-git.v4/utils/merkletrie"
  24. "gopkg.in/src-d/hercules.v7/internal/core"
  25. "gopkg.in/src-d/hercules.v7/internal/pb"
  26. items "gopkg.in/src-d/hercules.v7/internal/plumbing"
  27. )
// Extractor retrieves UASTs from Babelfish server which correspond to changed files in a commit.
// It is a PipelineItem.
type Extractor struct {
	core.NoopMerger
	// Endpoint is the Babelfish server address to connect to.
	Endpoint string
	// Context creates the context used for each parse request; the returned
	// CancelFunc may be nil when no timeout is configured.
	Context func() (context.Context, context.CancelFunc)
	// PoolSize is the number of parse workers; 0 means runtime.NumCPU().
	PoolSize int
	// FailOnErrors, when true, makes Consume() return an error on any UAST
	// parse failure instead of printing the problems to stderr and going on.
	FailOnErrors bool
	// ProcessedFiles counts how many times each file name was submitted for parsing.
	ProcessedFiles map[string]int
	// clients are the Babelfish connections, one per pool worker.
	clients []*bblfsh.Client
	// pool is the goroutine pool which executes the parse tasks.
	pool *tunny.Pool
}
const (
	// ConfigUASTEndpoint is the name of the configuration option (Extractor.Configure())
	// which sets the Babelfish server address.
	ConfigUASTEndpoint = "ConfigUASTEndpoint"
	// ConfigUASTTimeout is the name of the configuration option (Extractor.Configure())
	// which sets the maximum amount of time to wait for a Babelfish server response.
	ConfigUASTTimeout = "ConfigUASTTimeout"
	// ConfigUASTPoolSize is the name of the configuration option (Extractor.Configure())
	// which sets the number of goroutines to run for UAST parse queries.
	ConfigUASTPoolSize = "ConfigUASTPoolSize"
	// ConfigUASTFailOnErrors is the name of the configuration option (Extractor.Configure())
	// which enables early exit in case of any Babelfish UAST parsing errors.
	ConfigUASTFailOnErrors = "ConfigUASTFailOnErrors"
	// FeatureUast is the name of the Pipeline feature which activates all the items related to UAST.
	FeatureUast = "uast"
	// DependencyUasts is the name of the dependency provided by Extractor.
	DependencyUasts = "uasts"
)
// uastTask is the unit of work submitted to the goroutine pool by Extractor.Consume().
type uastTask struct {
	Lock   *sync.RWMutex                // guards concurrent writes to Dest and Errors
	Dest   map[plumbing.Hash]*uast.Node // successfully parsed UASTs land here, keyed by blob hash
	Name   string                       // file name, passed to Babelfish for language detection
	Hash   plumbing.Hash                // git blob hash of the file contents
	Data   []byte                       // raw file contents to parse
	Errors *[]error                     // shared slice collecting parse errors from all workers
}
// worker is a tunny.Worker which parses UASTs through its dedicated Babelfish client.
type worker struct {
	Client    *bblfsh.Client // connection owned exclusively by this worker
	Extractor *Extractor     // back-reference for extractTask()
}
// Process will synchronously perform a job and return the result.
// data is expected to be a uastTask; the return value is always nil
// (results and errors are written through the task's shared pointers).
func (w worker) Process(data interface{}) interface{} {
	return w.Extractor.extractTask(w.Client, data)
}
// BlockUntilReady is a no-op: the worker is always ready (tunny.Worker interface).
func (w worker) BlockUntilReady() {}

// Interrupt is a no-op: there is no way to cancel an in-flight parse here (tunny.Worker interface).
func (w worker) Interrupt() {}

// Terminate is a no-op: the Babelfish clients are owned by Extractor (tunny.Worker interface).
func (w worker) Terminate() {}
// Name of this PipelineItem. Uniquely identifies the type, used for mapping keys, etc.
func (exr *Extractor) Name() string {
	return "UAST"
}
  81. // Provides returns the list of names of entities which are produced by this PipelineItem.
  82. // Each produced entity will be inserted into `deps` of dependent Consume()-s according
  83. // to this list. Also used by core.Registry to build the global map of providers.
  84. func (exr *Extractor) Provides() []string {
  85. arr := [...]string{DependencyUasts}
  86. return arr[:]
  87. }
  88. // Requires returns the list of names of entities which are needed by this PipelineItem.
  89. // Each requested entity will be inserted into `deps` of Consume(). In turn, those
  90. // entities are Provides() upstream.
  91. func (exr *Extractor) Requires() []string {
  92. arr := [...]string{items.DependencyTreeChanges, items.DependencyBlobCache}
  93. return arr[:]
  94. }
  95. // Features which must be enabled for this PipelineItem to be automatically inserted into the DAG.
  96. func (exr *Extractor) Features() []string {
  97. arr := [...]string{FeatureUast}
  98. return arr[:]
  99. }
  100. // ListConfigurationOptions returns the list of changeable public properties of this PipelineItem.
  101. func (exr *Extractor) ListConfigurationOptions() []core.ConfigurationOption {
  102. options := [...]core.ConfigurationOption{{
  103. Name: ConfigUASTEndpoint,
  104. Description: "How many days there are in a single band.",
  105. Flag: "bblfsh",
  106. Type: core.StringConfigurationOption,
  107. Default: "0.0.0.0:9432"}, {
  108. Name: ConfigUASTTimeout,
  109. Description: "Babelfish's server timeout in seconds.",
  110. Flag: "bblfsh-timeout",
  111. Type: core.IntConfigurationOption,
  112. Default: 20}, {
  113. Name: ConfigUASTPoolSize,
  114. Description: "Number of goroutines to extract UASTs.",
  115. Flag: "bblfsh-pool-size",
  116. Type: core.IntConfigurationOption,
  117. Default: runtime.NumCPU() * 2}, {
  118. Name: ConfigUASTFailOnErrors,
  119. Description: "Panic if there is a UAST extraction error.",
  120. Flag: "bblfsh-fail-on-error",
  121. Type: core.BoolConfigurationOption,
  122. Default: false},
  123. }
  124. return options[:]
  125. }
  126. // Configure sets the properties previously published by ListConfigurationOptions().
  127. func (exr *Extractor) Configure(facts map[string]interface{}) error {
  128. if val, exists := facts[ConfigUASTEndpoint].(string); exists {
  129. exr.Endpoint = val
  130. }
  131. if val, exists := facts[ConfigUASTTimeout].(int); exists {
  132. exr.Context = func() (context.Context, context.CancelFunc) {
  133. return context.WithTimeout(context.Background(),
  134. time.Duration(val)*time.Second)
  135. }
  136. }
  137. if val, exists := facts[ConfigUASTPoolSize].(int); exists {
  138. exr.PoolSize = val
  139. }
  140. if val, exists := facts[ConfigUASTFailOnErrors].(bool); exists {
  141. exr.FailOnErrors = val
  142. }
  143. return nil
  144. }
// Initialize resets the temporary caches and prepares this PipelineItem for a series of Consume()
// calls. The repository which is going to be analysed is supplied as an argument.
// It dials one Babelfish client per pool worker and (re)creates the goroutine pool.
func (exr *Extractor) Initialize(repository *git.Repository) error {
	if exr.Context == nil {
		// No timeout configured - use an unbounded context with a nil CancelFunc.
		exr.Context = func() (context.Context, context.CancelFunc) {
			return context.Background(), nil
		}
	}
	poolSize := exr.PoolSize
	if poolSize == 0 {
		poolSize = runtime.NumCPU()
	}
	// One dedicated connection per worker so that requests never share a client.
	exr.clients = make([]*bblfsh.Client, poolSize)
	for i := 0; i < poolSize; i++ {
		client, err := bblfsh.NewClient(exr.Endpoint)
		if err != nil {
			if err.Error() == "context deadline exceeded" {
				// The most common failure: the server is simply not up.
				log.Println("Looks like the Babelfish server is not running. Please refer " +
					"to https://docs.sourced.tech/babelfish/using-babelfish/getting-started#running-with-docker-recommended")
			}
			return err
		}
		exr.clients[i] = client
	}
	// Release the workers of a previous Initialize() round, if any.
	if exr.pool != nil {
		exr.pool.Close()
	}
	{
		// Hand each constructed worker its own client via the captured counter.
		// NOTE(review): assumes tunny invokes this constructor serially during New();
		// i is not synchronized - confirm against the tunny version in use.
		i := 0
		exr.pool = tunny.New(poolSize, func() tunny.Worker {
			w := worker{Client: exr.clients[i], Extractor: exr}
			i++
			return w
		})
	}
	if exr.pool == nil {
		panic("UAST goroutine pool was not created")
	}
	exr.ProcessedFiles = map[string]int{}
	return nil
}
// Consume runs this PipelineItem on the next commit data.
// `deps` contain all the results from upstream PipelineItem-s as requested by Requires().
// Additionally, DependencyCommit is always present there and represents the analysed *object.Commit.
// This function returns the mapping with analysis results. The keys must be the same as
// in Provides(). If there was an error, nil is returned.
//
// Every inserted or modified file is parsed concurrently through the worker pool;
// deletions are skipped because there is nothing new to parse.
func (exr *Extractor) Consume(deps map[string]interface{}) (map[string]interface{}, error) {
	cache := deps[items.DependencyBlobCache].(map[plumbing.Hash]*items.CachedBlob)
	treeDiffs := deps[items.DependencyTreeChanges].(object.Changes)
	uasts := map[plumbing.Hash]*uast.Node{}
	// lock guards both uasts and errs, which the pool workers write concurrently.
	lock := sync.RWMutex{}
	errs := make([]error, 0)
	wg := sync.WaitGroup{}
	submit := func(change *object.Change) {
		exr.ProcessedFiles[change.To.Name]++
		wg.Add(1)
		// pool.Process blocks until a worker is free; the goroutine only exists
		// so that submissions overlap instead of serializing on the pool.
		go func(task interface{}) {
			exr.pool.Process(task)
			wg.Done()
		}(uastTask{
			Lock: &lock,
			Dest: uasts,
			Name: change.To.Name,
			Hash: change.To.TreeEntry.Hash,
			// NOTE(review): assumes the blob cache always contains this hash;
			// a missing entry would nil-dereference here - confirm the upstream
			// BlobCache item guarantees it for Insert/Modify changes.
			Data:   cache[change.To.TreeEntry.Hash].Data,
			Errors: &errs,
		})
	}
	for _, change := range treeDiffs {
		action, err := change.Action()
		if err != nil {
			return nil, err
		}
		switch action {
		case merkletrie.Insert:
			submit(change)
		case merkletrie.Delete:
			continue
		case merkletrie.Modify:
			submit(change)
		}
	}
	wg.Wait()
	if len(errs) > 0 {
		msgs := make([]string, len(errs))
		for i, err := range errs {
			msgs[i] = err.Error()
		}
		joined := strings.Join(msgs, "\n")
		if exr.FailOnErrors {
			return nil, errors.New(joined)
		}
		// Best-effort mode: report the problems and return the successful parses.
		fmt.Fprintln(os.Stderr, joined)
	}
	return map[string]interface{}{DependencyUasts: uasts}, nil
}
// Fork clones this PipelineItem. The same instance is reused for every branch
// because the extractor holds no per-branch state.
func (exr *Extractor) Fork(n int) []core.PipelineItem {
	return core.ForkSamePipelineItem(exr, n)
}
  245. func (exr *Extractor) extractUAST(
  246. client *bblfsh.Client, name string, data []byte) (*uast.Node, error) {
  247. request := client.NewParseRequest()
  248. request.Content(string(data))
  249. request.Filename(name)
  250. ctx, cancel := exr.Context()
  251. if cancel != nil {
  252. defer cancel()
  253. }
  254. response, err := request.DoWithContext(ctx)
  255. if err != nil {
  256. if strings.Contains("missing driver", err.Error()) {
  257. return nil, nil
  258. }
  259. return nil, err
  260. }
  261. if response.Status != protocol.Ok {
  262. return nil, errors.New(strings.Join(response.Errors, "\n"))
  263. }
  264. if err != nil {
  265. return nil, err
  266. }
  267. return response.UAST, nil
  268. }
  269. func (exr *Extractor) extractTask(client *bblfsh.Client, data interface{}) interface{} {
  270. task := data.(uastTask)
  271. node, err := exr.extractUAST(client, task.Name, task.Data)
  272. task.Lock.Lock()
  273. defer task.Lock.Unlock()
  274. if err != nil {
  275. *task.Errors = append(*task.Errors,
  276. fmt.Errorf("\nfile %s, blob %s: %v", task.Name, task.Hash.String(), err))
  277. return nil
  278. }
  279. if node != nil {
  280. task.Dest[task.Hash] = node
  281. }
  282. return nil
  283. }
// Change is the type of the items in the list of changes which is provided by Changes.
type Change struct {
	Before *uast.Node     // UAST of the file before the change; nil for insertions
	After  *uast.Node     // UAST of the file after the change; nil for deletions
	Change *object.Change // the underlying git tree change
}
const (
	// DependencyUastChanges is the name of the dependency provided by Changes.
	DependencyUastChanges = "changed_uasts"
)
// Changes is a structured analog of TreeDiff: it provides UASTs for every logical change
// in a commit. It is a PipelineItem.
type Changes struct {
	core.NoopMerger
	// cache maps blob hash -> UAST so that the "before" side of a later
	// modification/deletion can be recalled without re-parsing.
	cache map[plumbing.Hash]*uast.Node
}
  300. // Name of this PipelineItem. Uniquely identifies the type, used for mapping keys, etc.
  301. func (uc *Changes) Name() string {
  302. return "UASTChanges"
  303. }
  304. // Provides returns the list of names of entities which are produced by this PipelineItem.
  305. // Each produced entity will be inserted into `deps` of dependent Consume()-s according
  306. // to this list. Also used by core.Registry to build the global map of providers.
  307. func (uc *Changes) Provides() []string {
  308. arr := [...]string{DependencyUastChanges}
  309. return arr[:]
  310. }
  311. // Requires returns the list of names of entities which are needed by this PipelineItem.
  312. // Each requested entity will be inserted into `deps` of Consume(). In turn, those
  313. // entities are Provides() upstream.
  314. func (uc *Changes) Requires() []string {
  315. arr := [...]string{DependencyUasts, items.DependencyTreeChanges}
  316. return arr[:]
  317. }
  318. // Features which must be enabled for this PipelineItem to be automatically inserted into the DAG.
  319. func (uc *Changes) Features() []string {
  320. arr := [...]string{FeatureUast}
  321. return arr[:]
  322. }
  323. // ListConfigurationOptions returns the list of changeable public properties of this PipelineItem.
  324. func (uc *Changes) ListConfigurationOptions() []core.ConfigurationOption {
  325. return []core.ConfigurationOption{}
  326. }
// Configure sets the properties previously published by ListConfigurationOptions().
// Changes has no configuration options, so this is a no-op.
func (uc *Changes) Configure(facts map[string]interface{}) error {
	return nil
}
  331. // Initialize resets the temporary caches and prepares this PipelineItem for a series of Consume()
  332. // calls. The repository which is going to be analysed is supplied as an argument.
  333. func (uc *Changes) Initialize(repository *git.Repository) error {
  334. uc.cache = map[plumbing.Hash]*uast.Node{}
  335. return nil
  336. }
// Consume runs this PipelineItem on the next commit data.
// `deps` contain all the results from upstream PipelineItem-s as requested by Requires().
// Additionally, DependencyCommit is always present there and represents the analysed *object.Commit.
// This function returns the mapping with analysis results. The keys must be the same as
// in Provides(). If there was an error, nil is returned.
//
// For each tree change it pairs the "before" UAST (remembered in uc.cache from a
// previous commit) with the "after" UAST (freshly parsed by Extractor), then
// updates the cache so the next commit can find its "before" side.
func (uc *Changes) Consume(deps map[string]interface{}) (map[string]interface{}, error) {
	uasts := deps[DependencyUasts].(map[plumbing.Hash]*uast.Node)
	treeDiffs := deps[items.DependencyTreeChanges].(object.Changes)
	commit := make([]Change, 0, len(treeDiffs))
	for _, change := range treeDiffs {
		action, err := change.Action()
		if err != nil {
			return nil, err
		}
		switch action {
		case merkletrie.Insert:
			// New file: no "before" side; remember its UAST for later commits.
			hashTo := change.To.TreeEntry.Hash
			uastTo := uasts[hashTo]
			commit = append(commit, Change{Before: nil, After: uastTo, Change: change})
			uc.cache[hashTo] = uastTo
		case merkletrie.Delete:
			// Removed file: recall the cached "before" UAST and drop it from the cache.
			hashFrom := change.From.TreeEntry.Hash
			commit = append(commit, Change{Before: uc.cache[hashFrom], After: nil, Change: change})
			delete(uc.cache, hashFrom)
		case merkletrie.Modify:
			// Edited file: pair cached "before" with fresh "after", then rotate the cache entry.
			hashFrom := change.From.TreeEntry.Hash
			hashTo := change.To.TreeEntry.Hash
			uastTo := uasts[hashTo]
			commit = append(commit, Change{Before: uc.cache[hashFrom], After: uastTo, Change: change})
			delete(uc.cache, hashFrom)
			uc.cache[hashTo] = uastTo
		}
	}
	return map[string]interface{}{DependencyUastChanges: commit}, nil
}
  372. // Fork clones this PipelineItem.
  373. func (uc *Changes) Fork(n int) []core.PipelineItem {
  374. ucs := make([]core.PipelineItem, n)
  375. for i := 0; i < n; i++ {
  376. clone := &Changes{
  377. cache: map[plumbing.Hash]*uast.Node{},
  378. }
  379. for key, val := range uc.cache {
  380. clone.cache[key] = val
  381. }
  382. ucs[i] = clone
  383. }
  384. return ucs
  385. }
// ChangesSaver dumps changed files and corresponding UASTs for every commit.
// it is a LeafPipelineItem.
type ChangesSaver struct {
	core.NoopMerger
	core.OneShotMergeProcessor
	// OutputPath points to the target directory with UASTs
	OutputPath string
	// repository is remembered from Initialize() to load blob contents in dumpFiles().
	repository *git.Repository
	// result accumulates the per-commit change lists until Finalize().
	result [][]Change
}
const (
	// ConfigUASTChangesSaverOutputPath is the name of the configuration option
	// (ChangesSaver.Configure()) which sets the target directory where to save the files.
	ConfigUASTChangesSaverOutputPath = "ChangesSaver.OutputPath"
)
// Name of this PipelineItem. Uniquely identifies the type, used for mapping keys, etc.
func (saver *ChangesSaver) Name() string {
	return "UASTChangesSaver"
}
  405. // Provides returns the list of names of entities which are produced by this PipelineItem.
  406. // Each produced entity will be inserted into `deps` of dependent Consume()-s according
  407. // to this list. Also used by core.Registry to build the global map of providers.
  408. func (saver *ChangesSaver) Provides() []string {
  409. return []string{}
  410. }
  411. // Requires returns the list of names of entities which are needed by this PipelineItem.
  412. // Each requested entity will be inserted into `deps` of Consume(). In turn, those
  413. // entities are Provides() upstream.
  414. func (saver *ChangesSaver) Requires() []string {
  415. arr := [...]string{DependencyUastChanges}
  416. return arr[:]
  417. }
  418. // Features which must be enabled for this PipelineItem to be automatically inserted into the DAG.
  419. func (saver *ChangesSaver) Features() []string {
  420. arr := [...]string{FeatureUast}
  421. return arr[:]
  422. }
  423. // ListConfigurationOptions returns the list of changeable public properties of this PipelineItem.
  424. func (saver *ChangesSaver) ListConfigurationOptions() []core.ConfigurationOption {
  425. options := [...]core.ConfigurationOption{{
  426. Name: ConfigUASTChangesSaverOutputPath,
  427. Description: "The target directory where to store the changed UAST files.",
  428. Flag: "changed-uast-dir",
  429. Type: core.PathConfigurationOption,
  430. Default: "."},
  431. }
  432. return options[:]
  433. }
// Flag for the command line switch which enables this analysis.
func (saver *ChangesSaver) Flag() string {
	return "dump-uast-changes"
}
// Description returns the text which explains what the analysis is doing.
func (saver *ChangesSaver) Description() string {
	return "Saves UASTs and file contents on disk for each commit."
}
  442. // Configure sets the properties previously published by ListConfigurationOptions().
  443. func (saver *ChangesSaver) Configure(facts map[string]interface{}) error {
  444. if val, exists := facts[ConfigUASTChangesSaverOutputPath]; exists {
  445. saver.OutputPath = val.(string)
  446. }
  447. return nil
  448. }
// Initialize resets the temporary caches and prepares this PipelineItem for a series of Consume()
// calls. The repository which is going to be analysed is supplied as an argument.
func (saver *ChangesSaver) Initialize(repository *git.Repository) error {
	saver.repository = repository
	saver.result = [][]Change{}
	// Arm the one-shot guard so that merge commits are consumed only once.
	saver.OneShotMergeProcessor.Initialize()
	return nil
}
// Consume runs this PipelineItem on the next commit data.
// `deps` contain all the results from upstream PipelineItem-s as requested by Requires().
// Additionally, DependencyCommit is always present there and represents the analysed *object.Commit.
// This function returns the mapping with analysis results. The keys must be the same as
// in Provides(). If there was an error, nil is returned.
func (saver *ChangesSaver) Consume(deps map[string]interface{}) (map[string]interface{}, error) {
	// Skip repeated merge-commit deliveries (OneShotMergeProcessor contract).
	if !saver.ShouldConsumeCommit(deps) {
		return nil, nil
	}
	changes := deps[DependencyUastChanges].([]Change)
	saver.result = append(saver.result, changes)
	return nil, nil
}
// Finalize returns the result of the analysis. Further Consume() calls are not expected.
// The returned value is of type [][]Change: one slice of changes per consumed commit.
func (saver *ChangesSaver) Finalize() interface{} {
	return saver.result
}
// Fork clones this PipelineItem. The same instance is reused for every branch;
// the accumulated result is shared deliberately.
func (saver *ChangesSaver) Fork(n int) []core.PipelineItem {
	return core.ForkSamePipelineItem(saver, n)
}
  478. // Serialize converts the analysis result as returned by Finalize() to text or bytes.
  479. // The text format is YAML and the bytes format is Protocol Buffers.
  480. func (saver *ChangesSaver) Serialize(result interface{}, binary bool, writer io.Writer) error {
  481. saverResult := result.([][]Change)
  482. fileNames := saver.dumpFiles(saverResult)
  483. if binary {
  484. return saver.serializeBinary(fileNames, writer)
  485. }
  486. saver.serializeText(fileNames, writer)
  487. return nil
  488. }
  489. func (saver *ChangesSaver) dumpFiles(result [][]Change) []*pb.UASTChange {
  490. fileNames := []*pb.UASTChange{}
  491. for i, changes := range result {
  492. for j, change := range changes {
  493. if change.Before == nil || change.After == nil {
  494. continue
  495. }
  496. record := &pb.UASTChange{FileName: change.Change.To.Name}
  497. bs, _ := change.Before.Marshal()
  498. record.UastBefore = path.Join(saver.OutputPath, fmt.Sprintf(
  499. "%d_%d_before_%s.pb", i, j, change.Change.From.TreeEntry.Hash.String()))
  500. ioutil.WriteFile(record.UastBefore, bs, 0666)
  501. blob, _ := saver.repository.BlobObject(change.Change.From.TreeEntry.Hash)
  502. s, _ := (&object.File{Blob: *blob}).Contents()
  503. record.SrcBefore = path.Join(saver.OutputPath, fmt.Sprintf(
  504. "%d_%d_before_%s.src", i, j, change.Change.From.TreeEntry.Hash.String()))
  505. ioutil.WriteFile(record.SrcBefore, []byte(s), 0666)
  506. bs, _ = change.After.Marshal()
  507. record.UastAfter = path.Join(saver.OutputPath, fmt.Sprintf(
  508. "%d_%d_after_%s.pb", i, j, change.Change.To.TreeEntry.Hash.String()))
  509. ioutil.WriteFile(record.UastAfter, bs, 0666)
  510. blob, _ = saver.repository.BlobObject(change.Change.To.TreeEntry.Hash)
  511. s, _ = (&object.File{Blob: *blob}).Contents()
  512. record.SrcAfter = path.Join(saver.OutputPath, fmt.Sprintf(
  513. "%d_%d_after_%s.src", i, j, change.Change.To.TreeEntry.Hash.String()))
  514. ioutil.WriteFile(record.SrcAfter, []byte(s), 0666)
  515. fileNames = append(fileNames, record)
  516. }
  517. }
  518. return fileNames
  519. }
  520. func (saver *ChangesSaver) serializeText(result []*pb.UASTChange, writer io.Writer) {
  521. for _, sc := range result {
  522. kv := [...]string{
  523. "file: " + sc.FileName,
  524. "src0: " + sc.SrcBefore, "src1: " + sc.SrcAfter,
  525. "uast0: " + sc.UastBefore, "uast1: " + sc.UastAfter,
  526. }
  527. fmt.Fprintf(writer, " - {%s}\n", strings.Join(kv[:], ", "))
  528. }
  529. }
// serializeBinary marshals the dumped-change records into a single
// pb.UASTChangesSaverResults protobuf message and writes it to writer.
func (saver *ChangesSaver) serializeBinary(result []*pb.UASTChange, writer io.Writer) error {
	message := pb.UASTChangesSaverResults{Changes: result}
	serialized, err := proto.Marshal(&message)
	if err != nil {
		return err
	}
	_, err = writer.Write(serialized)
	return err
}
// init registers the three pipeline items of this package with the global registry
// so that they can be discovered and wired into the DAG by name.
func init() {
	core.Registry.Register(&Extractor{})
	core.Registry.Register(&Changes{})
	core.Registry.Register(&ChangesSaver{})
}