labours.py
import argparse
from datetime import datetime, timedelta
import io
import os
import re
import sys
import tempfile
import warnings

# clint is optional: it only provides terminal progress bars.
try:
    from clint.textui import progress
except ImportError:
    print("Warning: clint is not installed, no fancy progressbars in the terminal for you.")
    progress = None

import numpy
import yaml

if sys.version_info[0] < 3:
    # OK, ancients, I will support Python 2, but you owe me a beer
    input = raw_input
  19. def parse_args():
  20. parser = argparse.ArgumentParser()
  21. parser.add_argument("-o", "--output", default="",
  22. help="Path to the output file/directory (empty for display).")
  23. parser.add_argument("-i", "--input", default="-",
  24. help="Path to the input file (- for stdin).")
  25. parser.add_argument("--text-size", default=12, type=int,
  26. help="Size of the labels and legend.")
  27. parser.add_argument("--backend", help="Matplotlib backend to use.")
  28. parser.add_argument("--style", choices=["black", "white"], default="black",
  29. help="Plot's general color scheme.")
  30. parser.add_argument("--relative", action="store_true",
  31. help="Occupy 100%% height for every measurement.")
  32. parser.add_argument("--couples-tmp-dir", help="Temporary directory to work with couples.")
  33. parser.add_argument("-m", "--mode",
  34. choices=["project", "file", "person", "matrix", "people", "couples",
  35. "all"],
  36. default="project", help="What to plot.")
  37. parser.add_argument(
  38. "--resample", default="year",
  39. help="The way to resample the time series. Possible values are: "
  40. "\"month\", \"year\", \"no\", \"raw\" and pandas offset aliases ("
  41. "http://pandas.pydata.org/pandas-docs/stable/timeseries.html"
  42. "#offset-aliases).")
  43. args = parser.parse_args()
  44. return args
  45. def read_input(args):
  46. sys.stdout.write("Reading the input... ")
  47. sys.stdout.flush()
  48. yaml.reader.Reader.NON_PRINTABLE = re.compile(r"(?!x)x")
  49. try:
  50. loader = yaml.CLoader
  51. except AttributeError:
  52. loader = yaml.Loader
  53. if args.input != "-":
  54. with open(args.input) as fin:
  55. data = yaml.load(fin, Loader=loader)
  56. else:
  57. data = yaml.load(sys.stdin, Loader=loader)
  58. print("done")
  59. return data["burndown"], data["project"], data.get("files"), data.get("people_sequence"), \
  60. data.get("people"), data.get("people_interaction"), data.get("files_coocc"), \
  61. data.get("people_coocc")
  62. def calculate_average_lifetime(matrix):
  63. lifetimes = numpy.zeros(matrix.shape[1] - 1)
  64. for band in matrix:
  65. start = 0
  66. for i, line in enumerate(band):
  67. if i == 0 or band[i - 1] == 0:
  68. start += 1
  69. continue
  70. lifetimes[i - start] = band[i - 1] - line
  71. lifetimes[i - start] = band[i - 1]
  72. return (lifetimes.dot(numpy.arange(1, matrix.shape[1], 1))
  73. / (lifetimes.sum() * matrix.shape[1]))
def load_main(header, name, matrix, resample):
    """Parse one burndown matrix and optionally resample it to a calendar grid.

    header: dict with "begin"/"end" (unix timestamps) and "granularity"/
        "sampling" (days); values may arrive as strings.
    name: label of the measured entity (project / file / person).
    matrix: multiline string, one space-separated row of ints per line.
    resample: "no"/"raw" keeps the native sampling; "year"/"month" or any
        pandas offset alias triggers day-by-day interpolation and regrouping.

    Returns (name, matrix, date_range_sampling, labels, granularity,
    sampling, resample), ready to be splatted into plot_burndown().
    """
    import pandas

    start = header["begin"]
    last = header["end"]
    granularity = header["granularity"]
    sampling = header["sampling"]
    start = datetime.fromtimestamp(int(start))
    last = datetime.fromtimestamp(int(last))
    granularity = int(granularity)
    sampling = int(sampling)
    # One dump line per band; transpose so rows = bands, columns = samples.
    # NOTE(review): numpy.fromstring(text, sep=...) is deprecated in recent
    # numpy releases — consider a split()/int() comprehension instead.
    matrix = numpy.array([numpy.fromstring(line, dtype=int, sep=" ")
                          for line in matrix.split("\n")]).T
    print(name, "lifetime index:", calculate_average_lifetime(matrix))
    finish = start + timedelta(days=matrix.shape[1] * sampling)
    if resample not in ("no", "raw"):
        # Interpolate the day x day matrix.
        # Each day brings equal weight in the granularity.
        # Sampling's interpolation is linear.
        daily_matrix = numpy.zeros(
            (matrix.shape[0] * granularity, matrix.shape[1] * sampling),
            dtype=numpy.float32)
        epsrange = numpy.arange(0, 1, 1.0 / sampling)
        for y in range(matrix.shape[0]):
            for x in range(matrix.shape[1]):
                previous = matrix[y, x - 1] if x > 0 else 0
                value = ((previous + (matrix[y, x] - previous) * epsrange)
                         / granularity)[numpy.newaxis, :]
                if (y + 1) * granularity <= x * sampling:
                    # Band fully existed before this sample: spread evenly.
                    daily_matrix[y * granularity:(y + 1) * granularity,
                                 x * sampling:(x + 1) * sampling] = value
                elif y * granularity <= (x + 1) * sampling:
                    # Band is being born during this sample: fill the lower
                    # triangle day by day.
                    for suby in range(y * granularity, (y + 1) * granularity):
                        for subx in range(suby, (x + 1) * sampling):
                            daily_matrix[suby, subx] = matrix[y, x] / granularity
        # Nothing can exist past the last recorded day.
        daily_matrix[(last - start).days:] = 0
        # Resample the bands
        aliases = {
            "year": "A",
            "month": "M"
        }
        resample = aliases.get(resample, resample)
        # Grow the period count until the granularity range reaches `finish`.
        periods = 0
        date_granularity_sampling = [start]
        while date_granularity_sampling[-1] < finish:
            periods += 1
            date_granularity_sampling = pandas.date_range(
                start, periods=periods, freq=resample)
        date_range_sampling = pandas.date_range(
            date_granularity_sampling[0],
            periods=(finish - date_granularity_sampling[0]).days,
            freq="1D")
        # Fill the new square matrix
        matrix = numpy.zeros(
            (len(date_granularity_sampling), len(date_range_sampling)),
            dtype=numpy.float32)
        for i, gdt in enumerate(date_granularity_sampling):
            istart = (date_granularity_sampling[i - 1] - start).days \
                if i > 0 else 0
            ifinish = (gdt - start).days
            # Locate the first sampled day at or after this band's start...
            for j, sdt in enumerate(date_range_sampling):
                if (sdt - start).days >= istart:
                    break
            # ...then collapse the daily rows into the resampled band.
            matrix[i, j:] = \
                daily_matrix[istart:ifinish, (sdt - start).days:].sum(axis=0)
        # Hardcode some cases to improve labels' readability
        if resample in ("year", "A"):
            labels = [dt.year for dt in date_granularity_sampling]
        elif resample in ("month", "M"):
            labels = [dt.strftime("%Y %B") for dt in date_granularity_sampling]
        else:
            labels = [dt.date() for dt in date_granularity_sampling]
    else:
        # Raw mode: label each band with its birth date interval.
        labels = [
            "%s - %s" % ((start + timedelta(days=i * granularity)).date(),
                         (start + timedelta(days=(i + 1) * granularity)).date())
            for i in range(matrix.shape[0])]
        if len(labels) > 18:
            warnings.warn("Too many labels - consider resampling.")
            resample = "M"  # fake resampling type is checked while plotting
        date_range_sampling = pandas.date_range(
            start + timedelta(days=sampling), periods=matrix.shape[1],
            freq="%dD" % sampling)
    return name, matrix, date_range_sampling, labels, granularity, sampling, resample
  159. def load_matrix(contents):
  160. matrix = numpy.array([numpy.fromstring(line, dtype=int, sep=" ")
  161. for line in contents.split("\n")])
  162. return matrix
  163. def load_people(header, sequence, contents):
  164. import pandas
  165. start = header["begin"]
  166. last = header["end"]
  167. sampling = header["sampling"]
  168. start = datetime.fromtimestamp(int(start))
  169. last = datetime.fromtimestamp(int(last))
  170. sampling = int(sampling)
  171. people = []
  172. for name in sequence:
  173. people.append(numpy.array([numpy.fromstring(line, dtype=int, sep=" ")
  174. for line in contents[name].split("\n")]).sum(axis=1))
  175. people = numpy.array(people)
  176. date_range_sampling = pandas.date_range(
  177. start + timedelta(days=sampling), periods=people[0].shape[0],
  178. freq="%dD" % sampling)
  179. return sequence, people, date_range_sampling, last
  180. def apply_plot_style(figure, axes, legend, style, text_size):
  181. figure.set_size_inches(12, 9)
  182. for side in ("bottom", "top", "left", "right"):
  183. axes.spines[side].set_color(style)
  184. for axis in (axes.xaxis, axes.yaxis):
  185. axis.label.update(dict(fontsize=text_size, color=style))
  186. for axis in ("x", "y"):
  187. axes.tick_params(axis=axis, colors=style, labelsize=text_size)
  188. if legend is not None:
  189. frame = legend.get_frame()
  190. for setter in (frame.set_facecolor, frame.set_edgecolor):
  191. setter("black" if style == "white" else "white")
  192. for text in legend.get_texts():
  193. text.set_color(style)
  194. def get_plot_path(base, name):
  195. root, ext = os.path.splitext(base)
  196. if not ext:
  197. ext = ".png"
  198. output = os.path.join(root, name + ext)
  199. os.makedirs(os.path.dirname(output), exist_ok=True)
  200. return output
  201. def deploy_plot(title, output, style):
  202. import matplotlib.pyplot as pyplot
  203. if not output:
  204. pyplot.gcf().canvas.set_window_title(title)
  205. pyplot.show()
  206. else:
  207. if title:
  208. pyplot.title(title, color=style)
  209. pyplot.tight_layout()
  210. pyplot.savefig(output, transparent=True)
  211. pyplot.clf()
def plot_burndown(args, target, name, matrix, date_range_sampling, labels, granularity,
                  sampling, resample):
    """Render one burndown stack plot prepared by load_main().

    target: "project" / "file" / "person" — chooses output path naming.
    The remaining positional parameters are load_main()'s return values.
    """
    import matplotlib
    if args.backend:
        matplotlib.use(args.backend)  # must run before pyplot is imported
    import matplotlib.pyplot as pyplot
    pyplot.stackplot(date_range_sampling, matrix, labels=labels)
    if args.relative:
        # NOTE(review): stackplot() above has already copied the data, so this
        # normalization does not change the drawn bands while ylim is still
        # forced to [0, 1]; it probably belongs before the plot call.  Also,
        # with "no"/"raw" resampling the matrix is int and in-place /= would
        # raise TypeError — confirm intended usage.
        for i in range(matrix.shape[1]):
            matrix[:, i] /= matrix[:, i].sum()
        pyplot.ylim(0, 1)
        legend_loc = 3
    else:
        legend_loc = 2
    legend = pyplot.legend(loc=legend_loc, fontsize=args.text_size)
    pyplot.ylabel("Lines of code")
    pyplot.xlabel("Time")
    apply_plot_style(pyplot.gcf(), pyplot.gca(), legend, args.style, args.text_size)
    pyplot.xlim(date_range_sampling[0], date_range_sampling[-1])
    locator = pyplot.gca().xaxis.get_major_locator()
    # set the optimal xticks locator
    if "M" not in resample:
        pyplot.gca().xaxis.set_major_locator(matplotlib.dates.YearLocator())
    locs = pyplot.gca().get_xticks().tolist()
    if len(locs) >= 16:
        # Too many ticks: try yearly ticks, then fall back to the original.
        pyplot.gca().xaxis.set_major_locator(matplotlib.dates.YearLocator())
        locs = pyplot.gca().get_xticks().tolist()
        if len(locs) >= 16:
            pyplot.gca().xaxis.set_major_locator(locator)
    if locs[0] < pyplot.xlim()[0]:
        del locs[0]
    endindex = -1
    # Append an explicit tick at the right edge if the last tick is far away.
    if len(locs) >= 2 and \
            pyplot.xlim()[1] - locs[-1] > (locs[-1] - locs[-2]) / 2:
        locs.append(pyplot.xlim()[1])
        endindex = len(locs) - 1
    startindex = -1
    # Same for the left edge.
    if len(locs) >= 2 and \
            locs[0] - pyplot.xlim()[0] > (locs[1] - locs[0]) / 2:
        locs.append(pyplot.xlim()[0])
        startindex = len(locs) - 1
    pyplot.gca().set_xticks(locs)
    # hacking time!
    labels = pyplot.gca().get_xticklabels()
    if startindex >= 0:
        # Pin the edge labels to the real boundary dates and freeze them so
        # matplotlib cannot overwrite the text on redraw.
        labels[startindex].set_text(date_range_sampling[0].date())
        labels[startindex].set_text = lambda _: None
        labels[startindex].set_rotation(30)
        labels[startindex].set_ha("right")
    if endindex >= 0:
        labels[endindex].set_text(date_range_sampling[-1].date())
        labels[endindex].set_text = lambda _: None
        labels[endindex].set_rotation(30)
        labels[endindex].set_ha("right")
    title = "%s %d x %d (granularity %d, sampling %d)" % \
        ((name,) + matrix.shape + (granularity, sampling))
    output = args.output
    if output:
        if args.mode == "project" and target == "project":
            output = args.output
        else:
            if target == "project":
                name = "project"
            output = get_plot_path(args.output, name)
    deploy_plot(title, output, args.style)
  277. def plot_many(args, target, header, parts):
  278. if not args.output:
  279. print("Warning: output not set, showing %d plots." % len(parts))
  280. itercnt = progress.bar(parts.items(), expected_size=len(parts)) \
  281. if progress is not None else parts.items()
  282. stdout = io.StringIO()
  283. for name, matrix in itercnt:
  284. backup = sys.stdout
  285. sys.stdout = stdout
  286. plot_burndown(args, target, *load_main(header, name, matrix, args.resample))
  287. sys.stdout = backup
  288. sys.stdout.write(stdout.getvalue())
def plot_matrix(args, repo, people, matrix):
    """Draw the developers' overwrite-interaction matrix as a heat map.

    matrix: 2D int array; column 0 appears to hold each row's own total and
    is used to normalize the remaining columns — TODO confirm against the
    hercules dump format.
    """
    matrix = matrix.astype(float)
    # Rows with a zero total would divide by zero; neutralize them first and
    # blank them out afterwards.
    zeros = matrix[:, 0] == 0
    matrix[zeros, :] = 1
    matrix /= matrix[:, 0][:, None]
    matrix = -matrix[:, 1:]
    matrix[zeros, :] = 0
    import matplotlib
    if args.backend:
        matplotlib.use(args.backend)  # must run before pyplot is imported
    import matplotlib.pyplot as pyplot
    s = 4 + matrix.shape[1] * 0.3
    fig = pyplot.figure(figsize=(s, s))
    ax = fig.add_subplot(111)
    ax.xaxis.set_label_position("top")
    ax.matshow(matrix, cmap=pyplot.cm.OrRd)
    ax.set_xticks(numpy.arange(0, matrix.shape[1]))
    ax.set_yticks(numpy.arange(0, matrix.shape[0]))
    ax.set_xticklabels(["Unidentified"] + people, rotation=90, ha="center")
    ax.set_yticklabels(people, va="center")
    # Minor ticks at the half-integers give per-cell grid lines.
    ax.set_xticks(numpy.arange(0.5, matrix.shape[1] + 0.5), minor=True)
    ax.set_yticks(numpy.arange(0.5, matrix.shape[0] + 0.5), minor=True)
    ax.grid(which="minor")
    apply_plot_style(fig, ax, None, args.style, args.text_size)
    if not args.output:
        # Interactive display: nudge the axes so long names fit the window.
        pos1 = ax.get_position()
        pos2 = (pos1.x0 + 0.245, pos1.y0 - 0.1, pos1.width * 0.9, pos1.height * 0.9)
        ax.set_position(pos2)
    if args.mode == "all":
        output = get_plot_path(args.output, "matrix")
    else:
        output = args.output
    title = "%s %d developers overwrite" % (repo, matrix.shape[0])
    if args.output:
        # FIXME(vmarkovtsev): otherwise the title is screwed in savefig()
        title = ""
    deploy_plot(title, output, args.style)
  326. def plot_people(args, repo, names, people, date_range, last):
  327. import matplotlib
  328. if args.backend:
  329. matplotlib.use(args.backend)
  330. import matplotlib.pyplot as pyplot
  331. pyplot.stackplot(date_range, people, labels=names)
  332. pyplot.xlim(date_range[0], last)
  333. if args.relative:
  334. for i in range(people.shape[1]):
  335. people[:, i] /= people[:, i].sum()
  336. pyplot.ylim(0, 1)
  337. legend_loc = 3
  338. else:
  339. legend_loc = 2
  340. legend = pyplot.legend(loc=legend_loc, fontsize=args.text_size)
  341. apply_plot_style(pyplot.gcf(), pyplot.gca(), legend, args.style, args.text_size)
  342. if args.mode == "all":
  343. output = get_plot_path(args.output, "people")
  344. else:
  345. output = args.output
  346. deploy_plot("%s code ratio through time" % repo, output, args.style)
def train_embeddings(coocc_tree, tmpdir, shard_size=4096):
    """Train Swivel embeddings from a co-occurrence tree.

    coocc_tree: dict with "index" (entity names) and "matrix" (one dict of
        {column: count} per row) as produced by hercules.
    tmpdir: working directory for the Swivel input/output files (None lets
        tempfile pick one).
    shard_size: target Swivel submatrix size; adjusted below so the shards
        tile the vocabulary evenly.

    Returns (meta_index, embeddings): (name, diagonal count) pairs and one
    averaged row/column embedding vector per entity.
    """
    from scipy.sparse import csr_matrix
    import tensorflow as tf
    # swivel ships next to this script; the import form depends on whether we
    # run as a package module or a plain script.
    try:
        from . import swivel
    except SystemError:
        import swivel
    index = coocc_tree["index"]
    print("Reading the sparse matrix...")
    # Build CSR arrays directly: data/indices per cell, indptr per row.
    data = []
    indices = []
    indptr = [0]
    for row, cd in enumerate(coocc_tree["matrix"]):
        for col, val in sorted(cd.items()):
            data.append(val)
            indices.append(col)
        indptr.append(indptr[-1] + len(cd))
    matrix = csr_matrix((data, indices, indptr), shape=(len(index), len(index)))
    # The diagonal appears to hold each entity's own occurrence count —
    # TODO confirm against the hercules dump format.
    meta_index = []
    for i, name in enumerate(index):
        meta_index.append((name, matrix[i, i]))
    with tempfile.TemporaryDirectory(prefix="hercules_labours_", dir=tmpdir or None) as tmproot:
        print("Writing Swivel metadata...")
        vocabulary = "\n".join(index)
        with open(os.path.join(tmproot, "row_vocab.txt"), "w") as out:
            out.write(vocabulary)
        with open(os.path.join(tmproot, "col_vocab.txt"), "w") as out:
            out.write(vocabulary)
        del vocabulary
        # Number of nonzero cells per row, straight from the CSR indptr.
        bool_sums = matrix.indptr[1:] - matrix.indptr[:-1]
        bool_sums_str = "\n".join(map(str, bool_sums.tolist()))
        with open(os.path.join(tmproot, "row_sums.txt"), "w") as out:
            out.write(bool_sums_str)
        with open(os.path.join(tmproot, "col_sums.txt"), "w") as out:
            out.write(bool_sums_str)
        del bool_sums_str
        # Visit rows in decreasing density so shards are balanced.
        reorder = numpy.argsort(-bool_sums)
        nshards = len(index) // shard_size
        if nshards * shard_size < len(index):
            # Shrink shard_size so the shards tile len(index) exactly.
            nshards += 1
            shard_size = len(index) // nshards
            nshards = len(index) // shard_size
        print("Writing Swivel shards...")
        for row in range(nshards):
            for col in range(nshards):
                def _int64s(xs):
                    return tf.train.Feature(
                        int64_list=tf.train.Int64List(value=list(xs)))

                def _floats(xs):
                    return tf.train.Feature(
                        float_list=tf.train.FloatList(value=list(xs)))

                indices_row = reorder[row::nshards]
                indices_col = reorder[col::nshards]
                shard = matrix[indices_row][:, indices_col].tocoo()
                example = tf.train.Example(features=tf.train.Features(feature={
                    "global_row": _int64s(indices_row),
                    "global_col": _int64s(indices_col),
                    "sparse_local_row": _int64s(shard.row),
                    "sparse_local_col": _int64s(shard.col),
                    "sparse_value": _floats(shard.data)}))
                with open(os.path.join(tmproot, "shard-%03d-%03d.pb" % (row, col)), "wb") as out:
                    out.write(example.SerializeToString())
        print("Training Swivel model...")
        swivel.FLAGS.submatrix_rows = shard_size
        swivel.FLAGS.submatrix_cols = shard_size
        # Scale the model size and training length with the vocabulary.
        if len(index) < 10000:
            embedding_size = 50
            num_epochs = 40
        elif len(index) < 100000:
            embedding_size = 100
            num_epochs = 50
        elif len(index) < 500000:
            embedding_size = 200
            num_epochs = 60
        else:
            embedding_size = 300
            num_epochs = 80
        swivel.FLAGS.embedding_size = embedding_size
        swivel.FLAGS.input_base_path = tmproot
        swivel.FLAGS.output_base_path = tmproot
        swivel.FLAGS.loss_multiplier = 1.0 / shard_size
        swivel.FLAGS.num_epochs = num_epochs
        swivel.main(None)
        print("Reading Swivel embeddings...")
        embeddings = []
        with open(os.path.join(tmproot, "row_embedding.tsv")) as frow:
            with open(os.path.join(tmproot, "col_embedding.tsv")) as fcol:
                for i, (lrow, lcol) in enumerate(zip(frow, fcol)):
                    prow, pcol = (l.split("\t", 1) for l in (lrow, lcol))
                    assert prow[0] == pcol[0]
                    erow, ecol = \
                        (numpy.fromstring(p[1], dtype=numpy.float32, sep="\t")
                         for p in (prow, pcol))
                    # Average the row and column factorizations per entity.
                    embeddings.append((erow + ecol) / 2)
    return meta_index, embeddings
  442. def write_embeddings(name, output, index, embeddings):
  443. print("Writing Tensorflow Projector files...")
  444. if not output:
  445. output = "couples_" + name
  446. metaf = "%s_%s_meta.tsv" % (output, name)
  447. with open(metaf, "w") as fout:
  448. fout.write("name\tcommits\n")
  449. for pair in index:
  450. fout.write("%s\t%s\n" % pair)
  451. print("Wrote", metaf)
  452. dataf = "%s_%s_data.tsv" % (output, name)
  453. with open(dataf, "w") as fout:
  454. for vec in embeddings:
  455. fout.write("\t".join(str(v) for v in vec))
  456. fout.write("\n")
  457. print("Wrote", dataf)
def main():
    """Entry point: read the dump and dispatch on args.mode."""
    args = parse_args()
    header, main_contents, files_contents, people_sequence, people_contents, people_matrix, \
        files_coocc, people_coocc = read_input(args)
    # The project-level burndown section has a single key: the repo name.
    name = next(iter(main_contents))
    files_warning = "Files stats were not collected. Re-run hercules with -files."
    people_warning = "People stats were not collected. Re-run hercules with -people."
    if args.mode == "project":
        plot_burndown(args, "project",
                      *load_main(header, name, main_contents[name], args.resample))
    elif args.mode == "file":
        if not files_contents:
            print(files_warning)
            return
        plot_many(args, "file", header, files_contents)
    elif args.mode == "person":
        if not people_contents:
            print(people_warning)
            return
        plot_many(args, "person", header, people_contents)
    elif args.mode == "matrix":
        if not people_contents:
            print(people_warning)
            return
        plot_matrix(args, name, people_sequence, load_matrix(people_matrix))
    elif args.mode == "people":
        if not people_contents:
            print(people_warning)
            return
        plot_people(args, name, *load_people(header, people_sequence, people_contents))
    elif args.mode == "couples":
        write_embeddings("files", args.output,
                         *train_embeddings(files_coocc, args.couples_tmp_dir))
        write_embeddings("people", args.output,
                         *train_embeddings(people_coocc, args.couples_tmp_dir))
    elif args.mode == "all":
        # Render everything that was actually collected, skipping silently
        # what is missing (unlike the single modes, which warn).
        plot_burndown(args, "project",
                      *load_main(header, name, main_contents[name], args.resample))
        if files_contents:
            plot_many(args, "file", header, files_contents)
        if people_contents:
            plot_many(args, "person", header, people_contents)
            plot_matrix(args, name, people_sequence, load_matrix(people_matrix))
            plot_people(args, name, *load_people(header, people_sequence, people_contents))
        if people_coocc:
            assert files_coocc
            write_embeddings("files", args.output,
                             *train_embeddings(files_coocc, args.couples_tmp_dir))
            write_embeddings("people", args.output,
                             *train_embeddings(people_coocc, args.couples_tmp_dir))
# Allow the module to be imported without side effects or run as a script.
if __name__ == "__main__":
    sys.exit(main())