# labours.py

import argparse
from datetime import datetime, timedelta
import io
import os
import re
import sys
import tempfile
import warnings

try:
    from clint.textui import progress
except ImportError:
    print("Warning: clint is not installed, no fancy progressbars in the terminal for you.")
    progress = None
import numpy
import yaml

if sys.version_info[0] < 3:
    # OK, ancients, I will support Python 2, but you owe me a beer
    input = raw_input

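# Illustrative pipeline (an assumption about the typical setup, not taken from this file):
#   hercules /path/to/repo | python3 labours.py -m project -o project.png
# The YAML produced by hercules is read from stdin by default, or from --input.
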
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("-o", "--output", default="",
                        help="Path to the output file/directory (empty for display).")
    parser.add_argument("-i", "--input", default="-",
                        help="Path to the input file (- for stdin).")
    parser.add_argument("--text-size", default=12, type=int,
                        help="Size of the labels and legend.")
    parser.add_argument("--backend", help="Matplotlib backend to use.")
    parser.add_argument("--style", choices=["black", "white"], default="black",
                        help="Plot's general color scheme.")
    parser.add_argument("--relative", action="store_true",
                        help="Occupy 100%% height for every measurement.")
    parser.add_argument("--couples-tmp-dir", help="Temporary directory to work with couples.")
    parser.add_argument("-m", "--mode",
                        choices=["project", "file", "person", "matrix", "people", "couples",
                                 "all"],
                        default="project", help="What to plot.")
    parser.add_argument(
        "--resample", default="year",
        help="The way to resample the time series. Possible values are: "
             "\"month\", \"year\", \"no\", \"raw\" and pandas offset aliases ("
             "http://pandas.pydata.org/pandas-docs/stable/timeseries.html"
             "#offset-aliases).")
    args = parser.parse_args()
    return args

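# read_input() expects the YAML to contain at least "burndown" and "project"; the
# "files", "people_sequence", "people", "people_interaction", "files_coocc" and
# "people_coocc" sections are optional and only required by the matching modes.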
def read_input(args):
    sys.stdout.write("Reading the input... ")
    sys.stdout.flush()
    # Disable PyYAML's non-printable character check: this regex never matches,
    # so documents containing unusual characters are not rejected.
    yaml.reader.Reader.NON_PRINTABLE = re.compile(r"(?!x)x")
    if args.input != "-":
        with open(args.input) as fin:
            data = yaml.load(fin)
    else:
        data = yaml.load(sys.stdin)
    print("done")
    return data["burndown"], data["project"], data.get("files"), data.get("people_sequence"), \
        data.get("people"), data.get("people_interaction"), data.get("files_coocc"), \
        data.get("people_coocc")

def calculate_average_lifetime(matrix):
    lifetimes = numpy.zeros(matrix.shape[1] - 1)
    for band in matrix:
        start = 0
        for i, line in enumerate(band):
            if i == 0 or band[i - 1] == 0:
                start += 1
                continue
            lifetimes[i - start] = band[i - 1] - line
        lifetimes[i - start] = band[i - 1]
    return (lifetimes.dot(numpy.arange(1, matrix.shape[1], 1))
            / (lifetimes.sum() * matrix.shape[1]))

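# Illustrative example (not executed): for matrix = numpy.array([[4, 2, 0]]) the loop
# above yields lifetimes == [2, 2], so the returned index is (2*1 + 2*2) / (4 * 3) == 0.5.
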
def load_main(header, name, matrix, resample):
    import pandas

    start = header["begin"]
    last = header["end"]
    granularity = header["granularity"]
    sampling = header["sampling"]
    start = datetime.fromtimestamp(int(start))
    last = datetime.fromtimestamp(int(last))
    granularity = int(granularity)
    sampling = int(sampling)
    matrix = numpy.array([numpy.fromstring(line, dtype=int, sep=" ")
                          for line in matrix.split("\n")]).T
    print(name, "lifetime index:", calculate_average_lifetime(matrix))
    finish = start + timedelta(days=matrix.shape[1] * sampling)
    if resample not in ("no", "raw"):
        # Interpolate the day x day matrix.
        # Each day brings equal weight in the granularity.
        # Sampling's interpolation is linear.
        daily_matrix = numpy.zeros(
            (matrix.shape[0] * granularity, matrix.shape[1] * sampling),
            dtype=numpy.float32)
        epsrange = numpy.arange(0, 1, 1.0 / sampling)
        for y in range(matrix.shape[0]):
            for x in range(matrix.shape[1]):
                previous = matrix[y, x - 1] if x > 0 else 0
                value = ((previous + (matrix[y, x] - previous) * epsrange)
                         / granularity)[numpy.newaxis, :]
                if (y + 1) * granularity <= x * sampling:
                    daily_matrix[y * granularity:(y + 1) * granularity,
                                 x * sampling:(x + 1) * sampling] = value
                elif y * granularity <= (x + 1) * sampling:
                    for suby in range(y * granularity, (y + 1) * granularity):
                        for subx in range(suby, (x + 1) * sampling):
                            daily_matrix[suby, subx] = matrix[y, x] / granularity
        daily_matrix[(last - start).days:] = 0
        # Resample the bands
        aliases = {
            "year": "A",
            "month": "M"
        }
        resample = aliases.get(resample, resample)
        periods = 0
        date_granularity_sampling = [start]
        while date_granularity_sampling[-1] < finish:
            periods += 1
            date_granularity_sampling = pandas.date_range(
                start, periods=periods, freq=resample)
        date_range_sampling = pandas.date_range(
            date_granularity_sampling[0],
            periods=(finish - date_granularity_sampling[0]).days,
            freq="1D")
        # Fill the new square matrix
        matrix = numpy.zeros(
            (len(date_granularity_sampling), len(date_range_sampling)),
            dtype=numpy.float32)
        for i, gdt in enumerate(date_granularity_sampling):
            istart = (date_granularity_sampling[i - 1] - start).days \
                if i > 0 else 0
            ifinish = (gdt - start).days
            for j, sdt in enumerate(date_range_sampling):
                if (sdt - start).days >= istart:
                    break
            matrix[i, j:] = \
                daily_matrix[istart:ifinish, (sdt - start).days:].sum(axis=0)
        # Hardcode some cases to improve labels' readability
        if resample in ("year", "A"):
            labels = [dt.year for dt in date_granularity_sampling]
        elif resample in ("month", "M"):
            labels = [dt.strftime("%Y %B") for dt in date_granularity_sampling]
        else:
            labels = [dt.date() for dt in date_granularity_sampling]
    else:
        labels = [
            "%s - %s" % ((start + timedelta(days=i * granularity)).date(),
                         (start + timedelta(days=(i + 1) * granularity)).date())
            for i in range(matrix.shape[0])]
        if len(labels) > 18:
            warnings.warn("Too many labels - consider resampling.")
        resample = "M"  # fake resampling type is checked while plotting
        date_range_sampling = pandas.date_range(
            start + timedelta(days=sampling), periods=matrix.shape[1],
            freq="%dD" % sampling)
    return name, matrix, date_range_sampling, labels, granularity, sampling, resample

def load_matrix(contents):
    matrix = numpy.array([numpy.fromstring(line, dtype=int, sep=" ")
                          for line in contents.split("\n")])
    return matrix

def load_people(header, sequence, contents):
    import pandas

    start = header["begin"]
    last = header["end"]
    sampling = header["sampling"]
    start = datetime.fromtimestamp(int(start))
    last = datetime.fromtimestamp(int(last))
    sampling = int(sampling)
    people = []
    for name in sequence:
        people.append(numpy.array([numpy.fromstring(line, dtype=int, sep=" ")
                                   for line in contents[name].split("\n")]).sum(axis=1))
    people = numpy.array(people)
    date_range_sampling = pandas.date_range(
        start + timedelta(days=sampling), periods=people[0].shape[0],
        freq="%dD" % sampling)
    return sequence, people, date_range_sampling, last

def apply_plot_style(figure, axes, legend, style, text_size):
    figure.set_size_inches(12, 9)
    for side in ("bottom", "top", "left", "right"):
        axes.spines[side].set_color(style)
    for axis in (axes.xaxis, axes.yaxis):
        axis.label.update(dict(fontsize=text_size, color=style))
    for axis in ("x", "y"):
        axes.tick_params(axis=axis, colors=style, labelsize=text_size)
    if legend is not None:
        frame = legend.get_frame()
        for setter in (frame.set_facecolor, frame.set_edgecolor):
            setter("black" if style == "white" else "white")
        for text in legend.get_texts():
            text.set_color(style)

def get_plot_path(base, name):
    root, ext = os.path.splitext(base)
    if not ext:
        ext = ".png"
    output = os.path.join(root, name + ext)
    os.makedirs(os.path.dirname(output), exist_ok=True)
    return output

def deploy_plot(title, output, style):
    import matplotlib.pyplot as pyplot

    if not output:
        pyplot.gcf().canvas.set_window_title(title)
        pyplot.show()
    else:
        if title:
            pyplot.title(title, color=style)
        pyplot.tight_layout()
        pyplot.savefig(output, transparent=True)
    pyplot.clf()

def plot_burndown(args, target, name, matrix, date_range_sampling, labels, granularity,
                  sampling, resample):
    import matplotlib
    if args.backend:
        matplotlib.use(args.backend)
    import matplotlib.pyplot as pyplot

    pyplot.stackplot(date_range_sampling, matrix, labels=labels)
    if args.relative:
        for i in range(matrix.shape[1]):
            matrix[:, i] /= matrix[:, i].sum()
        pyplot.ylim(0, 1)
        legend_loc = 3
    else:
        legend_loc = 2
    legend = pyplot.legend(loc=legend_loc, fontsize=args.text_size)
    pyplot.ylabel("Lines of code")
    pyplot.xlabel("Time")
    apply_plot_style(pyplot.gcf(), pyplot.gca(), legend, args.style, args.text_size)
    pyplot.xlim(date_range_sampling[0], date_range_sampling[-1])
    locator = pyplot.gca().xaxis.get_major_locator()
    # set the optimal xticks locator
    if "M" not in resample:
        pyplot.gca().xaxis.set_major_locator(matplotlib.dates.YearLocator())
    locs = pyplot.gca().get_xticks().tolist()
    if len(locs) >= 16:
        pyplot.gca().xaxis.set_major_locator(matplotlib.dates.YearLocator())
        locs = pyplot.gca().get_xticks().tolist()
        if len(locs) >= 16:
            pyplot.gca().xaxis.set_major_locator(locator)
    if locs[0] < pyplot.xlim()[0]:
        del locs[0]
    endindex = -1
    if len(locs) >= 2 and \
            pyplot.xlim()[1] - locs[-1] > (locs[-1] - locs[-2]) / 2:
        locs.append(pyplot.xlim()[1])
        endindex = len(locs) - 1
    startindex = -1
    if len(locs) >= 2 and \
            locs[0] - pyplot.xlim()[0] > (locs[1] - locs[0]) / 2:
        locs.append(pyplot.xlim()[0])
        startindex = len(locs) - 1
    pyplot.gca().set_xticks(locs)
    # hacking time!
    labels = pyplot.gca().get_xticklabels()
    if startindex >= 0:
        labels[startindex].set_text(date_range_sampling[0].date())
        labels[startindex].set_text = lambda _: None
        labels[startindex].set_rotation(30)
        labels[startindex].set_ha("right")
    if endindex >= 0:
        labels[endindex].set_text(date_range_sampling[-1].date())
        labels[endindex].set_text = lambda _: None
        labels[endindex].set_rotation(30)
        labels[endindex].set_ha("right")
    title = "%s %d x %d (granularity %d, sampling %d)" % \
            ((name,) + matrix.shape + (granularity, sampling))
    output = args.output
    if output:
        if args.mode == "project" and target == "project":
            output = args.output
        else:
            if target == "project":
                name = "project"
            output = get_plot_path(args.output, name)
    deploy_plot(title, output, args.style)

def plot_many(args, target, header, parts):
    if not args.output:
        print("Warning: output not set, showing %d plots." % len(parts))
    itercnt = progress.bar(parts.items(), expected_size=len(parts)) \
        if progress is not None else parts.items()
    stdout = io.StringIO()
    for name, matrix in itercnt:
        backup = sys.stdout
        sys.stdout = stdout
        plot_burndown(args, target, *load_main(header, name, matrix, args.resample))
        sys.stdout = backup
    sys.stdout.write(stdout.getvalue())

def plot_matrix(args, repo, people, matrix):
    matrix = matrix.astype(float)
    zeros = matrix[:, 0] == 0
    matrix[zeros, :] = 1
    matrix /= matrix[:, 0][:, None]
    matrix = -matrix[:, 1:]
    matrix[zeros, :] = 0
    import matplotlib
    if args.backend:
        matplotlib.use(args.backend)
    import matplotlib.pyplot as pyplot

    s = 4 + matrix.shape[1] * 0.3
    fig = pyplot.figure(figsize=(s, s))
    ax = fig.add_subplot(111)
    ax.xaxis.set_label_position("top")
    ax.matshow(matrix, cmap=pyplot.cm.OrRd)
    ax.set_xticks(numpy.arange(0, matrix.shape[1]))
    ax.set_yticks(numpy.arange(0, matrix.shape[0]))
    ax.set_xticklabels(["Unidentified"] + people, rotation=90, ha="center")
    ax.set_yticklabels(people, va="center")
    ax.set_xticks(numpy.arange(0.5, matrix.shape[1] + 0.5), minor=True)
    ax.set_yticks(numpy.arange(0.5, matrix.shape[0] + 0.5), minor=True)
    ax.grid(which="minor")
    apply_plot_style(fig, ax, None, args.style, args.text_size)
    if not args.output:
        pos1 = ax.get_position()
        pos2 = (pos1.x0 + 0.245, pos1.y0 - 0.1, pos1.width * 0.9, pos1.height * 0.9)
        ax.set_position(pos2)
    if args.mode == "all":
        output = get_plot_path(args.output, "matrix")
    else:
        output = args.output
    title = "%s %d developers overwrite" % (repo, matrix.shape[0])
    if args.output:
        # FIXME(vmarkovtsev): otherwise the title is screwed in savefig()
        title = ""
    deploy_plot(title, output, args.style)

def plot_people(args, repo, names, people, date_range, last):
    import matplotlib
    if args.backend:
        matplotlib.use(args.backend)
    import matplotlib.pyplot as pyplot

    pyplot.stackplot(date_range, people, labels=names)
    pyplot.xlim(date_range[0], last)
    if args.relative:
        for i in range(people.shape[1]):
            people[:, i] /= people[:, i].sum()
        pyplot.ylim(0, 1)
        legend_loc = 3
    else:
        legend_loc = 2
    legend = pyplot.legend(loc=legend_loc, fontsize=args.text_size)
    apply_plot_style(pyplot.gcf(), pyplot.gca(), legend, args.style, args.text_size)
    if args.mode == "all":
        output = get_plot_path(args.output, "people")
    else:
        output = args.output
    deploy_plot("%s code ratio through time" % repo, output, args.style)

def train_embeddings(coocc_tree, tmpdir, shard_size=4096):
    from scipy.sparse import csr_matrix
    import tensorflow as tf
    try:
        from . import swivel
    except SystemError:
        import swivel

    index = coocc_tree["index"]
    print("Reading the sparse matrix...")
    data = []
    indices = []
    indptr = [0]
    for row, cd in enumerate(coocc_tree["matrix"]):
        for col, val in sorted(cd.items()):
            data.append(val)
            indices.append(col)
        indptr.append(indptr[-1] + len(cd))
    matrix = csr_matrix((data, indices, indptr), shape=(len(index), len(index)))
    meta_index = []
    for i, name in enumerate(index):
        meta_index.append((name, matrix[i, i]))
    with tempfile.TemporaryDirectory(prefix="hercules_labours_", dir=tmpdir or None) as tmproot:
        print("Writing Swivel metadata...")
        vocabulary = "\n".join(index)
        with open(os.path.join(tmproot, "row_vocab.txt"), "w") as out:
            out.write(vocabulary)
        with open(os.path.join(tmproot, "col_vocab.txt"), "w") as out:
            out.write(vocabulary)
        del vocabulary
        bool_sums = matrix.indptr[1:] - matrix.indptr[:-1]
        bool_sums_str = "\n".join(map(str, bool_sums.tolist()))
        with open(os.path.join(tmproot, "row_sums.txt"), "w") as out:
            out.write(bool_sums_str)
        with open(os.path.join(tmproot, "col_sums.txt"), "w") as out:
            out.write(bool_sums_str)
        del bool_sums_str
        reorder = numpy.argsort(-bool_sums)
        nshards = len(index) // shard_size
        if nshards == 0:
            nshards = 1
            shard_size = len(index)
        print("Writing Swivel shards...")
        for row in range(nshards):
            for col in range(nshards):
                def _int64s(xs):
                    return tf.train.Feature(
                        int64_list=tf.train.Int64List(value=list(xs)))

                def _floats(xs):
                    return tf.train.Feature(
                        float_list=tf.train.FloatList(value=list(xs)))

                indices_row = reorder[row::nshards]
                indices_col = reorder[col::nshards]
                shard = matrix[indices_row][:, indices_col].tocoo()
                example = tf.train.Example(features=tf.train.Features(feature={
                    "global_row": _int64s(indices_row),
                    "global_col": _int64s(indices_col),
                    "sparse_local_row": _int64s(shard.row),
                    "sparse_local_col": _int64s(shard.col),
                    "sparse_value": _floats(shard.data)}))
                with open(os.path.join(tmproot, "shard-%03d-%03d.pb" % (row, col)), "wb") as out:
                    out.write(example.SerializeToString())
        print("Training Swivel model...")
        swivel.FLAGS.submatrix_rows = shard_size
        swivel.FLAGS.submatrix_cols = shard_size
        if len(index) < 10000:
            embedding_size = 50
            num_epochs = 40
        elif len(index) < 100000:
            embedding_size = 100
            num_epochs = 50
        elif len(index) < 500000:
            embedding_size = 200
            num_epochs = 60
        else:
            embedding_size = 300
            num_epochs = 80
        swivel.FLAGS.embedding_size = embedding_size
        swivel.FLAGS.input_base_path = tmproot
        swivel.FLAGS.output_base_path = tmproot
        swivel.FLAGS.loss_multiplier = 1.0 / shard_size
        swivel.FLAGS.num_epochs = num_epochs
        swivel.main(None)
        print("Reading Swivel embeddings...")
        embeddings = []
        with open(os.path.join(tmproot, "row_embedding.tsv")) as frow:
            with open(os.path.join(tmproot, "col_embedding.tsv")) as fcol:
                for i, (lrow, lcol) in enumerate(zip(frow, fcol)):
                    prow, pcol = (l.split("\t", 1) for l in (lrow, lcol))
                    assert prow[0] == pcol[0]
                    erow, ecol = \
                        (numpy.fromstring(p[1], dtype=numpy.float32, sep="\t")
                         for p in (prow, pcol))
                    embeddings.append((erow + ecol) / 2)
    return meta_index, embeddings

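# The TSV pair written below follows the format accepted by the TensorFlow Embedding
# Projector (projector.tensorflow.org): one tab-separated vector per line in the data
# file, plus a metadata file whose first row is the column header.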
def write_embeddings(name, output, index, embeddings):
    print("Writing Tensorflow Projector files...")
    if not output:
        output = "couples_" + name
    metaf = "%s_%s_meta.tsv" % (output, name)
    with open(metaf, "w") as fout:
        fout.write("name\tcommits\n")
        for pair in index:
            fout.write("%s\t%s\n" % pair)
    print("Wrote", metaf)
    dataf = "%s_%s_data.tsv" % (output, name)
    with open(dataf, "w") as fout:
        for vec in embeddings:
            fout.write("\t".join(str(v) for v in vec))
            fout.write("\n")
    print("Wrote", dataf)

def main():
    args = parse_args()
    header, main_contents, files_contents, people_sequence, people_contents, people_matrix, \
        files_coocc, people_coocc = read_input(args)
    name = next(iter(main_contents))

    files_warning = "Files stats were not collected. Re-run hercules with -files."
    people_warning = "People stats were not collected. Re-run hercules with -people."

    if args.mode == "project":
        plot_burndown(args, "project",
                      *load_main(header, name, main_contents[name], args.resample))
    elif args.mode == "file":
        if not files_contents:
            print(files_warning)
            return
        plot_many(args, "file", header, files_contents)
    elif args.mode == "person":
        if not people_contents:
            print(people_warning)
            return
        plot_many(args, "person", header, people_contents)
    elif args.mode == "matrix":
        if not people_contents:
            print(people_warning)
            return
        plot_matrix(args, name, people_sequence, load_matrix(people_matrix))
    elif args.mode == "people":
        if not people_contents:
            print(people_warning)
            return
        plot_people(args, name, *load_people(header, people_sequence, people_contents))
    elif args.mode == "couples":
        write_embeddings("files", args.output,
                         *train_embeddings(files_coocc, args.couples_tmp_dir))
        write_embeddings("people", args.output,
                         *train_embeddings(people_coocc, args.couples_tmp_dir))
    elif args.mode == "all":
        plot_burndown(args, "project",
                      *load_main(header, name, main_contents[name], args.resample))
        if files_contents:
            plot_many(args, "file", header, files_contents)
        if people_contents:
            plot_many(args, "person", header, people_contents)
            plot_matrix(args, name, people_sequence, load_matrix(people_matrix))
            plot_people(args, name, *load_people(header, people_sequence, people_contents))
        if people_coocc:
            assert files_coocc
            write_embeddings("files", args.output,
                             *train_embeddings(files_coocc, args.couples_tmp_dir))
            write_embeddings("people", args.output,
                             *train_embeddings(people_coocc, args.couples_tmp_dir))


if __name__ == "__main__":
    sys.exit(main())