
Commit c1c3704fd2: Create gh-pages branch via GitHub
Jiwon Kim, 9 years ago

BIN  images/body-bg.png
BIN  images/highlight-bg.jpg
BIN  images/hr.png
BIN  images/octocat-icon.png
BIN  images/tar-gz-icon.png
BIN  images/zip-icon.png

index.html (+376 -0)

@@ -0,0 +1,376 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset='utf-8'>
+    <meta http-equiv="X-UA-Compatible" content="chrome=1">
+    <link href='https://fonts.googleapis.com/css?family=Chivo:900' rel='stylesheet' type='text/css'>
+    <link rel="stylesheet" type="text/css" href="stylesheets/stylesheet.css" media="screen">
+    <link rel="stylesheet" type="text/css" href="stylesheets/github-dark.css" media="screen">
+    <link rel="stylesheet" type="text/css" href="stylesheets/print.css" media="print">
+    <!--[if lt IE 9]>
+    <script src="//html5shiv.googlecode.com/svn/trunk/html5.js"></script>
+    <![endif]-->
+    <title>Awesome-deep-vision by kjw0612</title>
+  </head>
+
+  <body>
+    <div id="container">
+      <div class="inner">
+
+        <header>
+          <h1>Awesome-deep-vision</h1>
+          <h2>A curated list of deep learning resources for computer vision </h2>
+        </header>
+
+        <section id="downloads" class="clearfix">
+          <a href="https://github.com/kjw0612/awesome-deep-vision/zipball/master" id="download-zip" class="button"><span>Download .zip</span></a>
+          <a href="https://github.com/kjw0612/awesome-deep-vision/tarball/master" id="download-tar-gz" class="button"><span>Download .tar.gz</span></a>
+          <a href="https://github.com/kjw0612/awesome-deep-vision" id="view-on-github" class="button"><span>View on GitHub</span></a>
+        </section>
+
+        <hr>
+
+        <section id="main_content">
+          <h1>
+<a id="awesome-deep-vision" class="anchor" href="#awesome-deep-vision" aria-hidden="true"><span class="octicon octicon-link"></span></a>Awesome Deep Vision</h1>
+
+<p>A curated list of deep learning resources for computer vision, inspired by <a href="https://github.com/ziadoz/awesome-php">awesome-php</a> and <a href="https://github.com/jbhuang0604/awesome-computer-vision">awesome-computer-vision</a>.</p>
+
+<p>CVPR 2015 Papers to be Added Soon!</p>
+
+<h2>
+<a id="contributing" class="anchor" href="#contributing" aria-hidden="true"><span class="octicon octicon-link"></span></a>Contributing</h2>
+
+<p>Please feel free to submit <a href="https://github.com/kjw0612/awesome-deep-vision/pulls">pull requests</a> or email <a href="mailto:jiwon@alum.mit.edu">jiwon@alum.mit.edu</a> to add links.</p>
+
+<h2>
+<a id="sharing" class="anchor" href="#sharing" aria-hidden="true"><span class="octicon octicon-link"></span></a>Sharing</h2>
+
+<ul>
+<li><a href="http://twitter.com/home?status=https://github.com/kjw0612/awesome-deep-vision%0ADeep%20Learning%20Resources%20for%20Computer%20Vision">Share on Twitter</a></li>
+<li><a href="https://www.facebook.com/sharer/sharer.php?u=https://github.com/kjw0612/awesome-deep-vision">Share on Facebook</a></li>
+<li><a href="https://plus.google.com/share?url=https://github.com/kjw0612/awesome-deep-vision">Share on Google Plus</a></li>
+<li><a href="https://www.linkedin.com/shareArticle?mini=true&amp;url=https://github.com/kjw0612/awesome-deep-vision&amp;title=Awesome%20Deep%20Vision&amp;summary=&amp;source=">Share on LinkedIn</a></li>
+</ul>
+
+<h2>
+<a id="table-of-contents" class="anchor" href="#table-of-contents" aria-hidden="true"><span class="octicon octicon-link"></span></a>Table of Contents</h2>
+
+<ul>
+<li>
+<a href="#papers">Papers</a>
+
+<ul>
+<li><a href="#imagenet-classification">ImageNet Classification</a></li>
+<li><a href="#image-captioning">Image Captioning</a></li>
+<li><a href="#low-level-vision">Low-Level Vision</a></li>
+<li><a href="#edge-detection">Edge Detection</a></li>
+<li><a href="#semantic-segmentation">Semantic Segmentation</a></li>
+</ul>
+</li>
+<li><a href="#courses">Courses</a></li>
+<li>
+<a href="#software">Software</a>
+
+<ul>
+<li><a href="#framework">Framework</a></li>
+<li><a href="#applications">Applications</a></li>
+</ul>
+</li>
+<li><a href="#tutorials">Tutorials</a></li>
+<li><a href="#blogs">Blogs</a></li>
+</ul>
+
+<h2>
+<a id="papers" class="anchor" href="#papers" aria-hidden="true"><span class="octicon octicon-link"></span></a>Papers</h2>
+
+<h3>
+<a id="imagenet-classification" class="anchor" href="#imagenet-classification" aria-hidden="true"><span class="octicon octicon-link"></span></a>ImageNet Classification</h3>
+
+<ul>
+<li>Microsoft (PReLu/Weight Initialization) <a href="http://arxiv.org/pdf/1502.01852v1">[Paper]</a>
+
+<ul>
+<li>Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun, Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification, arXiv:1502.01852.</li>
+</ul>
+</li>
+<li>Batch Normalization <a href="http://arxiv.org/pdf/1502.03167v3">[Paper]</a>
+
+<ul>
+<li>Sergey Ioffe, Christian Szegedy, Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift, arXiv:1502.03167.</li>
+</ul>
+</li>
+<li>GoogLeNet <a href="http://arxiv.org/pdf/1409.4842v1">[Paper]</a>
+
+<ul>
+<li>Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed, Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich, Going Deeper with Convolutions, CVPR 2015.</li>
+</ul>
+</li>
+<li>VGG-Net <a href="http://www.robots.ox.ac.uk/%7Evgg/research/very_deep/">[Web]</a> <a href="http://arxiv.org/pdf/1409.1556">[Paper]</a>
+
+<ul>
+<li>Karen Simonyan and Andrew Zisserman, Very Deep Convolutional Networks for Large-Scale Image Recognition, ICLR 2015.</li>
+</ul>
+</li>
+<li>AlexNet <a href="http://books.nips.cc/papers/files/nips25/NIPS2012_0534.pdf">[Paper]</a>
+
+<ul>
+<li>Krizhevsky, A., Sutskever, I. and Hinton, G. E., ImageNet Classification with Deep Convolutional Neural Networks, NIPS 2012.</li>
+</ul>
+</li>
+</ul>
+
+<h3>
+<a id="image-captioning" class="anchor" href="#image-captioning" aria-hidden="true"><span class="octicon octicon-link"></span></a>Image Captioning</h3>
+
+<ul>
+<li>Baidu/UCLA <a href="http://arxiv.org/pdf/1410.1090v1">[Paper]</a>
+
+<ul>
+<li>Junhua Mao, Wei Xu, Yi Yang, Jiang Wang, Alan L. Yuille, Explain Images with Multimodal Recurrent Neural Networks, arXiv:1410.1090 (2014).</li>
+</ul>
+</li>
+<li>Toronto <a href="http://arxiv.org/pdf/1411.2539v1">[Paper]</a>
+
+<ul>
+<li>Ryan Kiros, Ruslan Salakhutdinov, Richard S. Zemel, Unifying Visual-Semantic Embeddings with Multimodal Neural Language Models, arXiv:1411.2539 (2014).</li>
+</ul>
+</li>
+<li>Berkeley <a href="http://arxiv.org/pdf/1411.4389v3">[Paper]</a>
+
+<ul>
+<li>Jeff Donahue, Lisa Anne Hendricks, Sergio Guadarrama, Marcus Rohrbach, Subhashini Venugopalan, Kate Saenko, Trevor Darrell, Long-term Recurrent Convolutional Networks for Visual Recognition and Description, arXiv:1411.4389 (2014).</li>
+</ul>
+</li>
+<li>Google <a href="http://arxiv.org/pdf/1411.4555v2">[Paper]</a>
+
+<ul>
+<li>Oriol Vinyals, Alexander Toshev, Samy Bengio, Dumitru Erhan, Show and Tell: A Neural Image Caption Generator, arXiv:1411.4555 (2014). </li>
+</ul>
+</li>
+<li>Stanford <a href="http://cs.stanford.edu/people/karpathy/deepimagesent/">[Web]</a> <a href="http://cs.stanford.edu/people/karpathy/cvpr2015.pdf">[Paper]</a>
+
+<ul>
+<li>Andrej Karpathy, Li Fei-Fei, Deep Visual-Semantic Alignments for Generating Image Descriptions, CVPR 2015.</li>
+</ul>
+</li>
+<li>UML/UT <a href="http://arxiv.org/pdf/1412.4729v3">[Paper]</a>
+
+<ul>
+<li>Subhashini Venugopalan, Huijuan Xu, Jeff Donahue, Marcus Rohrbach, Raymond Mooney, Kate Saenko, Translating Videos to Natural Language Using Deep Recurrent Neural Networks, NAACL-HLT 2015. </li>
+</ul>
+</li>
+<li>Microsoft/CMU <a href="http://arxiv.org/pdf/1411.5654v1">[Paper]</a>
+
+<ul>
+<li>Xinlei Chen, C. Lawrence Zitnick, Learning a Recurrent Visual Representation for Image Caption Generation, arXiv:1411.5654.</li>
+</ul>
+</li>
+<li>Microsoft <a href="http://arxiv.org/pdf/1411.4952v3">[Paper]</a>
+
+<ul>
+<li>Hao Fang, Saurabh Gupta, Forrest Iandola, Rupesh Srivastava, Li Deng, Piotr Dollár, Jianfeng Gao, Xiaodong He, Margaret Mitchell, John C. Platt, C. Lawrence Zitnick, Geoffrey Zweig, From Captions to Visual Concepts and Back, CVPR 2015. </li>
+</ul>
+</li>
+</ul>
+
+<h3>
+<a id="low-level-vision" class="anchor" href="#low-level-vision" aria-hidden="true"><span class="octicon octicon-link"></span></a>Low-Level Vision</h3>
+
+<ul>
+<li>Optical Flow (FlowNet) <a href="http://arxiv.org/pdf/1504.06852v2">[Paper]</a>
+
+<ul>
+<li>Philipp Fischer, Alexey Dosovitskiy, Eddy Ilg, Philip Häusser, Caner Hazırbaş, Vladimir Golkov, Patrick van der Smagt, Daniel Cremers, Thomas Brox, FlowNet: Learning Optical Flow with Convolutional Networks, arXiv:1504.06852.</li>
+</ul>
+</li>
+<li>Super-Resolution (SRCNN) <a href="http://mmlab.ie.cuhk.edu.hk/projects/SRCNN.html">[Web]</a> <a href="http://personal.ie.cuhk.edu.hk/%7Eccloy/files/eccv_2014_deepresolution.pdf">[Paper-ECCV14]</a> <a href="http://arxiv.org/pdf/1501.00092v1.pdf">[Paper-arXiv15]</a>
+
+<ul>
+<li>Chao Dong, Chen Change Loy, Kaiming He, Xiaoou Tang, Learning a Deep Convolutional Network for Image Super-Resolution, in ECCV 2014</li>
+<li>Chao Dong, Chen Change Loy, Kaiming He, Xiaoou Tang. Image Super-Resolution Using Deep Convolutional Networks, arXiv:1501.00092 (2015)</li>
+</ul>
+</li>
+<li>Compression Artifacts Reduction <a href="http://arxiv.org/pdf/1504.06993v1">[Paper-arXiv15]</a>
+
+<ul>
+<li>Chao Dong, Yubin Deng, Chen Change Loy, Xiaoou Tang, Compression Artifacts Reduction by a Deep Convolutional Network, arXiv:1504.06993</li>
+</ul>
+</li>
+<li>Non-Uniform Motion Blur Removal <a href="http://arxiv.org/pdf/1503.00593v3">[Paper]</a>
+
+<ul>
+<li>Jian Sun, Wenfei Cao, Zongben Xu, Jean Ponce, Learning a Convolutional Neural Network for Non-uniform Motion Blur Removal, CVPR 2015. </li>
+</ul>
+</li>
+<li>Image Deconvolution <a href="http://lxu.me/projects/dcnn/">[Web]</a> <a href="http://lxu.me/mypapers/dcnn_nips14.pdf">[Paper]</a>
+
+<ul>
+<li>Li Xu, Jimmy SJ. Ren, Ce Liu, Jiaya Jia, "Deep Convolutional Neural Network for Image Deconvolution", Advances in Neural Information Processing Systems (NIPS), 2014.</li>
+</ul>
+</li>
+</ul>
+
+<h3>
+<a id="edge-detection" class="anchor" href="#edge-detection" aria-hidden="true"><span class="octicon octicon-link"></span></a>Edge Detection</h3>
+
+<ul>
+<li>Holistically-Nested Edge Detection <a href="http://arxiv.org/pdf/1504.06375v1">[Paper]</a>
+
+<ul>
+<li>Saining Xie, Zhuowen Tu, Holistically-Nested Edge Detection, arXiv:1504.06375. </li>
+</ul>
+</li>
+<li>DeepEdge <a href="http://arxiv.org/pdf/1412.1123v3">[Paper]</a>
+
+<ul>
+<li>Gedas Bertasius, Jianbo Shi, Lorenzo Torresani, DeepEdge: A Multi-Scale Bifurcated Deep Network for Top-Down Contour Detection, CVPR 2015.</li>
+</ul>
+</li>
+<li>DeepContour <a href="http://mc.eistar.net/UpLoadFiles/Papers/DeepContour_cvpr15.pdf">[Paper]</a>
+
+<ul>
+<li>Wei Shen, Xinggang Wang, Yan Wang, Xiang Bai, Zhijiang Zhang, DeepContour: A Deep Convolutional Feature Learned by Positive-Sharing Loss for Contour Detection, CVPR 2015.</li>
+</ul>
+</li>
+</ul>
+
+<h3>
+<a id="semantic-segmentation" class="anchor" href="#semantic-segmentation" aria-hidden="true"><span class="octicon octicon-link"></span></a>Semantic Segmentation</h3>
+
+<ul>
+<li>Learning Hierarchical Features for Scene Labeling <a href="http://yann.lecun.com/exdb/publis/pdf/farabet-icml-12.pdf">[Paper-ICML12]</a> <a href="http://yann.lecun.com/exdb/publis/pdf/farabet-pami-13.pdf">[Paper-PAMI13]</a>
+
+<ul>
+<li>Clement Farabet, Camille Couprie, Laurent Najman, Yann LeCun, Scene Parsing with Multiscale Feature Learning, Purity Trees, and Optimal Covers, ICML, 2012.</li>
+<li>Clement Farabet, Camille Couprie, Laurent Najman, Yann LeCun, Learning Hierarchical Features for Scene Labeling, PAMI, 2013.</li>
+</ul>
+</li>
+<li>R-CNN <a href="http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Girshick_Rich_Feature_Hierarchies_2014_CVPR_paper.pdf">[Paper-CVPR14]</a> <a href="http://arxiv.org/pdf/1311.2524v5">[Paper-arXiv14]</a>
+
+<ul>
+<li>Ross Girshick, Jeff Donahue, Trevor Darrell, Jitendra Malik, Rich feature hierarchies for accurate object detection and semantic segmentation, CVPR, 2014.</li>
+</ul>
+</li>
+<li>Fully Convolutional Networks for Semantic Segmentation <a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Long_Fully_Convolutional_Networks_2015_CVPR_paper.pdf">[Paper-CVPR15]</a> <a href="http://arxiv.org/pdf/1411.4038v2">[Paper-arXiv15]</a>
+
+<ul>
+<li>Jonathan Long, Evan Shelhamer, Trevor Darrell, Fully Convolutional Networks for Semantic Segmentation, CVPR, 2015.</li>
+</ul>
+</li>
+<li>Conditional Random Fields as Recurrent Neural Networks <a href="http://arxiv.org/pdf/1502.03240v2">[Paper]</a>
+
+<ul>
+<li>Shuai Zheng, Sadeep Jayasumana, Bernardino Romera-Paredes, Vibhav Vineet, Zhizhong Su, Dalong Du, Chang Huang, Philip H. S. Torr, Conditional Random Fields as Recurrent Neural Networks, arXiv:1502.03240</li>
+</ul>
+</li>
+<li>BoxSup <a href="http://arxiv.org/pdf/1503.01640v2">[Paper]</a>
+
+<ul>
+<li>Jifeng Dai, Kaiming He, Jian Sun, BoxSup: Exploiting Bounding Boxes to Supervise Convolutional Networks for Semantic Segmentation, arXiv:1503.01640</li>
+</ul>
+</li>
+</ul>
+
+<h2>
+<a id="courses" class="anchor" href="#courses" aria-hidden="true"><span class="octicon octicon-link"></span></a>Courses</h2>
+
+<ul>
+<li>[Stanford] <a href="http://cs231n.stanford.edu/">CS231n: Convolutional Neural Networks for Visual Recognition</a>
+</li>
+<li>[CUHK] <a href="https://piazza.com/cuhk.edu.hk/spring2015/eleg5040/home">ELEG 5040: Advanced Topics in Signal Processing (Introduction to Deep Learning)</a>
+</li>
+</ul>
+
+<h2>
+<a id="software" class="anchor" href="#software" aria-hidden="true"><span class="octicon octicon-link"></span></a>Software</h2>
+
+<h3>
+<a id="framework" class="anchor" href="#framework" aria-hidden="true"><span class="octicon octicon-link"></span></a>Framework</h3>
+
+<ul>
+<li>Torch7: Deep learning library in Lua, used by Facebook and Google DeepMind <a href="http://torch.ch/">[Web]</a>
+</li>
+<li>Caffe: Deep learning framework by the BVLC <a href="http://caffe.berkeleyvision.org/">[Web]</a>
+</li>
+<li>MatConvNet: CNNs for MATLAB <a href="http://www.vlfeat.org/matconvnet/">[Web]</a>
+</li>
+</ul>
+
+<h3>
+<a id="applications" class="anchor" href="#applications" aria-hidden="true"><span class="octicon octicon-link"></span></a>Applications</h3>
+
+<ul>
+<li>Adversarial Training 
+
+<ul>
+<li>Code and hyperparameters for the paper "Generative Adversarial Networks" <a href="https://github.com/goodfeli/adversarial">[Web]</a>
+</li>
+</ul>
+</li>
+<li>Understanding and Visualizing
+
+<ul>
+<li>Source code for "Understanding Deep Image Representations by Inverting Them", CVPR 2015. <a href="https://github.com/aravindhm/deep-goggle">[Web]</a>
+</li>
+</ul>
+</li>
+<li>Semantic Segmentation
+
+<ul>
+<li>Source code for the paper "Rich feature hierarchies for accurate object detection and semantic segmentation", CVPR 2014. <a href="https://github.com/rbgirshick/rcnn">[Web]</a>
+</li>
+<li>Source code for the paper "Fully Convolutional Networks for Semantic Segmentation", CVPR 2015. <a href="https://github.com/longjon/caffe/tree/future">[Web]</a>
+</li>
+</ul>
+</li>
+<li>Super-Resolution
+
+<ul>
+<li>Image Super-Resolution for Anime-Style-Art <a href="https://github.com/nagadomi/waifu2x">[Web]</a>
+</li>
+</ul>
+</li>
+<li>Edge Detection
+
+<ul>
+<li>Source code for the paper "DeepContour: A Deep Convolutional Feature Learned by Positive-Sharing Loss for Contour Detection" CVPR 2015. <a href="https://github.com/shenwei1231/DeepContour">[Web]</a>
+</li>
+</ul>
+</li>
+</ul>
+
+<h2>
+<a id="tutorials" class="anchor" href="#tutorials" aria-hidden="true"><span class="octicon octicon-link"></span></a>Tutorials</h2>
+
+<ul>
+<li>[CVPR 2014] <a href="https://sites.google.com/site/deeplearningcvpr2014/">Tutorial on Deep Learning in Computer Vision</a>
+</li>
+<li>[CVPR 2015] <a href="http://torch.ch/docs/cvpr15.html">Applied Deep Learning for Computer Vision with Torch</a>
+</li>
+</ul>
+
+<h2>
+<a id="blogs" class="anchor" href="#blogs" aria-hidden="true"><span class="octicon octicon-link"></span></a>Blogs</h2>
+
+<ul>
+<li><a href="http://www.computervisionblog.com/2015/06/deep-down-rabbit-hole-cvpr-2015-and.html">Deep down the rabbit hole: CVPR 2015 and beyond@Tombone's Computer Vision Blog</a></li>
+<li><a href="http://zoyathinks.blogspot.kr/2015/06/cvpr-recap-and-where-were-going.html">CVPR recap and where we're going@Zoya Bylinskii (MIT PhD Student)'s Blog</a></li>
+<li><a href="http://www.wired.com/2015/06/facebook-googles-fake-brains-spawn-new-visual-reality/">Facebook's AI Painting@Wired</a></li>
+<li><a href="http://googleresearch.blogspot.kr/2015/06/inceptionism-going-deeper-into-neural.html">Inceptionism: Going Deeper into Neural Networks@Google Research</a></li>
+</ul>
+
+<p>Maintainers - <a href="http://github.com/kjw0612">Jiwon Kim</a>, <a href="https://github.com/hmyeong">Heesoo Myeong</a>, <a href="http://github.com/myungsub">Myungsub Choi</a>, <a href="https://github.com/JanghoonChoi">JanghoonChoi</a>, <a href="http://github.com/deruci">Jung Kwon Lee</a></p>
+        </section>
+
+        <footer>
+          Awesome-deep-vision is maintained by <a href="https://github.com/kjw0612">kjw0612</a><br>
+          This page was generated by <a href="https://pages.github.com">GitHub Pages</a>. Tactile theme by <a href="https://twitter.com/jasonlong">Jason Long</a>.
+        </footer>
+
+        
+      </div>
+    </div>
+  </body>
+</html>

javascripts/main.js (+1 -0)

@@ -0,0 +1 @@
+console.log('This would be the main JS file.');

params.json (+1 -0)
(File diff suppressed because it is too large)


stylesheets/github-dark.css (+116 -0)

@@ -0,0 +1,116 @@
+/*
+   Copyright 2014 GitHub Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+*/
+
+.pl-c /* comment */ {
+  color: #969896;
+}
+
+.pl-c1      /* constant, markup.raw, meta.diff.header, meta.module-reference, meta.property-name, support, support.constant, support.variable, variable.other.constant */,
+.pl-s .pl-v /* string variable */ {
+  color: #0099cd;
+}
+
+.pl-e  /* entity */,
+.pl-en /* entity.name */ {
+  color: #9774cb;
+}
+
+.pl-s .pl-s1 /* string source */,
+.pl-smi      /* storage.modifier.import, storage.modifier.package, storage.type.java, variable.other, variable.parameter.function */ {
+  color: #ddd;
+}
+
+.pl-ent /* entity.name.tag */ {
+  color: #7bcc72;
+}
+
+.pl-k /* keyword, storage, storage.type */ {
+  color: #cc2372;
+}
+
+.pl-pds              /* punctuation.definition.string, string.regexp.character-class */,
+.pl-s                /* string */,
+.pl-s .pl-pse .pl-s1 /* string punctuation.section.embedded source */,
+.pl-sr               /* string.regexp */,
+.pl-sr .pl-cce       /* string.regexp constant.character.escape */,
+.pl-sr .pl-sra       /* string.regexp string.regexp.arbitrary-repitition */,
+.pl-sr .pl-sre       /* string.regexp source.ruby.embedded */ {
+  color: #3c66e2;
+}
+
+.pl-v /* variable */ {
+  color: #fb8764;
+}
+
+.pl-id /* invalid.deprecated */ {
+  color: #e63525;
+}
+
+.pl-ii /* invalid.illegal */ {
+  background-color: #e63525;
+  color: #f8f8f8;
+}
+
+.pl-sr .pl-cce /* string.regexp constant.character.escape */ {
+  color: #7bcc72;
+  font-weight: bold;
+}
+
+.pl-ml /* markup.list */ {
+  color: #c26b2b;
+}
+
+.pl-mh        /* markup.heading */,
+.pl-mh .pl-en /* markup.heading entity.name */,
+.pl-ms        /* meta.separator */ {
+  color: #264ec5;
+  font-weight: bold;
+}
+
+.pl-mq /* markup.quote */ {
+  color: #00acac;
+}
+
+.pl-mi /* markup.italic */ {
+  color: #ddd;
+  font-style: italic;
+}
+
+.pl-mb /* markup.bold */ {
+  color: #ddd;
+  font-weight: bold;
+}
+
+.pl-md /* markup.deleted, meta.diff.header.from-file */ {
+  background-color: #ffecec;
+  color: #bd2c00;
+}
+
+.pl-mi1 /* markup.inserted, meta.diff.header.to-file */ {
+  background-color: #eaffea;
+  color: #55a532;
+}
+
+.pl-mdr /* meta.diff.range */ {
+  color: #9774cb;
+  font-weight: bold;
+}
+
+.pl-mo /* meta.output */ {
+  color: #264ec5;
+}
+

stylesheets/print.css (+228 -0)

@@ -0,0 +1,228 @@
+html, body, div, span, applet, object, iframe,
+h1, h2, h3, h4, h5, h6, p, blockquote, pre,
+a, abbr, acronym, address, big, cite, code,
+del, dfn, em, img, ins, kbd, q, s, samp,
+small, strike, strong, sub, sup, tt, var,
+b, u, i, center,
+dl, dt, dd, ol, ul, li,
+fieldset, form, label, legend,
+table, caption, tbody, tfoot, thead, tr, th, td,
+article, aside, canvas, details, embed,
+figure, figcaption, footer, header, hgroup,
+menu, nav, output, ruby, section, summary,
+time, mark, audio, video {
+  padding: 0;
+  margin: 0;
+  font: inherit;
+  font-size: 100%;
+  vertical-align: baseline;
+  border: 0;
+}
+/* HTML5 display-role reset for older browsers */
+article, aside, details, figcaption, figure,
+footer, header, hgroup, menu, nav, section {
+  display: block;
+}
+body {
+  line-height: 1;
+}
+ol, ul {
+  list-style: none;
+}
+blockquote, q {
+  quotes: none;
+}
+blockquote:before, blockquote:after,
+q:before, q:after {
+  content: '';
+  content: none;
+}
+table {
+  border-spacing: 0;
+  border-collapse: collapse;
+}
+body {
+  font-family: 'Helvetica Neue', Helvetica, Arial, serif;
+  font-size: 13px;
+  line-height: 1.5;
+  color: #000;
+}
+
+a {
+  font-weight: bold;
+  color: #d5000d;
+}
+
+header {
+  padding-top: 35px;
+  padding-bottom: 10px;
+}
+
+header h1 {
+  font-size: 48px;
+  font-weight: bold;
+  line-height: 1.2;
+  color: #303030;
+  letter-spacing: -1px;
+}
+
+header h2 {
+  font-size: 24px;
+  font-weight: normal;
+  line-height: 1.3;
+  color: #aaa;
+  letter-spacing: -1px;
+}
+#downloads {
+  display: none;
+}
+#main_content {
+  padding-top: 20px;
+}
+
+code, pre {
+  margin-bottom: 30px;
+  font-family: Monaco, "Bitstream Vera Sans Mono", "Lucida Console", Terminal;
+  font-size: 12px;
+  color: #222;
+}
+
+code {
+  padding: 0 3px;
+}
+
+pre {
+  padding: 20px;
+  overflow: auto;
+  border: solid 1px #ddd;
+}
+pre code {
+  padding: 0;
+}
+
+ul, ol, dl {
+  margin-bottom: 20px;
+}
+
+
+/* COMMON STYLES */
+
+table {
+  width: 100%;
+  border: 1px solid #ebebeb;
+}
+
+th {
+  font-weight: 500;
+}
+
+td {
+  font-weight: 300;
+  text-align: center;
+  border: 1px solid #ebebeb;
+}
+
+form {
+  padding: 20px;
+  background: #f2f2f2;
+
+}
+
+
+/* GENERAL ELEMENT TYPE STYLES */
+
+h1 {
+  font-size: 2.8em;
+}
+
+h2 {
+  margin-bottom: 8px;
+  font-size: 22px;
+  font-weight: bold;
+  color: #303030;
+}
+
+h3 {
+  margin-bottom: 8px;
+  font-size: 18px;
+  font-weight: bold;
+  color: #d5000d;
+}
+
+h4 {
+  font-size: 16px;
+  font-weight: bold;
+  color: #303030;
+}
+
+h5 {
+  font-size: 1em;
+  color: #303030;
+}
+
+h6 {
+  font-size: .8em;
+  color: #303030;
+}
+
+p {
+  margin-bottom: 20px;
+  font-weight: 300;
+}
+
+a {
+  text-decoration: none;
+}
+
+p a {
+  font-weight: 400;
+}
+
+blockquote {
+  padding: 0 0 0 30px;
+  margin-bottom: 20px;
+  font-size: 1.6em;
+  border-left: 10px solid #e9e9e9;
+}
+
+ul li {
+  list-style-position: inside;
+  list-style: disc;
+  padding-left: 20px;
+}
+
+ol li {
+  list-style-position: inside;
+  list-style: decimal;
+  padding-left: 3px;
+}
+
+dl dd {
+  font-style: italic;
+  font-weight: 100;
+}
+
+footer {
+  padding-top: 20px;
+  padding-bottom: 30px;
+  margin-top: 40px;
+  font-size: 13px;
+  color: #aaa;
+}
+
+footer a {
+  color: #666;
+}
+
+/* MISC */
+.clearfix:after {
+  display: block;
+  height: 0;
+  clear: both;
+  visibility: hidden;
+  content: '.';
+}
+
+.clearfix {display: inline-block;}
+* html .clearfix {height: 1%;}
+.clearfix {display: block;}

stylesheets/stylesheet.css (+373 -0)

@@ -0,0 +1,373 @@
+/* http://meyerweb.com/eric/tools/css/reset/
+   v2.0 | 20110126
+   License: none (public domain)
+*/
+html, body, div, span, applet, object, iframe,
+h1, h2, h3, h4, h5, h6, p, blockquote, pre,
+a, abbr, acronym, address, big, cite, code,
+del, dfn, em, img, ins, kbd, q, s, samp,
+small, strike, strong, sub, sup, tt, var,
+b, u, i, center,
+dl, dt, dd, ol, ul, li,
+fieldset, form, label, legend,
+table, caption, tbody, tfoot, thead, tr, th, td,
+article, aside, canvas, details, embed,
+figure, figcaption, footer, header, hgroup,
+menu, nav, output, ruby, section, summary,
+time, mark, audio, video {
+	padding: 0;
+	margin: 0;
+	font: inherit;
+	font-size: 100%;
+	vertical-align: baseline;
+	border: 0;
+}
+/* HTML5 display-role reset for older browsers */
+article, aside, details, figcaption, figure,
+footer, header, hgroup, menu, nav, section {
+	display: block;
+}
+body {
+	line-height: 1;
+}
+ol, ul {
+	list-style: none;
+}
+blockquote, q {
+	quotes: none;
+}
+blockquote:before, blockquote:after,
+q:before, q:after {
+	content: '';
+	content: none;
+}
+table {
+	border-spacing: 0;
+	border-collapse: collapse;
+}
+
+/* LAYOUT STYLES */
+body {
+  font-family: 'Helvetica Neue', Helvetica, Arial, serif;
+  font-size: 1em;
+  line-height: 1.5;
+  color: #6d6d6d;
+  text-shadow: 0 1px 0 rgba(255, 255, 255, 0.8);
+  background: #e7e7e7 url(../images/body-bg.png) 0 0 repeat;
+}
+
+a {
+  color: #d5000d;
+}
+a:hover {
+  color: #c5000c;
+}
+
+header {
+  padding-top: 35px;
+  padding-bottom: 25px;
+}
+
+header h1 {
+  font-family: 'Chivo', 'Helvetica Neue', Helvetica, Arial, serif;
+  font-size: 48px; font-weight: 900;
+  line-height: 1.2;
+  color: #303030;
+  letter-spacing: -1px;
+}
+
+header h2 {
+  font-size: 24px;
+  font-weight: normal;
+  line-height: 1.3;
+  color: #aaa;
+  letter-spacing: -1px;
+}
+
+#container {
+  min-height: 595px;
+  background: transparent url(../images/highlight-bg.jpg) 50% 0 no-repeat;
+}
+
+.inner {
+  width: 620px;
+  margin: 0 auto;
+}
+
+#container .inner img {
+  max-width: 100%;
+}
+
+#downloads {
+  margin-bottom: 40px;
+}
+
+a.button {
+  display: block;
+  float: left;
+  width: 179px;
+  padding: 12px 8px 12px 8px;
+  margin-right: 14px;
+  font-size: 15px;
+  font-weight: bold;
+  line-height: 25px;
+  color: #303030;
+  background: #fdfdfd; /* Old browsers */
+  background: -moz-linear-gradient(top,  #fdfdfd 0%, #f2f2f2 100%); /* FF3.6+ */
+  background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,#fdfdfd), color-stop(100%,#f2f2f2)); /* Chrome,Safari4+ */
+  background: -webkit-linear-gradient(top,  #fdfdfd 0%,#f2f2f2 100%); /* Chrome10+,Safari5.1+ */
+  background: -o-linear-gradient(top,  #fdfdfd 0%,#f2f2f2 100%); /* Opera 11.10+ */
+  background: -ms-linear-gradient(top,  #fdfdfd 0%,#f2f2f2 100%); /* IE10+ */
+  background: linear-gradient(top,  #fdfdfd 0%,#f2f2f2 100%); /* W3C */
+  filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#fdfdfd', endColorstr='#f2f2f2',GradientType=0 ); /* IE6-9 */
+  border-top: solid 1px #cbcbcb;
+  border-right: solid 1px #b7b7b7;
+  border-bottom: solid 1px #b3b3b3;
+  border-left: solid 1px #b7b7b7;
+  border-radius: 30px;
+  -webkit-box-shadow: 10px 10px 5px #888;
+  -moz-box-shadow: 10px 10px 5px #888;
+  box-shadow: 0px 1px 5px #e8e8e8;
+  -moz-border-radius: 30px;
+  -webkit-border-radius: 30px;
+}
+a.button:hover {
+  background: #fafafa; /* Old browsers */
+  background: -moz-linear-gradient(top,  #fdfdfd 0%, #f6f6f6 100%); /* FF3.6+ */
+  background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,#fdfdfd), color-stop(100%,#f6f6f6)); /* Chrome,Safari4+ */
+  background: -webkit-linear-gradient(top,  #fdfdfd 0%,#f6f6f6 100%); /* Chrome10+,Safari5.1+ */
+  background: -o-linear-gradient(top,  #fdfdfd 0%,#f6f6f6 100%); /* Opera 11.10+ */
+  background: -ms-linear-gradient(top,  #fdfdfd 0%,#f6f6f6 100%); /* IE10+ */
+  background: linear-gradient(top,  #fdfdfd 0%,#f6f6f6 100%); /* W3C */
+  filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#fdfdfd', endColorstr='#f6f6f6',GradientType=0 ); /* IE6-9 */
+  border-top: solid 1px #b7b7b7;
+  border-right: solid 1px #b3b3b3;
+  border-bottom: solid 1px #b3b3b3;
+  border-left: solid 1px #b3b3b3;
+}
+
+a.button span {
+  display: block;
+  height: 23px;
+  padding-left: 50px;
+}
+
+#download-zip span {
+  background: transparent url(../images/zip-icon.png) 12px 50% no-repeat;
+}
+#download-tar-gz span {
+  background: transparent url(../images/tar-gz-icon.png) 12px 50% no-repeat;
+}
+#view-on-github span {
+  background: transparent url(../images/octocat-icon.png) 12px 50% no-repeat;
+}
+#view-on-github {
+  margin-right: 0;
+}
+
+code, pre {
+  margin-bottom: 30px;
+  font-family: Monaco, "Bitstream Vera Sans Mono", "Lucida Console", Terminal;
+  font-size: 14px;
+  color: #222;
+}
+
+code {
+  padding: 0 3px;
+  background-color: #f2f2f2;
+  border: solid 1px #ddd;
+}
+
+pre {
+  padding: 20px;
+  overflow: auto;
+  color: #f2f2f2;
+  text-shadow: none;
+  background: #303030;
+}
+pre code {
+  padding: 0;
+  color: #f2f2f2;
+  background-color: #303030;
+  border: none;
+}
+
+ul, ol, dl {
+  margin-bottom: 20px;
+}
+
+
+/* COMMON STYLES */
+
+hr {
+  height: 1px;
+  padding-bottom: 1em;
+  margin-top: 1em;
+  line-height: 1px;
+  background: transparent url('../images/hr.png') 50% 0 no-repeat;
+  border: none;
+}
+
+strong {
+  font-weight: bold;
+}
+
+em {
+  font-style: italic;
+}
+
+table {
+  width: 100%;
+  border: 1px solid #ebebeb;
+}
+
+th {
+  font-weight: 500;
+}
+
+td {
+  font-weight: 300;
+  text-align: center;
+  border: 1px solid #ebebeb;
+}
+
+form {
+  padding: 20px;
+  background: #f2f2f2;
+
+}
+
+
+/* GENERAL ELEMENT TYPE STYLES */
+
+h1 {
+  font-size: 32px;
+}
+
+h2 {
+  margin-bottom: 8px;
+  font-size: 22px;
+  font-weight: bold;
+  color: #303030;
+}
+
+h3 {
+  margin-bottom: 8px;
+  font-size: 18px;
+  font-weight: bold;
+  color: #d5000d;
+}
+
+h4 {
+  font-size: 16px;
+  font-weight: bold;
+  color: #303030;
+}
+
+h5 {
+  font-size: 1em;
+  color: #303030;
+}
+
+h6 {
+  font-size: .8em;
+  color: #303030;
+}
+
+p {
+  margin-bottom: 20px;
+  font-weight: 300;
+}
+
+a {
+  text-decoration: none;
+}
+
+p a {
+  font-weight: 400;
+}
+
+blockquote {
+  padding: 0 0 0 30px;
+  margin-bottom: 20px;
+  font-size: 1.6em;
+  border-left: 10px solid #e9e9e9;
+}
+
+ul li {
+  list-style-position: inside;
+  list-style: disc;
+  padding-left: 20px;
+}
+
+ol li {
+  list-style-position: inside;
+  list-style: decimal;
+  padding-left: 3px;
+}
+
+dl dt {
+  color: #303030;
+}
+
+footer {
+  padding-top: 20px;
+  padding-bottom: 30px;
+  margin-top: 40px;
+  font-size: 13px;
+  color: #aaa;
+  background: transparent url('../images/hr.png') 0 0 no-repeat;
+}
+
+footer a {
+  color: #666;
+}
+footer a:hover {
+  color: #444;
+}
+
+/* MISC */
+.clearfix:after {
+  display: block;
+  height: 0;
+  clear: both;
+  visibility: hidden;
+  content: '.';
+}
+
+.clearfix {display: inline-block;}
+* html .clearfix {height: 1%;}
+.clearfix {display: block;}
+
+/* #Media Queries
+================================================== */
+
+/* Smaller than standard 960 (devices and browsers) */
+@media only screen and (max-width: 959px) { }
+
+/* Tablet Portrait size to standard 960 (devices and browsers) */
+@media only screen and (min-width: 768px) and (max-width: 959px) { }
+
+/* All Mobile Sizes (devices and browser) */
+@media only screen and (max-width: 767px) {
+  header {
+    padding-top: 10px;
+    padding-bottom: 10px;
+  }
+  #downloads {
+    margin-bottom: 25px;
+  }
+  #download-zip, #download-tar-gz {
+    display: none;
+  }
+  .inner {
+    width: 94%;
+    margin: 0 auto;
+  }
+}
+
+/* Mobile Landscape Size to Tablet Portrait (devices and browsers) */
+@media only screen and (min-width: 480px) and (max-width: 767px) { }
+
+/* Mobile Portrait Size to Mobile Landscape Size (devices and browsers) */
+@media only screen and (max-width: 479px) { }