'''
Module github.py contains a handful of helper functions
for interacting with GitHub data.
'''
import requests
import simplejson as json

########################
# GITHUB API CONSTANTS #
########################

API_BASE_URL = 'https://api.github.com/users/DrkSephy'

def getUserData(clientID, clientSecret):
    '''Get generic GitHub user data.'''
    url = API_BASE_URL + '?' + clientID + '&' + clientSecret
    req = requests.get(url)
    data = json.loads(req.content)
    parsedData = []
    userData = {}
    userData['name'] = data['name']
    userData['blog'] = data['blog']
    userData['email'] = data['email']
    userData['public_gists'] = data['public_gists']
    userData['public_repos'] = data['public_repos']
    userData['avatar_url'] = data['avatar_url']
    userData['followers'] = data['followers']
    userData['following'] = data['following']
    parsedData.append(userData)
    return parsedData
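
# Usage sketch (an assumption, not part of the original module): because the
# functions here concatenate clientID and clientSecret straight into the query
# string, callers are expected to pass pre-built fragments, e.g. the
# hypothetical values below:
#
#   userData = getUserData('client_id=abc123', 'client_secret=def456')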

def getUserRepositories(clientID, clientSecret):
    '''Get a list of all repositories owned by a user.'''
    # Which page number of data are we looking at?
    pageNumber = 1
    # List of all pages of JSON data
    jsonList = []
    # List of all repository names
    repositories = []
    # IDEA: repeatedly fetch pages of repositories and check whether a page
    # has fewer than 30 entries (the API's default page size). If it does,
    # we have iterated over all the data and it is time to parse it.
    while True:
        req = requests.get('https://api.github.com/users/DrkSephy/repos?page=' + str(pageNumber) + '&' + clientID + '&' + clientSecret)
        page = json.loads(req.content)
        jsonList.append(page)
        if len(page) < 30:
            break
        pageNumber += 1
    # Loop over our data and extract all of the repository names
    for data in jsonList:
        for datum in data:
            repositories.append(datum['name'])
    return repositories
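
# The paginated fetches in this module all share the same loop. As an
# illustrative sketch (an addition, not part of the original module), the idea
# can be factored into a single helper, assuming the same credential-fragment
# convention and the API's default page size of 30:
def fetchAllPages(url, clientID, clientSecret, pageSize=30):
    '''Fetch every page of a paginated GitHub endpoint (illustrative sketch).'''
    pageNumber = 1
    pages = []
    while True:
        req = requests.get(url + '?page=' + str(pageNumber) + '&' + clientID + '&' + clientSecret)
        page = json.loads(req.content)
        pages.append(page)
        # A short page means there is no further data to fetch.
        if len(page) < pageSize:
            break
        pageNumber += 1
    return pages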

def getForkedRepositories(clientID, clientSecret):
    '''Get a list of all repositories forked by a user.'''
    # Which page number of data are we looking at?
    pageNumber = 1
    # List of all pages of JSON data
    jsonList = []
    # List of all forked repositories
    forkedRepositories = []
    # IDEA: repeatedly fetch pages of repositories and check whether a page
    # has fewer than 30 entries (the API's default page size). If it does,
    # we have iterated over all the data and it is time to parse it.
    while True:
        req = requests.get('https://api.github.com/users/DrkSephy/repos?page=' + str(pageNumber) + '&' + clientID + '&' + clientSecret)
        page = json.loads(req.content)
        jsonList.append(page)
        if len(page) < 30:
            break
        pageNumber += 1
    # Loop over our data and extract the names of all forked repositories
    for data in jsonList:
        for datum in data:
            if datum['fork']:
                forkedRepositories.append({'name': datum['name']})
    return forkedRepositories

def getTopContributedRepositories(repos, clientID, clientSecret):
    '''Get the user's total commit count for each repository owned.'''
    jsonList = []
    for repo in repos:
        req = requests.get('https://api.github.com/repos/DrkSephy/' + repo + '/stats/contributors' + '?' + clientID + '&' + clientSecret)
        jsonList.append(json.loads(req.content))
    parsedData = []
    # Track which JSON set we are processing so we can recover the repo name
    for indexNumber, item in enumerate(jsonList):
        for data in item:
            if data['author']['login'] == 'DrkSephy':
                commits = {}
                commits['author'] = data['author']['login']
                commits['total'] = data['total']
                commits['repo_name'] = repos[indexNumber]
                parsedData.append(commits)
    return parsedData

def filterCommits(data):
    '''Return the top 10 repositories by commit total.'''
    maxCommits = []
    for _ in range(min(10, len(data))):
        maxCommitedRepo = max(data, key=lambda x: x['total'])
        maxCommits.append(maxCommitedRepo)
        data.remove(maxCommitedRepo)
    return maxCommits
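
# For comparison (an illustrative note, not part of the original module), the
# standard library's heapq.nlargest selects the top N entries in one call and
# does not mutate the input list; the same applies to filterStarGazerCount
# below with key=lambda x: x['stargazers_count']:
#
#   import heapq
#   top10 = heapq.nlargest(10, data, key=lambda x: x['total'])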

def getStarGazerCount(clientID, clientSecret):
    '''Get stargazer counts for all repositories.'''
    # Which page number of data are we looking at?
    pageNumber = 1
    # List of all pages of JSON data
    jsonList = []
    # List of repositories with their stargazer counts
    stargazers = []
    # IDEA: repeatedly fetch pages of repositories and check whether a page
    # has fewer than 30 entries (the API's default page size). If it does,
    # we have iterated over all the data and it is time to parse it.
    while True:
        req = requests.get('https://api.github.com/users/DrkSephy/repos?page=' + str(pageNumber) + '&' + clientID + '&' + clientSecret)
        page = json.loads(req.content)
        jsonList.append(page)
        if len(page) < 30:
            break
        pageNumber += 1
    # Loop over our data and extract each repository's name and star count
    for data in jsonList:
        for datum in data:
            starData = {}
            starData['name'] = datum['name']
            starData['stargazers_count'] = datum['stargazers_count']
            stargazers.append(starData)
    return stargazers

def filterStarGazerCount(data):
    '''Return the top 10 repositories by stargazer count.'''
    maxStars = []
    for _ in range(min(10, len(data))):
        maxStarGazers = max(data, key=lambda x: x['stargazers_count'])
        maxStars.append(maxStarGazers)
        data.remove(maxStarGazers)
    return maxStars
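
# A minimal end-to-end sketch (an addition, not part of the original module).
# The credential fragments below are hypothetical placeholders and assume
# GitHub's old query-string authentication style (client_id/client_secret),
# matching how the URLs above are built.
if __name__ == '__main__':
    CLIENT_ID = 'client_id=YOUR_CLIENT_ID'              # hypothetical placeholder
    CLIENT_SECRET = 'client_secret=YOUR_CLIENT_SECRET'  # hypothetical placeholder

    repositories = getUserRepositories(CLIENT_ID, CLIENT_SECRET)
    contributions = getTopContributedRepositories(repositories, CLIENT_ID, CLIENT_SECRET)
    print(filterCommits(contributions))

    starCounts = getStarGazerCount(CLIENT_ID, CLIENT_SECRET)
    print(filterStarGazerCount(starCounts))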