group_duplicate_url.py

# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import time
import sys


if __name__ == '__main__':

    print('grouping duplicate urls ...')

    # Command-line arguments: input file, output file, and an optional
    # Jaccard-similarity threshold (defaults to 0.7).
    input = sys.argv[1]
    output = sys.argv[2]
    if len(sys.argv) > 3:
        jaccard_similarity_threshold = float(sys.argv[3])
    else:
        jaccard_similarity_threshold = 0.7

    # Union-find style grouping: url_to_index maps each URL to its group id,
    # index_to_urls holds the set of URLs in each group (None once merged away).
    url_to_index = {}
    index_to_urls = []
    counter = 0
    start_time = time.time()
    with open(input, 'r') as f:
        for line in f:
            counter += 1
            myjson = json.loads(line)
            # Collect the main URL plus every other URL whose Jaccard
            # similarity meets the threshold.
            urls = []
            for main_url in myjson.keys():
                urls.append(main_url)
                for value in myjson[main_url]:
                    for other_url, js in value.items():
                        if js >= jaccard_similarity_threshold:
                            urls.append(other_url)
            # Find an existing group containing any of these URLs; remember
            # any additional groups that will need to be merged into it.
            current_index = -1
            other_indices = set()
            for url in urls:
                if url in url_to_index:
                    if current_index == -1:
                        current_index = url_to_index[url]
                    elif current_index != url_to_index[url]:
                        other_indices.add(url_to_index[url])
            # No existing group found, so start a new one.
            if current_index == -1:
                current_index = len(index_to_urls)
                index_to_urls.append(set())
            # Assign all URLs from this line to the chosen group.
            for url in urls:
                url_to_index[url] = current_index
                index_to_urls[current_index].add(url)
            # Merge every other group that shares a URL into the chosen group.
            for index in other_indices:
                for url in index_to_urls[index]:
                    index_to_urls[current_index].add(url)
                    url_to_index[url] = current_index
                index_to_urls[index] = None

            if counter % 100000 == 0:
                print(' > processed {} lines in {} seconds ...'.format(
                    counter, time.time() - start_time))

    # Report how many URLs are duplicates of another URL in the same group.
    total_remove = 0
    total_remain = 0
    for urls in index_to_urls:
        if urls is not None:
            if len(urls) > 1:
                total_remove += (len(urls) - 1)
                total_remain += 1
    print('out of {} urls, only {} are unique and {} should be removed'.format(
        total_remove + total_remain, total_remain, total_remove))

    # Write one JSON object per duplicate group: {group index: [urls]}.
    with open(output, 'wb') as f:
        for i, urls in enumerate(index_to_urls):
            if urls is not None:
                if len(urls) > 1:
                    myjson = json.dumps({str(i): list(urls)},
                                        ensure_ascii=False)
                    f.write(myjson.encode('utf-8'))
                    f.write('\n'.encode('utf-8'))
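
For reference, here is a minimal sketch of the input this script appears to expect, inferred from the parsing loop above; the file names, URLs, and similarity values are illustrative assumptions, not part of the original tool.

# Build a tiny example input file in the format the loop above parses:
# each line is a JSON object {main_url: [{other_url: jaccard_similarity}, ...]}.
import json

example_records = [
    {"http://example.com/a": [{"http://example.com/a-copy": 0.93},
                              {"http://example.com/other": 0.12}]},
    {"http://example.com/b": [{"http://example.com/a": 0.85}]},
]
with open('example_urls.json', 'w') as f:
    for record in example_records:
        f.write(json.dumps(record) + '\n')

# Assumed invocation (0.7 is the default threshold if the third argument is omitted):
#   python group_duplicate_url.py example_urls.json grouped_urls.json 0.7
# Each output line maps a group index to the URLs grouped as near-duplicates, e.g.
#   {"0": ["http://example.com/a", "http://example.com/a-copy", "http://example.com/b"]}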