text2sql_eval.py 6.3 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200
  1. import argparse
  2. import json
  3. import multiprocessing as mp
  4. import re
  5. import sqlite3
  6. import sys
  7. from func_timeout import func_timeout, FunctionTimedOut
  8. def load_json(dir):
  9. with open(dir, "r") as j:
  10. contents = json.loads(j.read())
  11. return contents
def result_callback(result):
    # Callback passed to mp.Pool.apply_async: runs in the main process as
    # each worker task finishes and collects the worker's result dict into
    # the module-level ``exec_result`` list (defined in the __main__ block).
    exec_result.append(result)
  14. def execute_sql(predicted_sql, ground_truth, db_path):
  15. conn = sqlite3.connect(db_path)
  16. # Connect to the database
  17. cursor = conn.cursor()
  18. cursor.execute(predicted_sql)
  19. predicted_res = cursor.fetchall()
  20. cursor.execute(ground_truth)
  21. ground_truth_res = cursor.fetchall()
  22. res = 0
  23. if set(predicted_res) == set(ground_truth_res):
  24. res = 1
  25. else:
  26. print(
  27. f"\n\n==== INCORRECT SQL GENERATED ====\n{predicted_sql=}\n{predicted_res=}\n{ground_truth=}\n{ground_truth_res=}\n======\n\n"
  28. )
  29. return res
  30. def execute_model(predicted_sql, ground_truth, db_place, idx, meta_time_out):
  31. try:
  32. res = func_timeout(
  33. meta_time_out, execute_sql, args=(predicted_sql, ground_truth, db_place)
  34. )
  35. except KeyboardInterrupt:
  36. sys.exit(0)
  37. except FunctionTimedOut:
  38. result = [(f"timeout",)]
  39. res = 0
  40. except Exception as e:
  41. result = [(f"error",)] # possibly len(query) > 512 or not executable
  42. res = 0
  43. result = {"sql_idx": idx, "res": res}
  44. return result
  45. def package_sqls(sql_path, db_root_path, mode="gpt", data_mode="dev"):
  46. clean_sqls = []
  47. db_path_list = []
  48. if mode == "gpt":
  49. sql_data = json.load(open(sql_path + "predict_" + data_mode + ".json", "r"))
  50. for idx, sql_str in sql_data.items():
  51. if type(sql_str) == str:
  52. sql, db_name = sql_str.split("\t----- bird -----\t")
  53. else:
  54. sql, db_name = " ", "financial"
  55. clean_sqls.append(sql)
  56. db_path_list.append(db_root_path + db_name + "/" + db_name + ".sqlite")
  57. elif mode == "gt": # ground truth
  58. items = json.load(open(db_root_path + "/../dev.json"))
  59. for item in items:
  60. sql = item["SQL"]
  61. db_name = item["db_id"]
  62. clean_sqls.append(sql)
  63. db_path_list.append(db_root_path + db_name + "/" + db_name + ".sqlite")
  64. return clean_sqls, db_path_list
  65. def run_sqls_parallel(sqls, db_places, num_cpus=1, meta_time_out=30.0):
  66. pool = mp.Pool(processes=num_cpus)
  67. for i, sql_pair in enumerate(sqls):
  68. predicted_sql, ground_truth = sql_pair
  69. pool.apply_async(
  70. execute_model,
  71. args=(predicted_sql, ground_truth, db_places[i], i, meta_time_out),
  72. callback=result_callback,
  73. )
  74. pool.close()
  75. pool.join()
  76. def sort_results(list_of_dicts):
  77. return sorted(list_of_dicts, key=lambda x: x["sql_idx"])
  78. def compute_acc_by_diff(exec_results, diff_json_path):
  79. num_queries = len(exec_results)
  80. results = [res["res"] for res in exec_results]
  81. contents = load_json(diff_json_path)
  82. simple_results, moderate_results, challenging_results = [], [], []
  83. for i, content in enumerate(contents):
  84. if content["difficulty"] == "simple":
  85. simple_results.append(exec_results[i])
  86. if content["difficulty"] == "moderate":
  87. moderate_results.append(exec_results[i])
  88. if content["difficulty"] == "challenging":
  89. challenging_results.append(exec_results[i])
  90. simple_acc = sum([res["res"] for res in simple_results]) / len(simple_results)
  91. moderate_acc = sum([res["res"] for res in moderate_results]) / len(moderate_results)
  92. challenging_acc = (
  93. 0
  94. if len(challenging_results) == 0
  95. else sum([res["res"] for res in challenging_results]) / len(challenging_results)
  96. )
  97. all_acc = sum(results) / num_queries
  98. count_lists = [
  99. len(simple_results),
  100. len(moderate_results),
  101. len(challenging_results),
  102. num_queries,
  103. ]
  104. return (
  105. simple_acc * 100,
  106. moderate_acc * 100,
  107. challenging_acc * 100,
  108. all_acc * 100,
  109. count_lists,
  110. )
  111. def print_data(score_lists, count_lists):
  112. levels = ["simple", "moderate", "challenging", "total"]
  113. print("{:20} {:20} {:20} {:20} {:20}".format("", *levels))
  114. print("{:20} {:<20} {:<20} {:<20} {:<20}".format("count", *count_lists))
  115. print(
  116. "====================================== ACCURACY ====================================="
  117. )
  118. print(
  119. "{:20} {:<20.2f} {:<20.2f} {:<20.2f} {:<20.2f}".format("accuracy", *score_lists)
  120. )
  121. if __name__ == "__main__":
  122. args_parser = argparse.ArgumentParser()
  123. args_parser.add_argument(
  124. "--predicted_sql_path", type=str, required=True, default=""
  125. )
  126. args_parser.add_argument("--ground_truth_path", type=str, required=True, default="")
  127. args_parser.add_argument("--data_mode", type=str, default="dev")
  128. args_parser.add_argument("--db_root_path", type=str, required=True, default="")
  129. args_parser.add_argument("--num_cpus", type=int, default=1)
  130. args_parser.add_argument("--meta_time_out", type=float, default=30.0)
  131. args_parser.add_argument("--mode_gt", type=str, default="gt")
  132. args_parser.add_argument("--mode_predict", type=str, default="gpt")
  133. args_parser.add_argument("--difficulty", type=str, default="simple")
  134. args_parser.add_argument("--diff_json_path", type=str, default="")
  135. args = args_parser.parse_args()
  136. exec_result = []
  137. pred_queries, db_paths = package_sqls(
  138. args.predicted_sql_path,
  139. args.db_root_path,
  140. mode=args.mode_predict,
  141. data_mode=args.data_mode,
  142. )
  143. # generate gt sqls:
  144. gt_queries, db_paths_gt = package_sqls(
  145. args.ground_truth_path, args.db_root_path, mode="gt", data_mode=args.data_mode
  146. )
  147. query_pairs = list(zip(pred_queries, gt_queries))
  148. run_sqls_parallel(
  149. query_pairs,
  150. db_places=db_paths,
  151. num_cpus=args.num_cpus,
  152. meta_time_out=args.meta_time_out,
  153. )
  154. exec_result = sort_results(exec_result)
  155. print("Evaluating statistics...")
  156. simple_acc, moderate_acc, challenging_acc, acc, count_lists = compute_acc_by_diff(
  157. exec_result, args.diff_json_path
  158. )
  159. score_lists = [simple_acc, moderate_acc, challenging_acc, acc]
  160. print_data(score_lists, count_lists)
  161. print(
  162. "==========================================================================================="
  163. )