sql.py
  1. """
  2. MIT License
  3. Copyright (c) 2022 Texas Tech University
  4. Permission is hereby granted, free of charge, to any person obtaining a copy
  5. of this software and associated documentation files (the "Software"), to deal
  6. in the Software without restriction, including without limitation the rights
  7. to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  8. copies of the Software, and to permit persons to whom the Software is
  9. furnished to do so, subject to the following conditions:
  10. The above copyright notice and this permission notice shall be included in all
  11. copies or substantial portions of the Software.
  12. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  13. IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  14. FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  15. AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  16. LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  17. OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  18. SOFTWARE.
  19. """
  20. """
  21. This file is part of MonSter.
  22. Author:
  23. Jie Li, jie.li@ttu.edu
  24. """
  25. import utils
  26. import logger
  27. from pgcopy import CopyManager
  28. from datetime import datetime
  29. log = logger.get_logger(__name__)
  30. def generate_metric_table_sqls(table_schemas: dict,
  31. schema_name: str):
  32. """generate_metric_table_sqls General Metric Table Sqls
  33. Generate sqls for creating metric tables
  34. Args:
  35. table_schemas (dict): table schemas
  36. schema_name (str): schema name
  37. Returns:
  38. dict: sql statements
  39. """
  40. sql_statements = {}
  41. try:
  42. schema_sql = f"CREATE SCHEMA IF NOT EXISTS {schema_name};"
  43. sql_statements.update({
  44. 'schema_sql': schema_sql
  45. })
  46. tables_sql = []
  47. for table, column in table_schemas.items():
  48. column_names = column['column_names']
  49. column_types = column['column_types']
  50. column_str = ''
  51. for i, column in enumerate(column_names):
  52. column_str += f'{column} {column_types[i]}, '
  53. table_sql = f"CREATE TABLE IF NOT EXISTS {schema_name}.{table} \
  54. ({column_str}FOREIGN KEY (NodeID) REFERENCES nodes (NodeID));"
  55. tables_sql.append(table_sql)
  56. sql_statements.update({
  57. 'tables_sql': tables_sql,
  58. })
  59. except Exception as err:
  60. log.error(f'Cannot Genrerate Metric Table Sqls: {err}')
  61. return sql_statements
  62. def generate_slurm_job_table_sql(schema_name: str):
  63. """generate_slurm_job_table_sql Generate Slurm Job Table Sql
  64. Generate sqls for creating the table that stores the jobs info
  65. Args:
  66. schema_name (str): schema name
  67. Returns:
  68. dict: sql statements
  69. """
  70. sql_statements = {}
  71. table = 'jobs'
  72. try:
  73. schema_sql = f"CREATE SCHEMA if NOT EXISTS {schema_name}"
  74. sql_statements.update({
  75. 'schema_sql': schema_sql
  76. })
  77. tables_sql = []
  78. column_names = ['job_id', 'array_job_id', 'array_task_id', 'name',
  79. 'job_state', 'user_id', 'user_name', 'group_id',
  80. 'cluster', 'partition', 'command',
  81. 'current_working_directory', 'batch_flag', 'batch_host',
  82. 'nodes', 'node_count', 'cpus', 'tasks',
  83. 'tasks_per_node', 'cpus_per_task', 'memory_per_node',
  84. 'memory_per_cpu', 'priority', 'time_limit', 'deadline',
  85. 'submit_time', 'preempt_time', 'suspend_time',
  86. 'eligible_time', 'start_time', 'end_time',
  87. 'resize_time', 'restart_cnt', 'exit_code',
  88. 'derived_exit_code']
  89. column_types = ['INT PRIMARY KEY', 'INT', 'INT', 'TEXT', 'TEXT', 'INT',
  90. 'TEXT', 'INT', 'TEXT', 'TEXT', 'TEXT', 'TEXT',
  91. 'BOOLEAN', 'TEXT', 'TEXT[]', 'INT', 'INT', 'INT', 'INT',
  92. 'INT', 'INT', 'INT', 'INT', 'INT', 'INT', 'INT', 'INT',
  93. 'INT', 'INT', 'INT', 'INT', 'INT', 'INT', 'INT', 'INT']
  94. column_str = ''
  95. for i, column in enumerate(column_names):
  96. column_str += f'{column} {column_types[i]}, '
  97. table_sql = f"CREATE TABLE IF NOT EXISTS {schema_name}.{table} \
  98. ({column_str[:-2]});"
  99. tables_sql.append(table_sql)
  100. sql_statements.update({
  101. 'tables_sql': tables_sql,
  102. })
  103. except Exception as err:
  104. print(err)
  105. log.error(f'Cannot Genrerate Job Table Sqls: {err}')
  106. return sql_statements
  107. def generate_metric_def_table_sql():
  108. """generate_metrics_def_table_sql Generate Metrics Definition Table Sql
  109. Generate a sql for creating the metrics definition table
  110. Returns:
  111. str: sql string
  112. """
  113. metric_def_table_sql = "CREATE TABLE IF NOT EXISTS metrics_definition \
  114. (id SERIAL PRIMARY KEY, metric_id TEXT NOT NULL, metric_name TEXT, \
  115. description TEXT, metric_type TEXT, metric_data_type TEXT, \
  116. units TEXT, accuracy REAL, sensing_interval TEXT, \
  117. discrete_values TEXT[], data_type TEXT, UNIQUE (id));"
  118. return metric_def_table_sql
  119. def generate_metadata_table_sql(nodes_metadata: list, table_name: str):
  120. """generate_metadata_table_sql Generate Metadata Table Sql
  121. Generate a sql for creating the node metadata table
  122. Args:
  123. nodes_metadata (list): nodes metadata list
  124. table_name (str): table name
  125. Returns:
  126. str: sql string
  127. """
  128. column_names = list(nodes_metadata[0].keys())
  129. column_str = ""
  130. for i, column in enumerate(column_names):
  131. column_str += column + " TEXT, "
  132. column_str = column_str[:-2]
  133. metadata_table_sql = f" CREATE TABLE IF NOT EXISTS {table_name} \
  134. ( NodeID SERIAL PRIMARY KEY, {column_str}, UNIQUE (NodeID));"
  135. return metadata_table_sql
  136. def update_nodes_metadata(conn: object, nodes_metadata: list, table_name: str):
  137. """update_nodes_metadata Update Nodes Metadata
  138. Update nodes metadata table
  139. Args:
  140. conn (object): database connection
  141. nodes_metadata (list): nodes metadata list
  142. table_name (str): table name
  143. """
  144. cur = conn.cursor()
  145. for record in nodes_metadata:
  146. col_sql = ""
  147. os_ip_addr = record['Os_Ip_Addr']
  148. for col, value in record.items():
  149. if col != 'Os_Ip_Addr' and col != 'servicetag':
  150. col_value = col.lower() + " = '" + str(value) + "', "
  151. col_sql += col_value
  152. col_sql = col_sql[:-2]
  153. sql = "UPDATE " + table_name + " SET " + col_sql \
  154. + " WHERE os_ip_addr = '" + os_ip_addr + "';"
  155. cur.execute(sql)
  156. conn.commit()
  157. cur.close()
  158. def insert_nodes_metadata(conn: object, nodes_metadata: list, table_name: str):
  159. """insert_nodes_metadata Insert Nodes Metadata
  160. Insert nodes metadata to metadata table
  161. Args:
  162. conn (object): database connection
  163. nodes_metadata (list): nodes metadata list
  164. table_name (str): table name
  165. """
  166. cols = tuple([col.lower() for col in list(nodes_metadata[0].keys())])
  167. records = []
  168. for record in nodes_metadata:
  169. values = [str(value) for value in record.values()]
  170. records.append(tuple(values))
  171. mgr = CopyManager(conn, table_name, cols)
  172. mgr.copy(records)
  173. conn.commit()
  174. def check_table_exist(conn: object, table_name: str):
  175. """check_table_exist Check Table Exists
  176. Check if the specified table exists or not
  177. Args:
  178. conn (object): database connection
  179. table_name (str): table name
  180. Returns:
  181. bool: True if exists, false otherwise
  182. """
  183. cur = conn.cursor()
  184. table_exists = False
  185. sql = "SELECT EXISTS (SELECT FROM pg_tables WHERE tablename = '" + table_name + "');"
  186. cur.execute(sql)
  187. (table_exists, ) = cur.fetchall()[0]
  188. if table_exists:
  189. data_exists = False
  190. sql = "SELECT EXISTS (SELECT * from " + table_name + ");"
  191. cur.execute(sql)
  192. (data_exists, ) = cur.fetchall()[0]
  193. return data_exists
  194. return False
  195. def write_metric_definitions(conn: object, metric_definitions: list):
  196. """write_metric_definitions Write Metric Definitions
  197. Write metric definitions to the table
  198. Args:
  199. conn (object): database connection
  200. metric_definitions (list): the metric definitions
  201. """
  202. if not check_table_exist(conn, 'metrics_definition'):
  203. cols = ('metric_id', 'metric_name', 'description', 'metric_type',
  204. 'metric_data_type', 'units', 'accuracy', 'sensing_interval',
  205. 'discrete_values', 'data_type')
  206. metric_definitions_table = [(i['Id'], i['Name'], i['Description'],
  207. i['MetricType'], i['MetricDataType'], i['Units'], i['Accuracy'],
  208. i['SensingInterval'], i['DiscreteValues'],
  209. utils.data_type_mapping[i['MetricDataType']])for i in metric_definitions]
  210. # Sort
  211. metric_definitions_table = utils.sort_tuple_list(metric_definitions_table)
  212. mgr = CopyManager(conn, 'metrics_definition', cols)
  213. mgr.copy(metric_definitions_table)
  214. conn.commit()
  215. def write_nodes_metadata(conn: object, nodes_metadata: list):
  216. """write_nodes_metadata Write Nodes Metadata
  217. Write nodes metadata to the table
  218. Args:
  219. conn (object): database connection
  220. nodes_metadata (list): nodes metadata list
  221. """
  222. if not check_table_exist(conn, 'nodes'):
  223. insert_nodes_metadata(conn, nodes_metadata, 'nodes')
  224. else:
  225. update_nodes_metadata(conn, nodes_metadata, 'nodes')
  226. def generate_slurm_sql(metric: str,
  227. start: str,
  228. end: str,
  229. interval: str,
  230. aggregate: str):
  231. """generate_slurm_sql Generate Slurm Sql
  232. Generate sql for querying slurm metrics
  233. Args:
  234. metric (str): metric name
  235. start (str): start of time range
  236. end (str): end of time range
  237. interval (str): aggregation interval
  238. aggregate (str): aggregation function
  239. Returns:
  240. string: sql string
  241. """
  242. sql = ""
  243. if metric == 'node_jobs':
  244. sql = f"SELECT time_bucket_gapfill('{interval}', timestamp) AS time, \
  245. nodeid, jsonb_agg(jobs) AS jobs, jsonb_agg(cpus) AS cpus \
  246. FROM slurm.{metric} \
  247. WHERE timestamp >= '{start}' \
  248. AND timestamp <= '{end}' \
  249. GROUP BY time, nodeid \
  250. ORDER BY time;"
  251. else:
  252. sql = f"SELECT time_bucket_gapfill('{interval}', timestamp) AS time, \
  253. nodeid, {aggregate}(value) AS value\
  254. FROM slurm.{metric} \
  255. WHERE timestamp >= '{start}' \
  256. AND timestamp <= '{end}' \
  257. GROUP BY time, nodeid \
  258. ORDER BY time;"
  259. return sql
  260. def generate_slurm_jobs_sql(start: str,end: str):
  261. """generate_slurm_jobs_sql Generate Slurm Jobs Sql
  262. Generate Sql for querying slurm jobs info
  263. Args:
  264. start (str): start time
  265. end (str): end time
  266. Returns:
  267. string: sql string
  268. """
  269. utc_from = datetime.strptime(start, '%Y-%m-%dT%H:%M:%S.%fZ')
  270. epoch_from = int((utc_from - datetime(1970, 1, 1)).total_seconds())
  271. utc_to = datetime.strptime(end, '%Y-%m-%dT%H:%M:%S.%fZ')
  272. epoch_to = int((utc_to - datetime(1970, 1, 1)).total_seconds())
  273. sql = f"SELECT * FROM slurm.jobs \
  274. WHERE start_time < {epoch_to} \
  275. AND end_time > {epoch_from};"
  276. return sql
  277. def generate_node_jobs_sql(start: str, end: str, interval: str):
  278. """gene_node_jobs_sql Generate Node-Jobs Sql
  279. Generate SQL for querying node-jobs correlation
  280. Args:
  281. start (str): start time
  282. end (str): end time
  283. interval (str): interval for aggragation
  284. Returns:
  285. string: sql string
  286. """
  287. sql = f"SELECT time_bucket_gapfill('{interval}', timestamp) AS time, \
  288. nodeid, jsonb_agg(jobs) AS jobs, jsonb_agg(cpus) AS cpus \
  289. FROM slurm.node_jobs \
  290. WHERE timestamp >= '{start}' \
  291. AND timestamp <= '{end}' \
  292. GROUP BY time, nodeid \
  293. ORDER BY time;"
  294. return sql