# dump.py
"""
MIT License
Copyright (c) 2022 Texas Tech University
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""
This file is part of MonSter.
Author:
    Jie Li, jie.li@ttu.edu
"""
  25. import utils
  26. import logger
  27. from pgcopy import CopyManager
  28. from dateutil import tz
  29. from dateutil.parser import parse as parse_time
  30. log = logger.get_logger(__name__)
  31. def dump_node_jobs(timestamp: object, node_jobs: dict, conn: object):
  32. """dump_node_jobs Dump Node-Jobs
  33. Dump node-jobs correlation to TimeScaleDB
  34. Args:
  35. timestamp (object): Attached timestamp
  36. node_jobs (dict): Node-jobs correlation
  37. conn (object): TimeScaleDB connection object
  38. """
  39. try:
  40. all_records = []
  41. target_table = 'slurm.node_jobs'
  42. cols = ('timestamp', 'nodeid', 'jobs', 'cpus')
  43. for node, job_info in node_jobs.items():
  44. all_records.append((timestamp, int(node), job_info['jobs'], job_info['cpus']))
  45. mgr = CopyManager(conn, target_table, cols)
  46. mgr.copy(all_records)
  47. conn.commit()
  48. except Exception as err:
  49. curs = conn.cursor()
  50. curs.execute("ROLLBACK")
  51. conn.commit()
  52. log.error(f"Fail to dump node-jobs correlation: {err}")
  53. def dump_job_metrics(job_metrics: dict, conn: object):
  54. """dump_job_metrics Dump Job Metircs
  55. Dump job metrics to TimeScaleDB
  56. Args:
  57. job_metrics (dict): Job Metrics
  58. conn (object): TimeScaleDB connection object
  59. """
  60. try:
  61. target_table = 'slurm.jobs'
  62. cols = ('job_id', 'array_job_id', 'array_task_id', 'name','job_state',
  63. 'user_id', 'user_name', 'group_id', 'cluster', 'partition',
  64. 'command', 'current_working_directory', 'batch_flag',
  65. 'batch_host', 'nodes', 'node_count', 'cpus', 'tasks',
  66. 'tasks_per_node', 'cpus_per_task', 'memory_per_node',
  67. 'memory_per_cpu', 'priority', 'time_limit', 'deadline',
  68. 'submit_time', 'preempt_time', 'suspend_time', 'eligible_time',
  69. 'start_time', 'end_time', 'resize_time', 'restart_cnt',
  70. 'exit_code', 'derived_exit_code')
  71. cur = conn.cursor()
  72. all_records = []
  73. for job in job_metrics:
  74. job_id = job[cols.index('job_id')]
  75. check_sql = f"SELECT EXISTS(SELECT 1 FROM slurm.jobs WHERE job_id={job_id})"
  76. cur.execute(check_sql)
  77. (job_exists, ) = cur.fetchall()[0]
  78. if job_exists:
  79. # Update
  80. nodes = job[cols.index('nodes')]
  81. job_state = job[cols.index('job_state')]
  82. user_name = job[cols.index('user_name')]
  83. start_time = job[cols.index('start_time')]
  84. end_time = job[cols.index('end_time')]
  85. resize_time = job[cols.index('resize_time')]
  86. restart_cnt = job[cols.index('restart_cnt')]
  87. exit_code = job[cols.index('exit_code')]
  88. derived_exit_code = job[cols.index('derived_exit_code')]
  89. update_sql = """ UPDATE slurm.jobs
  90. SET nodes = %s, job_state = %s, user_name = %s, start_time = %s, end_time = %s, resize_time = %s, restart_cnt = %s, exit_code = %s, derived_exit_code = %s
  91. WHERE job_id = %s """
  92. cur.execute(update_sql, (nodes, job_state, user_name, start_time, end_time, resize_time, restart_cnt, exit_code, derived_exit_code, job_id))
  93. else:
  94. all_records.append(job)
  95. mgr = CopyManager(conn, target_table, cols)
  96. mgr.copy(all_records)
  97. conn.commit()
  98. except Exception as err:
  99. curs = conn.cursor()
  100. curs.execute("ROLLBACK")
  101. conn.commit()
  102. log.error(f"Fail to dump job metrics: {err}")
  103. def dump_node_metrics(timestamp: object,
  104. node_metrics: dict,
  105. conn: object):
  106. """dump_node_metrics Dump Node Metrics
  107. Dump node metrics to TimeScaleDB
  108. Args:
  109. timestamp (object): attached timestamp
  110. node_metrics (dict): node metrics
  111. conn (object): TimeScaleDB connection object
  112. """
  113. schema = 'slurm'
  114. try:
  115. metric_names = list(list(node_metrics.values())[0].keys())
  116. for metric_name in metric_names:
  117. all_records = []
  118. target_table = f'{schema}.{metric_name}'
  119. cols = ('timestamp', 'nodeid', 'value')
  120. for node, node_data in node_metrics.items():
  121. all_records.append((timestamp, int(node), node_data[metric_name]))
  122. mgr = CopyManager(conn, target_table, cols)
  123. mgr.copy(all_records)
  124. conn.commit()
  125. except Exception as err:
  126. curs = conn.cursor()
  127. curs.execute("ROLLBACK")
  128. conn.commit()
  129. log.error(f"Fail to dump node metrics : {err}")