grad_scaler.py

# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Megatron grad scaler."""

from abc import ABC
from abc import abstractmethod

import torch


class MegatronGradScaler(ABC):

    def __init__(self, initial_scale):
        """Initialize scale value with the input initial scale."""
        assert initial_scale > 0.0
        self._scale = torch.cuda.FloatTensor([initial_scale])

    @property
    def scale(self):
        return self._scale

    @property
    def inv_scale(self):
        return self._scale.double().reciprocal().float()

    @abstractmethod
    def update(self, found_inf):
        pass

    @abstractmethod
    def state_dict(self):
        pass

    @abstractmethod
    def load_state_dict(self, state_dict):
        pass
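
# Illustrative usage sketch (not part of the original file): the loss is
# multiplied by `scale` before backward(), and the resulting gradients are
# unscaled with `inv_scale` before the optimizer step. `model`, `loss`, and
# `overflow_detected` below are hypothetical placeholders; ConstantGradScaler
# is defined further down in this file.
#
#     scaler = ConstantGradScaler(initial_scale=2.0 ** 16)
#     (loss * scaler.scale).backward()
#     for param in model.parameters():
#         param.grad.mul_(scaler.inv_scale)
#     scaler.update(found_inf=overflow_detected)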


class ConstantGradScaler(MegatronGradScaler):
    """Grad scaler with a fixed scale that is never adjusted."""

    def update(self, found_inf):
        pass

    def state_dict(self):
        return dict()

    def load_state_dict(self, state_dict):
        pass


class DynamicGradScaler(MegatronGradScaler):

    def __init__(self, initial_scale, min_scale,
                 growth_factor, backoff_factor,
                 growth_interval, hysteresis):
        """Grad scaler with dynamic scale that gets adjusted
        during training."""
        super(DynamicGradScaler, self).__init__(initial_scale)

        # Lower bound on the scale.
        assert min_scale > 0.0
        assert min_scale <= initial_scale
        self.min_scale = torch.cuda.FloatTensor([min_scale])
        # Growth and backoff factors for the scale.
        assert growth_factor > 1.0
        self.growth_factor = torch.cuda.FloatTensor([growth_factor])
        assert backoff_factor < 1.0
        assert backoff_factor > 0.0
        self.backoff_factor = torch.cuda.FloatTensor([backoff_factor])
        # Interval over which, if we don't see any inf/nan,
        # we will scale the grad scale by the growth factor.
        assert growth_interval > 0
        self.growth_interval = growth_interval
        # Number of inf/nans we should see before scaling down
        # the grad scale by the backoff factor.
        assert hysteresis > 0
        self.hysteresis = hysteresis

        # Trackers.
        self._growth_tracker = 0
        self._hysteresis_tracker = self.hysteresis

    def update(self, found_inf):

        # If we have an inf/nan, growth tracker is set to 0
        # and hysteresis tracker is reduced by 1.
        if found_inf:
            self._growth_tracker = 0
            self._hysteresis_tracker -= 1
            # Now if we are out of hysteresis count, scale down the loss.
            if self._hysteresis_tracker <= 0:
                self._scale = torch.max(self._scale * self.backoff_factor,
                                        self.min_scale)
        else:
            # If there is no nan/inf, increment the growth tracker.
            self._growth_tracker += 1
            # If we have had enough consecutive intervals with no nan/inf:
            if self._growth_tracker == self.growth_interval:
                # Reset the growth and hysteresis trackers,
                self._growth_tracker = 0
                self._hysteresis_tracker = self.hysteresis
                # and scale up the loss scale.
                self._scale = self._scale * self.growth_factor
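
    # Worked example (illustrative, not in the original file): with
    # backoff_factor=0.5, growth_factor=2.0, hysteresis=2, and
    # growth_interval=4, two overflowing steps halve the scale (the first
    # overflow only consumes hysteresis budget), and four consecutive clean
    # steps afterwards hit the growth interval and double the scale again.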

    def state_dict(self):
        state_dict = {}
        state_dict['scale'] = self._scale
        state_dict['growth_tracker'] = self._growth_tracker
        state_dict['hysteresis_tracker'] = self._hysteresis_tracker
        return state_dict

    def load_state_dict(self, state_dict):
        self._scale = state_dict['scale'].cuda(torch.cuda.current_device())
        self._growth_tracker = state_dict['growth_tracker']
        self._hysteresis_tracker = state_dict['hysteresis_tracker']
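

if __name__ == "__main__":
    # Minimal smoke test (not part of the original file). It needs a CUDA
    # device because the scalers allocate torch.cuda.FloatTensor buffers.
    if torch.cuda.is_available():
        scaler = DynamicGradScaler(initial_scale=2.0 ** 16, min_scale=1.0,
                                   growth_factor=2.0, backoff_factor=0.5,
                                   growth_interval=4, hysteresis=2)
        # Two overflows in a row exhaust the hysteresis budget of 2 and
        # halve the scale: 65536 -> 32768.
        scaler.update(found_inf=True)
        scaler.update(found_inf=True)
        print('scale after 2 overflows:', scaler.scale.item())
        # Four consecutive clean steps hit the growth interval and double
        # the scale back: 32768 -> 65536.
        for _ in range(4):
            scaler.update(found_inf=False)
        print('scale after 4 clean steps:', scaler.scale.item())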