scaled_upper_triang_masked_softmax.cpp

/* coding=utf-8
 * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cuda_fp16.h>
#include <torch/extension.h>
#include <vector>

namespace multihead_attn {
namespace fused_softmax {
namespace scaled_upper_triang_masked_softmax {
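
// Forward declarations of the CUDA implementations; the actual kernels are
// defined in a separate .cu translation unit compiled alongside this file.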
torch::Tensor fwd_cuda(
    torch::Tensor const& input,
    float scale_factor);

torch::Tensor bwd_cuda(
    torch::Tensor const& output_grads,
    torch::Tensor const& softmax_results,
    float scale_factor);
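
// C++ entry point for the forward pass: checks that the input is a 3D
// fp16/bf16 tensor, then dispatches to the CUDA kernel.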
torch::Tensor fwd(torch::Tensor const& input, float scale_factor) {
  AT_ASSERTM(input.dim() == 3, "expected 3D tensor");
  AT_ASSERTM((input.scalar_type() == at::ScalarType::Half) ||
             (input.scalar_type() == at::ScalarType::BFloat16),
             "Only fp16 and bf16 are supported");

  return fwd_cuda(input, scale_factor);
}
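
// C++ entry point for the backward pass: validates both the incoming
// gradients and the saved softmax results before dispatching to CUDA.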
torch::Tensor bwd(
    torch::Tensor const& output_grads,
    torch::Tensor const& softmax_results,
    float scale_factor) {
  AT_ASSERTM(output_grads.dim() == 3, "expected 3D tensor");
  AT_ASSERTM(softmax_results.dim() == 3, "expected 3D tensor");

  AT_ASSERTM((output_grads.scalar_type() == at::ScalarType::Half) ||
             (output_grads.scalar_type() == at::ScalarType::BFloat16),
             "Only fp16 and bf16 are supported");
  AT_ASSERTM((softmax_results.scalar_type() == at::ScalarType::Half) ||
             (softmax_results.scalar_type() == at::ScalarType::BFloat16),
             "Only fp16 and bf16 are supported");

  return bwd_cuda(output_grads, softmax_results, scale_factor);
}

} // end namespace scaled_upper_triang_masked_softmax
} // end namespace fused_softmax
} // end namespace multihead_attn
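
// Python bindings: expose fwd/bwd as `forward`/`backward` on the extension
// module (the module name is supplied at build time via TORCH_EXTENSION_NAME).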
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("forward",
        &multihead_attn::fused_softmax::scaled_upper_triang_masked_softmax::fwd,
        "Self Multihead Attention scaled, time masked softmax -- Forward.");
  m.def("backward",
        &multihead_attn::fused_softmax::scaled_upper_triang_masked_softmax::bwd,
        "Self Multihead Attention scaled, time masked softmax -- Backward.");
}
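
// Usage sketch (an assumption, not part of this file): once built with
// torch.utils.cpp_extension, the bindings can be called from Python roughly
// as below. The module name `fused_softmax_cpp` and the .cu source filename
// are hypothetical placeholders.
//
//   import torch
//   from torch.utils.cpp_extension import load
//
//   fused = load(name="fused_softmax_cpp",
//                sources=["scaled_upper_triang_masked_softmax.cpp",
//                         "scaled_upper_triang_masked_softmax_cuda.cu"])
//   # input: (attn_batches, seq_len, seq_len), fp16 or bf16, on the GPU
//   scores = torch.randn(8, 1024, 1024, dtype=torch.half, device="cuda")
//   probs = fused.forward(scores, 0.125)  # scale applied before the softmax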