scaled_masked_softmax_cuda.cu

/* coding=utf-8
 * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cuda_profiler_api.h>
#include <ATen/cuda/CUDAContext.h>
#include <torch/extension.h>

#include "scaled_masked_softmax.h"
#include "type_shim.h"

namespace multihead_attn {
namespace fused_softmax {
namespace scaled_masked_softmax {

torch::Tensor fwd_cuda(
    torch::Tensor const& input,
    torch::Tensor const& mask,
    float scale_factor)
{
  // input is a 4d tensor with dimensions [batches, attn_heads, seq_len, seq_len]
  const int batches = input.size(0);
  const int pad_batches = mask.size(0);
  const int attn_heads = input.size(1);
  const int query_seq_len = input.size(2);
  const int key_seq_len = input.size(3);

  // The fused kernel only handles key lengths up to 2048; the mask is either
  // shared across the batch (pad_batches == 1) or per-example.
  TORCH_INTERNAL_ASSERT(key_seq_len <= 2048);
  TORCH_INTERNAL_ASSERT(query_seq_len > 1);
  TORCH_INTERNAL_ASSERT(pad_batches == 1 || pad_batches == batches);
  TORCH_INTERNAL_ASSERT(mask.size(1) == 1);
  TORCH_INTERNAL_ASSERT(mask.size(2) == query_seq_len);
  TORCH_INTERNAL_ASSERT(mask.size(3) == key_seq_len);

  // Output
  auto act_options = input.options().requires_grad(false);
  torch::Tensor softmax_results =
      torch::empty({batches, attn_heads, query_seq_len, key_seq_len}, act_options);

  // Softmax intermediate result pointers
  void* input_ptr = static_cast<void*>(input.data_ptr());
  void* mask_ptr = static_cast<void*>(mask.data_ptr());
  void* softmax_results_ptr = static_cast<void*>(softmax_results.data_ptr());

  // Dispatch on fp16/bf16; the macro binds scalar_t to the matching type.
  DISPATCH_HALF_AND_BFLOAT(
      input.scalar_type(),
      "dispatch_scaled_masked_softmax_forward",
      dispatch_scaled_masked_softmax_forward<scalar_t, scalar_t, float>(
          reinterpret_cast<scalar_t*>(softmax_results_ptr),
          reinterpret_cast<const scalar_t*>(input_ptr),
          reinterpret_cast<const uint8_t*>(mask_ptr),
          scale_factor,
          query_seq_len,
          key_seq_len,
          batches,
          attn_heads,
          pad_batches);
      );

  return softmax_results;
}
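
// A minimal host-side usage sketch, not part of the original file: it assumes
// fp16 CUDA tensors shaped per the asserts above; the variable names and the
// 0.125f scale are illustrative only.
//
//   auto opts = torch::dtype(torch::kFloat16).device(torch::kCUDA);
//   auto scores = torch::randn({8, 16, 128, 128}, opts);
//   auto mask = torch::zeros({8, 1, 128, 128},
//                            torch::dtype(torch::kUInt8).device(torch::kCUDA));
//   auto probs = fwd_cuda(scores, mask, /*scale_factor=*/0.125f);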
torch::Tensor bwd_cuda(
    torch::Tensor const& output_grads_,
    torch::Tensor const& softmax_results_,
    float scale_factor)
{
  auto output_grads = output_grads_.contiguous();
  auto softmax_results = softmax_results_.contiguous();

  // output_grads is a 4d tensor with dimensions [batches, attn_heads, seq_len, seq_len]
  const int batches = output_grads.size(0);
  const int attn_heads = output_grads.size(1);
  const int query_seq_len = output_grads.size(2);
  const int key_seq_len = output_grads.size(3);

  void* output_grads_ptr = static_cast<void*>(output_grads.data_ptr());

  // Softmax grad: the first two arguments alias output_grads_ptr, so the
  // incoming gradient buffer is overwritten with the result.
  DISPATCH_HALF_AND_BFLOAT(
      output_grads_.scalar_type(),
      "dispatch_scaled_masked_softmax_backward",
      dispatch_scaled_masked_softmax_backward<scalar_t, scalar_t, float>(
          reinterpret_cast<scalar_t*>(output_grads_ptr),
          reinterpret_cast<scalar_t*>(output_grads_ptr),
          reinterpret_cast<scalar_t const*>(softmax_results.data_ptr()),
          scale_factor,
          query_seq_len,
          key_seq_len,
          batches,
          attn_heads);
      );

  // backward pass is completely in-place
  return output_grads;
}
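
// A matching backward sketch, assuming `probs` came from the forward example
// above and `grad_out` has the same shape and dtype (names are illustrative):
//
//   auto grad_out = torch::ones_like(probs);
//   auto grad_scores = bwd_cuda(grad_out, probs, /*scale_factor=*/0.125f);
//   // grad_out's storage now holds grad_scores: the pass writes in place.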
} // namespace scaled_masked_softmax
} // namespace fused_softmax
} // namespace multihead_attn
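
// For reference, a sketch of how these entry points are commonly exposed to
// Python. In Megatron-style builds the binding lives in a separate .cpp file,
// so this block is illustrative, not part of this translation unit:
//
//   PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
//     m.def("forward",
//           &multihead_attn::fused_softmax::scaled_masked_softmax::fwd_cuda,
//           "Scaled masked softmax -- forward (CUDA)");
//     m.def("backward",
//           &multihead_attn::fused_softmax::scaled_masked_softmax::bwd_cuda,
//           "Scaled masked softmax -- backward (CUDA)");
//   }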