From dec4f2e9101f88f8beffabc9d0f0379323748973 Mon Sep 17 00:00:00 2001
From: Tri Dao
Date: Thu, 6 Apr 2023 23:40:15 -0700
Subject: [PATCH] [FusedDense] Set workspace size to 32M for Hopper and 4M for others

---
 csrc/fused_dense_lib/fused_dense_cuda.cu | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/csrc/fused_dense_lib/fused_dense_cuda.cu b/csrc/fused_dense_lib/fused_dense_cuda.cu
index 7b6f392..023e74c 100644
--- a/csrc/fused_dense_lib/fused_dense_cuda.cu
+++ b/csrc/fused_dense_lib/fused_dense_cuda.cu
@@ -122,7 +122,9 @@ int gemm_bias_act_lt(
     reinterpret_cast<cublasLtHandle_t>(at::cuda::getCurrentCUDABlasHandle());
   // See https://github.com/pytorch/pytorch/issues/73328 for reasoning behind
   // setting this to 1M.
-  size_t workspaceSize = 1024 * 1024;
+  // However, Apex sets it to 4M and TransformerEngine sets it to 32M for Hopper and 4M for other GPUs
+  // https://github.com/NVIDIA/TransformerEngine/blob/a0f0065498bbcfc1da78cf9e8b166f5381613fbc/transformer_engine/pytorch/module.py#L91
+  size_t workspaceSize = 1024 * 1024 * (at::cuda::getCurrentDeviceProperties()->major >= 9 ? 32 : 4);
   void* workspace = at::empty(
     {static_cast<int64_t>(workspaceSize)},
     at::device({at::kCUDA, at::cuda::current_device()}).dtype(at::kByte)).data_ptr();
@@ -296,7 +298,8 @@ int gemm_bgradb_lt(
     reinterpret_cast<cublasLtHandle_t>(at::cuda::getCurrentCUDABlasHandle());
   // See https://github.com/pytorch/pytorch/issues/73328 for reasoning behind
   // setting this to 1M.
-  size_t workspaceSize = 1024 * 1024;
+  // However, Apex sets it to 4M and TransformerEngine sets it to 32M for Hopper and 4M for other GPUs
+  size_t workspaceSize = 1024 * 1024 * (at::cuda::getCurrentDeviceProperties()->major >= 9 ? 32 : 4);
   void* workspace = at::empty(
     {static_cast<int64_t>(workspaceSize)},
     at::device({at::kCUDA, at::cuda::current_device()}).dtype(at::kByte)).data_ptr();
@@ -449,7 +452,8 @@ int gemm_dact_bgradb_lt(
     reinterpret_cast<cublasLtHandle_t>(at::cuda::getCurrentCUDABlasHandle());
   // See https://github.com/pytorch/pytorch/issues/73328 for reasoning behind
   // setting this to 1M.
-  size_t workspaceSize = 1024 * 1024;
+  // However, Apex sets it to 4M and TransformerEngine sets it to 32M for Hopper and 4M for other GPUs
+  size_t workspaceSize = 1024 * 1024 * (at::cuda::getCurrentDeviceProperties()->major >= 9 ? 32 : 4);
   void* workspace = at::empty(
     {static_cast<int64_t>(workspaceSize)},
     at::device({at::kCUDA, at::cuda::current_device()}).dtype(at::kByte)).data_ptr();
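
For context, a minimal standalone sketch (not part of the patch) of the idea behind this change: choose the cuBLASLt workspace size from the device's compute capability (32 MiB on Hopper, SM 9.x; 4 MiB elsewhere) and advertise that limit to the matmul heuristic. The helper names below are illustrative; only standard CUDA runtime / cuBLASLt calls (cudaGetDeviceProperties, cublasLtMatmulPreferenceSetAttribute) are used, and error checking is omitted.

    #include <cublasLt.h>
    #include <cuda_runtime.h>
    #include <cstddef>

    // Illustrative helper (hypothetical name): pick the cuBLASLt workspace size
    // the same way the patch does -- 32 MiB on Hopper (SM 9.x), 4 MiB otherwise.
    static size_t pick_lt_workspace_bytes(int device) {
      cudaDeviceProp prop;
      cudaGetDeviceProperties(&prop, device);
      return static_cast<size_t>(prop.major >= 9 ? 32 : 4) * 1024 * 1024;
    }

    // Illustrative helper (hypothetical name): tell the cuBLASLt heuristic how
    // much scratch memory a matmul may use; a buffer of this size is then passed
    // to cublasLtMatmul() as the workspace argument.
    static void set_lt_workspace_limit(cublasLtMatmulPreference_t preference,
                                       size_t workspaceSize) {
      cublasLtMatmulPreferenceSetAttribute(
          preference, CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES,
          &workspaceSize, sizeof(workspaceSize));
    }

A larger workspace gives cuBLASLt's heuristic room to select faster split-K / tensor-core kernels, which matters most on Hopper; the cost is a slightly larger per-call scratch allocation, which the patch keeps modest (4 MiB) on older GPUs.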