diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
index 70195b4..6765bee 100644
--- a/.github/workflows/publish.yml
+++ b/.github/workflows/publish.yml
@@ -151,7 +151,7 @@ jobs:
           export PATH=/usr/local/nvidia/bin:/usr/local/nvidia/lib64:$PATH
           export LD_LIBRARY_PATH=/usr/local/nvidia/lib64:/usr/local/cuda/lib64:$LD_LIBRARY_PATH
           # Currently for this setting the runner goes OOM if we pass --threads 4 to nvcc
-          if [[ ${MATRIX_CUDA_VERSION} =~ "12." && ${MATRIX_TORCH_VERSION} == "2.1" ]]; then
+          if [[ ( ${MATRIX_CUDA_VERSION} == "121" || ${MATRIX_CUDA_VERSION} == "122" ) && ${MATRIX_TORCH_VERSION} == "2.1" ]]; then
             export FLASH_ATTENTION_FORCE_SINGLE_THREAD="TRUE"
           fi
           # Limit MAX_JOBS otherwise the github runner goes OOM
diff --git a/flash_attn/__init__.py b/flash_attn/__init__.py
index 5efd449..6695f00 100644
--- a/flash_attn/__init__.py
+++ b/flash_attn/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "2.1.2.post2"
+__version__ = "2.1.2.post3"
 
 from flash_attn.flash_attn_interface import (
     flash_attn_func,
diff --git a/training/Dockerfile b/training/Dockerfile
index 133703e..009770d 100644
--- a/training/Dockerfile
+++ b/training/Dockerfile
@@ -85,11 +85,11 @@ RUN pip install transformers==4.25.1 datasets==2.8.0 pytorch-lightning==1.8.6 tr
 RUN pip install git+https://github.com/mlcommons/logging.git@2.1.0
 
 # Install FlashAttention
-RUN pip install flash-attn==2.1.2.post2
+RUN pip install flash-attn==2.1.2.post3
 
 # Install CUDA extensions for cross-entropy, fused dense, layer norm
 RUN git clone https://github.com/HazyResearch/flash-attention \
-    && cd flash-attention && git checkout v2.1.2.post2 \
+    && cd flash-attention && git checkout v2.1.2.post3 \
     && cd csrc/fused_softmax && pip install . && cd ../../ \
     && cd csrc/rotary && pip install . && cd ../../ \
     && cd csrc/xentropy && pip install . && cd ../../ \
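
Side note on the workflow change (an inference, not part of the patch): in bash, a quoted right-hand side of =~ is matched as a literal string, so the old pattern "12." requires a literal dot and never matches dotless version values such as "121" or "122", which the new explicit equality checks imply are the actual values of MATRIX_CUDA_VERSION. A minimal sketch of the difference, under that assumption:

#!/usr/bin/env bash
# Sketch only: MATRIX_CUDA_VERSION is assumed to hold a dotless value like "121".
MATRIX_CUDA_VERSION="121"

# Old check: the quoted pattern "12." is matched literally, and "121" contains
# no literal '.', so this branch is never taken.
if [[ ${MATRIX_CUDA_VERSION} =~ "12." ]]; then
    echo "old check matches"
fi

# New check: explicit equality against the expected values, so this prints.
if [[ ( ${MATRIX_CUDA_VERSION} == "121" || ${MATRIX_CUDA_VERSION} == "122" ) ]]; then
    echo "new check matches"
fi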