[CI] Fix CUDA 12.2.2 compilation
parent f9d7376126
commit e43a4ceaab

.github/workflows/publish.yml (6 changed lines)
@@ -64,11 +64,11 @@ jobs:
             python-version: '3.7'
           # Pytorch <= 2.0 only supports CUDA <= 11.8
           - torch-version: '1.12.1'
-            cuda-version: '12.2.0'
+            cuda-version: '12.2.2'
           - torch-version: '1.13.1'
-            cuda-version: '12.2.0'
+            cuda-version: '12.2.2'
           - torch-version: '2.0.1'
-            cuda-version: '12.2.0'
+            cuda-version: '12.2.2'
 
     steps:
       - name: Checkout
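Why these exclude entries had to change: a GitHub Actions matrix exclude entry removes a combination only when every key/value pair in the entry matches the matrix exactly. Presumably the build matrix now lists CUDA 12.2.2 rather than 12.2.0, so the stale 12.2.0 entries matched nothing and the unsupported torch/CUDA builds ran anyway. A minimal Python sketch of that matching rule (the version lists are illustrative, not the full matrix):

from itertools import product

torch_versions = ["1.12.1", "1.13.1", "2.0.1"]   # illustrative subset of the matrix
cuda_versions = ["11.8.0", "12.2.2"]             # the matrix now builds 12.2.2

# With the fix, the exclude entries name 12.2.2 and so match again.
exclude = [{"torch-version": t, "cuda-version": "12.2.2"} for t in torch_versions]

combos = [{"torch-version": t, "cuda-version": c}
          for t, c in product(torch_versions, cuda_versions)]
kept = [c for c in combos
        if not any(all(c.get(k) == v for k, v in e.items()) for e in exclude)]
print(kept)  # only the CUDA 11.8.0 combinations survive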
flash_attn/__init__.py

@@ -1,4 +1,4 @@
-__version__ = "2.4.3"
+__version__ = "2.4.3.post1"
 
 from flash_attn.flash_attn_interface import (
     flash_attn_func,
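The .post1 suffix is a PEP 440 post-release: a rebuild of the same source (here, to ship the fixed CUDA 12.2.2 wheels) that installers treat as newer than the base version. A quick check with the packaging library:

from packaging.version import Version

# PEP 440: a .postN release sorts after the base release and before the
# next patch, so pip treats 2.4.3.post1 as an upgrade over 2.4.3.
assert Version("2.4.3.post1") > Version("2.4.3")
assert Version("2.4.3.post1") < Version("2.4.4")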
training/Dockerfile

@@ -85,7 +85,7 @@ RUN pip install transformers==4.25.1 datasets==2.8.0 pytorch-lightning==1.8.6 tr
 RUN pip install git+https://github.com/mlcommons/logging.git@2.1.0
 
 # Install FlashAttention
-RUN pip install flash-attn==2.4.3
+RUN pip install flash-attn==2.4.3.post1
 
 # Install CUDA extensions for fused dense
 RUN pip install git+https://github.com/HazyResearch/flash-attention@v2.4.2#subdirectory=csrc/fused_dense_lib
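To confirm the post-release actually landed in the built image, the version string set in flash_attn/__init__.py above can be checked at runtime (a hedged sketch run inside the image, not part of this commit):

import flash_attn

# __version__ comes from flash_attn/__init__.py, bumped above to the post-release.
assert flash_attn.__version__ == "2.4.3.post1"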