From 054816177e5bfe8f1e9883d9e03fe9bb8473410c Mon Sep 17 00:00:00 2001
From: Tri Dao
Date: Sun, 20 Nov 2022 22:35:59 -0800
Subject: [PATCH] Bump version to 0.2.1

---
 flash_attn/losses/cross_entropy_parallel.py | 2 +-
 setup.py                                    | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/flash_attn/losses/cross_entropy_parallel.py b/flash_attn/losses/cross_entropy_parallel.py
index ebe4d1b..b9f5c59 100644
--- a/flash_attn/losses/cross_entropy_parallel.py
+++ b/flash_attn/losses/cross_entropy_parallel.py
@@ -14,7 +14,7 @@ from apex.transformer.tensor_parallel.utils import VocabUtility

 # `all_gather_into_tensor` and `reduce_scatter_tensor` are new placeholders for
 # `_all_gather_base` and `_reduce_scatter_base`. They require the most recent
-# version of PyTorch. The following 4 lines are for backward comparability with
+# version of PyTorch. The following 4 lines are for backward compatibility with
 # older PyTorch.
 if "all_gather_into_tensor" not in dir(torch.distributed):
     torch.distributed.all_gather_into_tensor = torch.distributed._all_gather_base
diff --git a/setup.py b/setup.py
index e4c7a02..c27c041 100644
--- a/setup.py
+++ b/setup.py
@@ -152,7 +152,7 @@ ext_modules.append(

 setup(
     name="flash_attn",
-    version="0.2.0",
+    version="0.2.1",
     packages=find_packages(
         exclude=("build", "csrc", "include", "tests", "dist", "docs", "benchmarks", "flash_attn.egg-info",)
     ),
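
Note (not part of the patch): the comment edited in the first hunk refers to a four-line
backward-compatibility shim, but only the `all_gather_into_tensor` pair falls inside the
diff context. A minimal sketch of what the full shim presumably looks like, assuming the
`reduce_scatter_tensor` pair follows the same pattern as the visible lines:

    import torch
    import torch.distributed

    # On older PyTorch, the new public collective names do not exist yet,
    # so alias them to the legacy underscore-prefixed functions.
    if "all_gather_into_tensor" not in dir(torch.distributed):
        torch.distributed.all_gather_into_tensor = torch.distributed._all_gather_base
    if "reduce_scatter_tensor" not in dir(torch.distributed):
        torch.distributed.reduce_scatter_tensor = torch.distributed._reduce_scatter_base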