Nekochu committed on
Commit
70abe8f
·
verified ·
1 Parent(s): 482b5b7

Add flash_attn-2.8.1-cp310-cp310-win_amd64.whl

Browse files

0. Open the "x64 Native Tools Command Prompt for VS 2022"
1. python -m pip install -U torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu128
2. pip install torch numpy ninja packaging wheel
3. git clone https://github.com/Dao-AILab/flash-attention.git && cd flash-attention
4. set DISTUTILS_USE_SDK=1 && set MAX_JOBS=1 && set FLASH_ATTENTION_FORCE_BUILD=TRUE
5. python setup.py bdist_wheel

Optional check: python -c "import torch; from flash_attn import flash_attn_func; d=torch.device('cuda'); q,k,v=[torch.randn(2,128,4,64,dtype=torch.float16,device=d) for _ in range(3)]; print('✅ Success!' if flash_attn_func(q,k,v).shape==q.shape else '❌ Failed')"

.gitattributes CHANGED
@@ -58,3 +58,4 @@ Blackwell-50x_whl/sageattention-2.1.1%2Bcu128torch2.7.0-cp310-cp310-win_amd64.wh
58
  Blackwell-50x_whl/xformers-0.0.30%2B836cd905.d20250327-cp310-cp310-win_amd64.whl filter=lfs diff=lfs merge=lfs -text
59
  Blackwell-50x_whl/sageattention-2.1.1+cu128torch2.7.0-cp310-cp310-win_amd64.whl filter=lfs diff=lfs merge=lfs -text
60
  Blackwell-50x_whl/xformers-0.0.30+836cd905.d20250327-cp310-cp310-win_amd64.whl filter=lfs diff=lfs merge=lfs -text
 
 
58
  Blackwell-50x_whl/xformers-0.0.30%2B836cd905.d20250327-cp310-cp310-win_amd64.whl filter=lfs diff=lfs merge=lfs -text
59
  Blackwell-50x_whl/sageattention-2.1.1+cu128torch2.7.0-cp310-cp310-win_amd64.whl filter=lfs diff=lfs merge=lfs -text
60
  Blackwell-50x_whl/xformers-0.0.30+836cd905.d20250327-cp310-cp310-win_amd64.whl filter=lfs diff=lfs merge=lfs -text
61
+ Blackwell-50x_whl/flash_attn-2.8.1-cp310-cp310-win_amd64.whl filter=lfs diff=lfs merge=lfs -text
Blackwell-50x_whl/flash_attn-2.8.1-cp310-cp310-win_amd64.whl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6318d394fdf1886a864391d115c4fd4458725a9cb1a9c30f1ddfb5a2bf2991ce
3
+ size 123072468