qwerrwe / setup.cfg
commit 12de7b7: cleanup, prep for 4bit quant support

[metadata]
name = axolotl
version = 0.1.0
description = You know you're going to axolotl questions
author = Wing Lian
author_email = [email protected]
license = MIT

[options]
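# src layout: importable packages live under src/ and are discovered by
# setuptools via the [options.packages.find] section below.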
package_dir =
    =src
packages = find:
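# Runtime dependencies. transformers and peft are installed directly from
# their GitHub main branches rather than from tagged PyPI releases.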
install_requires =
    transformers @ git+https://github.com/huggingface/transformers.git@main
    peft @ git+https://github.com/huggingface/peft.git@main
    attrdict
    fire
    PyYAML == 6.0
    black
    bitsandbytes
    datasets
    accelerate
    sentencepiece
    wandb
    flash-attn
    einops

[options.packages.find]
where = src
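
# Optional 4-bit GPTQ quantization backends, pulled from the
# winglian/alpaca_lora_4bit repo (setup_pip branch). Either extra can be
# installed alongside the package, e.g. from a local checkout:
#   pip install -e ".[gptq_cuda]"     # CUDA kernels
#   pip install -e ".[gptq_triton]"   # Triton kernels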
[options.extras_require]
gptq_cuda = alpaca_lora_4bit[cuda] @ git+https://github.com/winglian/alpaca_lora_4bit.git@setup_pip#egg=alpaca_lora_4bit[cuda]
gptq_triton = alpaca_lora_4bit[triton] @ git+https://github.com/winglian/alpaca_lora_4bit.git@setup_pip#egg=alpaca_lora_4bit[triton]