#!/bin/bash
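#
# Install a pinned CUDA PyTorch stack plus vLLM, LMDeploy, AutoGPTQ and
# FlashAttention. Each version below can be overridden by a positional
# argument, e.g. (script name assumed here; substitute the actual file name):
#   ./install_llm_stack.sh 2.4.0 0.19.0 2.4.0 0.6.0 0.6.1 0.7.1 2.7.1.post4
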
torch_version=${1:-2.4.0}
torchvision_version=${2:-0.19.0}
torchaudio_version=${3:-2.4.0}
vllm_version=${4:-0.6.0}
lmdeploy_version=${5:-0.6.1}
autogptq_version=${6:-0.7.1}
flashattn_version=${7:-2.7.1.post4}
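
# Remove any preinstalled torch stack first to avoid mixed-version conflicts.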
pip uninstall -y torch torchvision torchaudio
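
# Install the pinned torch/torchvision/torchaudio trio in one resolver pass.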
pip install --no-cache-dir "torch==$torch_version" "torchvision==$torchvision_version" "torchaudio==$torchaudio_version"
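
# AutoAWQ (latest) and LMDeploy at the pinned version; -U upgrades any
# already-installed copies.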
pip install --no-cache-dir -U autoawq "lmdeploy==$lmdeploy_version"
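
# Re-pin the torch stack: the -U upgrade above may have pulled in a
# different torch as a dependency.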
pip install --no-cache-dir "torch==$torch_version" "torchvision==$torchvision_version" "torchaudio==$torchaudio_version"
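
# Runtime extras: tokenizer, quantization, training and video-decoding deps.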
pip install --no-cache-dir tiktoken transformers_stream_generator bitsandbytes deepspeed torchmetrics decord optimum
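
# FlashAttention. Two alternatives to the PyPI install below: a prebuilt
# wheel matching your CUDA/torch/Python ABI, or a build from source.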
# pip install https://github.com/Dao-AILab/flash-attention/releases/download/v2.6.3/flash_attn-2.6.3+cu123torch2.4cxx11abiTRUE-cp310-cp310-linux_x86_64.whl
# prebuilt wheels can be found at: https://github.com/Dao-AILab/flash-attention/releases
# cd /tmp && git clone https://github.com/Dao-AILab/flash-attention.git && cd flash-attention && python setup.py install && cd / && rm -fr /tmp/flash-attention && pip cache purge;
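# Default path: install from PyPI; pip compiles from source when no matching
# prebuilt wheel exists (MAX_JOBS=N can bound build parallelism).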
pip install --no-cache-dir "flash_attn==$flashattn_version"
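
# Triton plus the pinned AutoGPTQ and vLLM, then drop the pip cache to keep
# the image small.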
pip install --no-cache-dir -U triton "auto-gptq==$autogptq_version" "vllm==$vllm_version" && pip cache purge
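
# Optional: rebuild torch-scatter against the full CUDA arch list if needed.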
# pip uninstall -y torch-scatter && TORCH_CUDA_ARCH_LIST="6.0;6.1;6.2;7.0;7.5;8.0;8.6;8.9;9.0" pip install --no-cache-dir -U torch-scatter