diff --git a/docker/Dockerfile.ubuntu b/docker/Dockerfile.ubuntu
index 71a6f8b1..f72e4cc9 100644
--- a/docker/Dockerfile.ubuntu
+++ b/docker/Dockerfile.ubuntu
@@ -60,13 +60,13 @@ RUN sh /tmp/install.sh {version_args} && \
     curl -fsSL https://ollama.com/install.sh | sh && \
     pip install --no-cache-dir -U funasr scikit-learn && \
     pip install --no-cache-dir -U qwen_vl_utils qwen_omni_utils librosa timm transformers accelerate peft trl safetensors && \
-    cd /tmp && GIT_LFS_SKIP_SMUDGE=1 git clone -b {modelscope_branch} --single-branch https://github.com/modelscope/modelscope.git && \
-    cd modelscope && pip install . -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
-    cd / && rm -fr /tmp/modelscope && pip cache purge; \
     cd /tmp && GIT_LFS_SKIP_SMUDGE=1 git clone -b {swift_branch} --single-branch https://github.com/modelscope/ms-swift.git && \
     cd ms-swift && pip install .[llm] && \
     pip install .[eval] && pip install evalscope -U --no-dependencies && pip install ms-agent -U --no-dependencies && \
     cd / && rm -fr /tmp/ms-swift && pip cache purge; \
+    cd /tmp && GIT_LFS_SKIP_SMUDGE=1 git clone -b {modelscope_branch} --single-branch https://github.com/modelscope/modelscope.git && \
+    cd modelscope && pip install . -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
+    cd / && rm -fr /tmp/modelscope && pip cache purge; \
     pip install --no-cache-dir torch=={torch_version} torchvision=={torchvision_version} torchaudio=={torchaudio_version} {index_url} && \
     pip install --no-cache-dir transformers diffusers timm>=0.9.0 && pip cache purge; \
     pip install --no-cache-dir omegaconf==2.3.0 && pip cache purge; \
@@ -76,7 +76,7 @@ RUN sh /tmp/install.sh {version_args} && \
 RUN if [ "$INSTALL_MS_DEPS" = "True" ]; then \
-        pip install --no-cache-dir huggingface-hub transformers -U; \
+        pip install --no-cache-dir huggingface-hub transformers peft -U; \
     fi; \
     if [ "$INSTALL_MEGATRON_DEPS" = "True" ]; then \
         pip install liger_kernel nvitop pre-commit transformers huggingface-hub -U && \
diff --git a/docker/install.sh b/docker/install.sh
index d8380091..b15dab31 100644
--- a/docker/install.sh
+++ b/docker/install.sh
@@ -23,6 +23,11 @@ pip install --no-cache-dir tiktoken transformers_stream_generator bitsandbytes d
 # cd /tmp && git clone https://github.com/Dao-AILab/flash-attention.git && cd flash-attention && python setup.py install && cd / && rm -fr /tmp/flash-attention && pip cache purge;
 pip install --no-cache-dir flash_attn==$flashattn_version
-pip install --no-cache-dir triton auto-gptq==$autogptq_version vllm==$vllm_version -U && pip cache purge
+pip install --no-cache-dir triton auto-gptq==$autogptq_version -U && pip cache purge
+
+if [[ "$(printf '%s\n' "0.6.0" "$vllm_version" | sort -V | head -n1)" = "0.6.0" ]]; then
+  # vllm_version is >= 0.6.0
+  pip install --no-cache-dir vllm==$vllm_version && pip cache purge
+fi
 # pip uninstall -y torch-scatter && TORCH_CUDA_ARCH_LIST="6.0;6.1;6.2;7.0;7.5;8.0;8.6;8.9;9.0" pip install --no-cache-dir -U torch-scatter