Fix/dockerfile (#1034)
* update version
* update build_image for swift
* add outlines in build_image.sh
* pin outlines to 0.0.46 to avoid failures when building the Docker image
* set vllm==0.5.2 in Dockerfile.ubuntu to fix a CI issue
* update the outlines version constraint to <0.1
* update the ms-swift installation in build_image.sh
* merge the diff from release/1.19
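The effect of the pins above is that the image build no longer pulls in incompatible outlines/vllm releases. A minimal post-build check, assuming a locally built image tagged modelscope:test (the tag is illustrative, not part of this commit):

# Hypothetical verification step; "modelscope:test" is a placeholder tag.
docker run --rm modelscope:test python -m pip show outlines vllm | grep -E '^(Name|Version):'
# Expect an outlines version below 0.1 and vllm 0.5.2 if the pins took effect.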
build_image.sh
@@ -159,7 +159,7 @@ docker_file_content=`cat docker/Dockerfile.ubuntu`
 BUILD_HASH_ID=$(git rev-parse HEAD)
 # install thrid part library
-docker_file_content="${docker_file_content} \nRUN export COMMIT_ID=$BUILD_HASH_ID && pip install --no-cache-dir -U adaseq pai-easycv && pip install --no-cache-dir -U 'ms-swift' 'decord' 'qwen_vl_utils' 'pyav' 'librosa' 'funasr' autoawq 'timm>0.9.5' 'transformers' 'accelerate' 'peft' 'optimum' 'trl'"
+docker_file_content="${docker_file_content} \nRUN export COMMIT_ID=$BUILD_HASH_ID && pip install --no-cache-dir -U adaseq pai-easycv && pip install --no-cache-dir -U 'ms-swift' 'decord' 'qwen_vl_utils' 'pyav' 'librosa' 'funasr' autoawq 'timm>0.9.5' 'transformers' 'accelerate' 'peft' 'optimum' 'trl' 'outlines<0.1'"

 docker_file_content="${docker_file_content} \nRUN pip uninstall modelscope -y && export COMMIT_ID=$BUILD_HASH_ID && cd /tmp && GIT_LFS_SKIP_SMUDGE=1 git clone -b $build_branch --single-branch $REPO_URL && cd modelscope && pip install . && cd / && rm -fr /tmp/modelscope && pip cache purge;"
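Here build_image.sh keeps the whole Dockerfile as a string: it starts from docker/Dockerfile.ubuntu and appends extra RUN layers with literal \n separators. A minimal sketch of turning such a string into an image, assuming the script writes it back out before building (the file name Dockerfile.generated and the tag $IMAGE_TAG are illustrative):

# Hypothetical follow-up to the snippet above; '%b' expands the literal \n escapes.
printf '%b\n' "$docker_file_content" > Dockerfile.generated
docker build -f Dockerfile.generated -t "$IMAGE_TAG" .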
docker/Dockerfile.ubuntu
@@ -58,7 +58,7 @@ RUN if [ "$USE_GPU" = "True" ] ; then \
 pip install --no-cache-dir -U 'xformers<0.0.27' --index-url https://download.pytorch.org/whl/cu121 && \
 pip install --no-cache-dir --force tinycudann==1.7 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
 pip uninstall -y torch-scatter && TORCH_CUDA_ARCH_LIST="6.0;6.1;6.2;7.0;7.5;8.0;8.6;8.9;9.0" pip install --no-cache-dir -U torch-scatter && \
-pip install --no-cache-dir -U triton 'vllm==0.5.1' https://modelscope.oss-cn-beijing.aliyuncs.com/packages/lmdeploy-0.5.0-cp310-cp310-linux_x86_64.whl; \
+pip install --no-cache-dir -U triton 'vllm==0.5.2' https://modelscope.oss-cn-beijing.aliyuncs.com/packages/lmdeploy-0.5.0-cp310-cp310-linux_x86_64.whl; \
 else \
 echo 'cpu unsupport vllm auto-gptq'; \
 fi
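The vllm/lmdeploy pin only applies on the GPU path; the CPU branch skips those installs entirely. A hedged example of selecting the branch at build time, assuming USE_GPU is exposed as a Docker build argument (ARG) in Dockerfile.ubuntu; the tags and context path are placeholders:

# Hypothetical invocations; adjust tags and context path to your setup.
docker build -f docker/Dockerfile.ubuntu --build-arg USE_GPU=True -t modelscope:gpu .
docker build -f docker/Dockerfile.ubuntu --build-arg USE_GPU=False -t modelscope:cpu .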