mirror of https://github.com/modelscope/modelscope.git (synced 2025-12-16 08:17:45 +01:00)

update docker (#1073)

@@ -137,6 +137,3 @@ RUN set -eux; \
    \
    pip --version
# end of install python

RUN pip install tensorflow-cpu==2.16.1
RUN pip install tf-keras==2.16.0 --no-dependencies

@@ -14,17 +14,16 @@ RUN apt-get update && \

COPY {meta_file} /tmp/install.sh

RUN pip uninstall ms-swift modelscope -y && \
    pip --no-cache-dir install pip==23.3.1 && \
    pip --no-cache-dir install omegaconf==2.0.6 && \
    pip install --no-cache-dir 'cython<=0.29.36' versioneer 'numpy<2.0' -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html

ARG INSTALL_MS_DEPS={install_ms_deps}

# install dependencies
COPY requirements /var/modelscope

RUN if [ "$INSTALL_MS_DEPS" = "True" ]; then \
RUN pip uninstall ms-swift modelscope -y && \
    if [ "$INSTALL_MS_DEPS" = "True" ]; then \
    pip --no-cache-dir install pip==23.3.1 && \
    pip --no-cache-dir install omegaconf==2.0.6 && \
    pip install --no-cache-dir 'cython<=0.29.36' versioneer 'numpy<2.0' -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
    pip install --no-cache-dir kwsbp==0.0.6 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
    pip install --no-cache-dir -r /var/modelscope/framework.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
    pip install --no-cache-dir -r /var/modelscope/audio.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \

@@ -36,23 +35,29 @@ RUN if [ "$INSTALL_MS_DEPS" = "True" ]; then \
    pip install --no-cache-dir -r /var/modelscope/server.txt && \
    pip install --no-cache-dir https://modelscope.oss-cn-beijing.aliyuncs.com/packages/imageio_ffmpeg-0.4.9-py3-none-any.whl --no-dependencies --force && \
    pip install --no-cache-dir 'scipy<1.13.0' && \
    pip install --no-cache-dir funtextprocessing typeguard==2.13.3 scikit-learn -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
    pip install --no-cache-dir -U funasr && \
    pip install --no-cache-dir text2sql_lgesql==1.3.0 git+https://github.com/jin-s13/xtcocoapi.git@v1.14 git+https://github.com/gatagat/lap.git@v0.4.0 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html --force --no-deps && \
    pip install --no-cache-dir mmcls>=0.21.0 mmdet>=2.25.0 decord>=0.6.0 mpi4py paint_ldm ipykernel fasttext -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
    pip cache purge; \
    else \
    pip install --no-cache-dir -r /var/modelscope/framework.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html; \
    pip install --no-cache-dir -r /var/modelscope/framework.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
    pip cache purge; \
    fi

RUN echo "cache bust $(date +%Y%m%d%H%M%S)" && \
    sh /tmp/install.sh {version_args} && \
    curl -fsSL https://ollama.com/install.sh | sh && \
    pip install --no-cache-dir -U qwen_vl_utils pyav librosa timm transformers accelerate peft trl safetensors && \
    cd /tmp && GIT_LFS_SKIP_SMUDGE=1 git clone -b {modelscope_branch} --single-branch https://github.com/modelscope/modelscope.git && \
    cd modelscope && pip install . -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
    cd / && rm -fr /tmp/modelscope && pip cache purge && \
    cd / && rm -fr /tmp/modelscope && pip cache purge; \
    cd /tmp && GIT_LFS_SKIP_SMUDGE=1 git clone -b {swift_branch} --single-branch https://github.com/modelscope/ms-swift.git && \
    cd ms-swift && pip install .[llm] && \
    pip install .[eval] && pip install xtuner --no-dependencies && \
    cd / && rm -fr /tmp/ms-swift && pip cache purge && \
    pip install .[eval] && pip install evalscope -U --no-dependencies && pip install xtuner --no-dependencies && \
    cd / && rm -fr /tmp/ms-swift && pip cache purge; \
    pip install --no-cache-dir torch=={torch_version} torchvision=={torchvision_version} torchaudio=={torchaudio_version} {index_url} && \
    pip install --no-cache-dir transformers -U huggingface-hub==0.25.0 && \
    pip install tf-keras==2.16.0 --no-dependencies && \
    pip install --no-cache-dir transformers -U huggingface-hub==0.25.0 && pip cache purge; \
    pip config set global.index-url https://mirrors.aliyun.com/pypi/simple && \
    pip config set install.trusted-host mirrors.aliyun.com && \
    cp /tmp/resources/ubuntu2204.aliyun /etc/apt/sources.list
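
The Dockerfile.ubuntu excerpt above is a template: slots such as {meta_file}, {install_ms_deps}, {version_args}, {modelscope_branch} and {swift_branch} are filled in by the image builder before the build runs. Below is a minimal sketch of that substitution, assuming plain str.format; all concrete values (branch names, version numbers, index URL, output path) are illustrative only, not what the CI builders actually use.

# Illustrative sketch only: render the Dockerfile.ubuntu template by replacing
# each {placeholder} with a concrete value. Every value here is an example.
with open('docker/Dockerfile.ubuntu', 'r') as f:
    template = f.read()

dockerfile = template.format(
    meta_file='./docker/install.sh',
    install_ms_deps='True',
    version_args='2.4.0 0.19.0 2.4.0 0.6.0 0.6.1 0.7.1',
    modelscope_branch='master',
    swift_branch='main',
    torch_version='2.4.0',
    torchvision_version='0.19.0',
    torchaudio_version='2.4.0',
    index_url='--index-url https://download.pytorch.org/whl/cu124',
)

with open('Dockerfile', 'w') as f:   # hypothetical output path
    f.write(dockerfile)
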
@@ -139,7 +139,7 @@ class CPUImageBuilder(Builder):
        base_image = (
            f'{docker_registry}:ubuntu{self.args.ubuntu_version}-{self.args.python_tag}'
            f'-torch{self.args.torch_version}-base')
        extra_content = """\nRUN pip install adaseq\nRUN pip install pai-easycv"""
        extra_content = """\nRUN pip install adaseq pai-easycv"""

        with open('docker/Dockerfile.ubuntu', 'r') as f:
            content = f.read()
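
This hunk collapses two RUN layers into a single `RUN pip install adaseq pai-easycv`, which keeps the CPU image's layer count down. As a hedged sketch of how extra_content could be combined with the template text read above; appending it to the end of the file content and the output path 'Dockerfile.cpu' are assumptions for illustration, not necessarily how CPUImageBuilder injects it.

# Hedged sketch: append the builder-specific extra_content to the template text.
extra_content = """\nRUN pip install adaseq pai-easycv"""

with open('docker/Dockerfile.ubuntu', 'r') as f:
    content = f.read()

with open('Dockerfile.cpu', 'w') as f:   # hypothetical output path
    f.write(content + extra_content)
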
@@ -191,7 +191,15 @@ class GPUImageBuilder(Builder):

    def generate_dockerfile(self) -> str:
        meta_file = './docker/install.sh'
        extra_content = """\nRUN pip install adaseq\nRUN pip install pai-easycv"""
        extra_content = """
RUN pip install adaseq pai-easycv && \
    pip install tf-keras==2.16.0 --no-dependencies && \
    pip install --no-cache-dir torchsde jupyterlab torchmetrics==0.11.4 basicsr pynvml shortuuid && \
    pip install --no-cache-dir apex -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
    CUDA_HOME=/usr/local/cuda TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0 7.5 8.0 8.6 8.9 9.0" \
    pip install --no-cache-dir 'git+https://github.com/facebookresearch/detectron2.git'
"""

        version_args = (
            f'{self.args.torch_version} {self.args.torchvision_version} {self.args.torchaudio_version} '
            f'{self.args.vllm_version} {self.args.lmdeploy_version} {self.args.autogptq_version}'
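
version_args is a single space-separated string that the generated Dockerfile passes to `sh /tmp/install.sh {version_args}`, so the six values are consumed positionally by the script: $1..$3 are torch/torchvision/torchaudio, $4 vllm, $5 lmdeploy, $6 auto-gptq, with the defaults shown in the hunk below used when an argument is omitted. A small sketch of that contract follows; the SimpleNamespace stand-in and the concrete version numbers are illustrative, not the builder's real argument object.

from types import SimpleNamespace

# Stand-in for self.args; the versions below are examples only.
args = SimpleNamespace(
    torch_version='2.4.0', torchvision_version='0.19.0', torchaudio_version='2.4.0',
    vllm_version='0.6.0', lmdeploy_version='0.6.1', autogptq_version='0.7.1')

version_args = (
    f'{args.torch_version} {args.torchvision_version} {args.torchaudio_version} '
    f'{args.vllm_version} {args.lmdeploy_version} {args.autogptq_version}')

# Prints: sh /tmp/install.sh 2.4.0 0.19.0 2.4.0 0.6.0 0.6.1 0.7.1
print(f'sh /tmp/install.sh {version_args}')
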
@@ -7,29 +7,13 @@ vllm_version=${4:-0.6.0}
lmdeploy_version=${5:-0.6.1}
autogptq_version=${6:-0.7.1}

pip install --no-cache-dir -U autoawq

pip uninstall -y torch torchvision torchaudio

pip install --no-cache-dir torch==$torch_version torchvision==$torchvision_version torchaudio==$torchaudio_version

pip install --no-cache-dir funtextprocessing typeguard==2.13.3 scikit-learn -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html

curl -fsSL https://ollama.com/install.sh | sh

pip install --no-cache-dir -U funasr

pip install --no-cache-dir -U qwen_vl_utils pyav librosa autoawq timm transformers accelerate peft optimum trl safetensors

pip install --no-cache-dir torchsde jupyterlab torchmetrics==0.11.4 tiktoken transformers_stream_generator bitsandbytes basicsr

pip install --no-cache-dir text2sql_lgesql==1.3.0 git+https://github.com/jin-s13/xtcocoapi.git@v1.14 git+https://github.com/gatagat/lap.git@v0.4.0 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html --force --no-deps

pip install --no-cache-dir mpi4py paint_ldm -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html

pip install --no-cache-dir mmcls>=0.21.0 mmdet>=2.25.0 decord>=0.6.0 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html

pip install --no-cache-dir ipykernel fasttext deepspeed apex -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html

CUDA_HOME=/usr/local/cuda TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0 7.5 8.0 8.6 8.9 9.0" pip install --no-cache-dir 'git+https://github.com/facebookresearch/detectron2.git';
pip install --no-cache-dir tiktoken transformers_stream_generator bitsandbytes deepspeed torchmetrics decord optimum

# pip install https://github.com/Dao-AILab/flash-attention/releases/download/v2.6.3/flash_attn-2.6.3+cu123torch2.4cxx11abiTRUE-cp310-cp310-linux_x86_64.whl
# find on: https://github.com/Dao-AILab/flash-attention/releases

@@ -44,5 +28,3 @@ pip install --no-cache-dir -U triton
pip install --no-cache-dir vllm==$vllm_version -U

pip install --no-cache-dir -U lmdeploy==$lmdeploy_version --no-deps

pip install --no-cache-dir pynvml shortuuid

@@ -6,16 +6,4 @@ torchaudio_version=${3:-2.4.0}

pip uninstall -y torch torchvision torchaudio

pip install --no-cache-dir -U torch==$torch_version torchvision==$torchvision_version torchaudio==$torchaudio_version --index-url https://download.pytorch.org/whl/cpu

pip install --no-cache-dir funtextprocessing typeguard==2.13.3 scikit-learn -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html

curl -fsSL https://ollama.com/install.sh | sh

pip install --no-cache-dir -U funasr

pip install --no-cache-dir -U qwen_vl_utils pyav librosa timm transformers accelerate peft trl safetensors

pip install --no-cache-dir text2sql_lgesql==1.3.0 git+https://github.com/jin-s13/xtcocoapi.git@v1.14 git+https://github.com/gatagat/lap.git@v0.4.0 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html --force --no-deps

pip install --no-cache-dir mpi4py paint_ldm mmcls>=0.21.0 mmdet>=2.25.0 decord>=0.6.0 ipykernel fasttext -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html
pip install --no-cache-dir torch==$torch_version torchvision==$torchvision_version torchaudio==$torchaudio_version --index-url https://download.pytorch.org/whl/cpu