Files
modelscope/docker/Dockerfile.ascend
baymax591 6f3200dfc2 [docker] Add Ascend NPU dockerfile (#1550)
Co-authored-by: ji-huazhong <hzji210@gmail.com>
Co-authored-by: vx120 <893600387@qq.com>
2025-11-26 09:56:47 +08:00

121 lines
6.6 KiB
Docker

# Base image is injected by the image-build templater (expected to be an
# Ascend CANN base image — TODO confirm against the build script).
FROM {base_image}
# Route pip through the Aliyun PyPI mirror (faster/reliable inside CN networks);
# the mirror is http-proxied, hence the trusted-host entry.
RUN pip config set global.index-url https://mirrors.aliyun.com/pypi/simple && \
pip config set install.trusted-host mirrors.aliyun.com
# Prepare required system dependencies.
# DEBIAN_FRONTEND is set inline (not via ENV) so it never leaks into the
# runtime environment; packages are alphabetized for diffability; apt lists
# and temp files are removed in the same layer that created them.
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
build-essential cmake curl g++ gcc git jq libnuma-dev vim wget && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
pip install --no-cache-dir --upgrade pip setuptools packaging && \
pip cache purge
{extra_content}
# Prepare repositories with low update frequency
RUN ARCH=$(uname -m) && \
# Set extra pip index for x86_64 platform
# (x86_64 gets CPU torch wheels from the PyTorch CPU index; aarch64 stays on PyPI)
echo "[LOG INFO] Detected architecture: $ARCH" && \
if [ "$ARCH" = "x86_64" ]; then \
pip config set global.extra-index-url "https://download.pytorch.org/whl/cpu/"; \
fi && \
# Clone libs
# vllm / vllm-ascend / Megatron-LM are pinned to release tags with shallow clones;
# MindSpeed is pinned to a specific commit, so it needs full history for checkout.
git clone --depth 1 --branch v0.11.0 https://github.com/vllm-project/vllm && \
git clone --depth 1 --branch v0.11.0rc1 https://github.com/vllm-project/vllm-ascend.git && \
git clone https://gitcode.com/Ascend/MindSpeed.git && \
cd MindSpeed && git checkout f2b0977e && cd .. && \
git clone --depth 1 --branch core_v0.12.1 https://github.com/NVIDIA/Megatron-LM.git
# Install repositories with low update frequency
RUN ARCH=$(uname -m) && \
# Export and source env.
# FIX: use the POSIX '.' builtin instead of the bash-only 'source'.
# SHELL ["/bin/bash", "-c"] is only declared at the very end of this
# Dockerfile, so this RUN executes under the base image's default shell
# (/bin/sh, dash on Ubuntu) where 'source' is not available; '.' works in both.
if [ "$ARCH" = "aarch64" ]; then \
export LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/8.3.RC1/aarch64-linux/devlib/linux/aarch64:$LD_LIBRARY_PATH; \
elif [ "$ARCH" = "x86_64" ]; then \
export LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/8.3.RC1/x86_64-linux/devlib/linux/x86_64/:$LD_LIBRARY_PATH; \
fi && \
. /usr/local/Ascend/ascend-toolkit/set_env.sh && \
. /usr/local/Ascend/nnal/atb/set_env.sh && \
# Install torch & torch_npu & torchvision (pinned trio; torch_npu must match torch)
pip install torch==2.7.1 torch_npu==2.7.1 torchvision==0.22.1 && \
# Install vllm (VLLM_TARGET_DEVICE=empty skips device kernel builds; NPU support comes from vllm-ascend)
cd vllm && VLLM_TARGET_DEVICE=empty pip install -v -e . && cd .. && \
# Install vllm-ascend
cd vllm-ascend && pip install -v -e . && cd .. && \
# Install MindSpeed & Megatron (Megatron-LM is consumed from source via PYTHONPATH below, not pip-installed)
pip install -e MindSpeed && \
# Clear extra files
rm -rf /tmp/* /var/tmp/* && \
pip cache purge
# Make the Megatron-LM source checkout importable (it is used from source,
# not pip-installed; presumably cloned under / by the earlier RUN — TODO confirm
# the build working directory). Legacy space-separated ENV form is deprecated;
# use key=value.
ENV PYTHONPATH=${PYTHONPATH}:/Megatron-LM
# Build-time switch (templated): "True" installs the full modelscope dependency
# stack in the next RUN; anything else installs only framework.txt.
ARG INSTALL_MS_DEPS={install_ms_deps}
# install dependencies
COPY requirements /var/modelscope
# Install the modelscope dependency stack (full set when INSTALL_MS_DEPS=True,
# framework.txt only otherwise). pip is pinned to 23.* for legacy-resolver
# compatibility of the packages below — presumably; confirm before bumping.
RUN pip uninstall ms-swift modelscope -y && pip --no-cache-dir install pip==23.* -U && \
if [ "$INSTALL_MS_DEPS" = "True" ]; then \
pip --no-cache-dir install omegaconf==2.0.6 && \
pip install 'editdistance==0.8.1' && \
pip install --no-cache-dir 'cython<=0.29.36' versioneer 'numpy<2.0' -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
pip install --no-cache-dir -r /var/modelscope/framework.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
pip install --no-cache-dir -r /var/modelscope/audio.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
pip install --no-cache-dir -r /var/modelscope/cv.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
pip install --no-cache-dir -r /var/modelscope/multi-modal.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
pip install --no-cache-dir -r /var/modelscope/nlp.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
pip install --no-cache-dir -r /var/modelscope/science.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
pip install --no-cache-dir -r /var/modelscope/tests.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
pip install --no-cache-dir -r /var/modelscope/server.txt && \
pip install --no-cache-dir https://modelscope.oss-cn-beijing.aliyuncs.com/packages/imageio_ffmpeg-0.4.9-py3-none-any.whl --no-dependencies --force && \
pip install adaseq pai-easycv && \
pip install --no-cache-dir 'scipy<1.13.0' && \
pip install --no-cache-dir funtextprocessing typeguard==2.13.3 scikit-learn -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
pip install --no-cache-dir text2sql_lgesql==1.3.0 git+https://github.com/jin-s13/xtcocoapi.git@v1.14 git+https://github.com/gatagat/lap.git@v0.4.0 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html --force --no-deps && \
# FIX: quote the version specifiers. Unquoted 'pkg>=x.y' is parsed by the
# shell as output redirection ('pip install pkg' with stdout sent to a file
# named '=x.y'), so the constraint was silently dropped and the packages
# were installed unpinned.
pip install --no-cache-dir 'mmcls>=0.21.0' 'mmdet>=2.25.0' 'decord>=0.6.0' mpi4py paint_ldm ipykernel fasttext -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
pip uninstall ddpm_guided_diffusion -y && \
pip install --no-cache-dir 'blobfile>=1.0.5' && \
pip install 'ddpm_guided_diffusion' -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html --no-index && \
pip uninstall shotdetect_scenedetect_lgss -y && \
pip install 'shotdetect_scenedetect_lgss' -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html --no-index && \
pip cache purge; \
else \
pip install --no-cache-dir -r /var/modelscope/framework.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
pip cache purge; \
fi
# Cache-buster: CUR_TIME is templated to the build time, so every templated
# build invalidates the layer cache from here down and the fast-moving repos
# below are re-fetched.
ARG CUR_TIME={cur_time}
RUN echo $CUR_TIME
# Install ollama, ms-swift (templated branch) and modelscope (templated branch).
# NOTE(review): 'curl | sh' installs an unpinned, unverified ollama build —
# consider pinning a release and verifying its checksum.
RUN curl -fsSL https://ollama.com/install.sh | sh && \
pip install --no-cache-dir -U funasr scikit-learn && \
pip install --no-cache-dir -U qwen_vl_utils qwen_omni_utils librosa timm transformers accelerate peft trl safetensors && \
cd /tmp && GIT_LFS_SKIP_SMUDGE=1 git clone -b {swift_branch} --single-branch https://github.com/modelscope/ms-swift.git && \
cd ms-swift && pip install .[llm] && \
pip install .[eval] && pip install evalscope -U --no-dependencies && pip install ms-agent -U --no-dependencies && \
cd / && rm -fr /tmp/ms-swift && pip cache purge; \
# NOTE(review): the ';' separators below make each following group best-effort —
# a failure there does not fail the image build. Confirm this is intentional.
cd /tmp && GIT_LFS_SKIP_SMUDGE=1 git clone -b {modelscope_branch} --single-branch https://github.com/modelscope/modelscope.git && \
cd modelscope && pip install . -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
cd / && rm -fr /tmp/modelscope && pip cache purge; \
# FIX: quote 'timm>=0.9.0'. Unquoted '>=' is shell output redirection, so timm
# was installed unpinned and a stray file named '=0.9.0' was created.
pip install --no-cache-dir transformers diffusers 'timm>=0.9.0' && pip cache purge; \
pip install --no-cache-dir omegaconf==2.3.0 && pip cache purge;
# When the full modelscope stack was installed, refresh the HF-ecosystem
# packages to their latest versions on top of it; otherwise this is a no-op.
RUN [ "$INSTALL_MS_DEPS" != "True" ] || \
pip install --no-cache-dir huggingface-hub transformers peft -U
# Runtime configuration, grouped into a single ENV layer (key=value form).
# SETUPTOOLS_USE_DISTUTILS=stdlib: compatibility shim for packages that still
#   import distutils through setuptools — presumably required by the audio/cv
#   deps above; confirm before removing.
# *_USE_MODELSCOPE=True: direct vllm / lmdeploy to resolve model downloads via
#   ModelScope rather than the default hub — TODO confirm both flags' semantics
#   against the respective projects' docs.
ENV SETUPTOOLS_USE_DISTUTILS=stdlib \
    VLLM_USE_MODELSCOPE=True \
    LMDEPLOY_USE_MODELSCOPE=True \
    MODELSCOPE_CACHE=/mnt/workspace/.cache/modelscope/hub
# Show install results
RUN pip list
# NOTE(review): SHELL only affects instructions that come AFTER it; placed at
# the end of this file it changes nothing here and only matters for templates
# or Dockerfiles that extend this image's build. If bash semantics are needed
# for the RUN instructions above, this declaration should move before them —
# confirm the intended placement.
SHELL ["/bin/bash", "-c"]