Mirror of https://github.com/modelscope/modelscope.git
Dockerfile py38 and py37 compatibility merge
Link: https://code.alibaba-inc.com/Ali-MaaS/MaaS-lib/codereview/12722824

* debug
* add missing deps
* add stanza
* fix numpy issue
* numpy version
* tf case
* numpy <= 1.22.0
* fix tf 1.15.5 case
* add python38 and python37 compatible docker file
* cv add fvcore
* add easycv
* remove debug code
* fix style issue
* fix python3.7 python3.8 compatible issue
* comment
* modify run config
* scipy<=1.7.3
* revert scipy<=1.7.3
* fix compatible bug
* fix compatible bug
* add easycv
* update version: 1.6.1rc0

Link: https://code.alibaba-inc.com/Ali-MaaS/MaaS-lib/codereview/12754942
fix python3.7 and python3.8 compatible issue

* numpy version change
* numpy version
* numpy version
* fix numpy version
* fix numpy version
* update version: 1.6.1rc0

Link: https://code.alibaba-inc.com/Ali-MaaS/MaaS-lib/codereview/12754942
fix python3.7 and python3.8 compatible issue

* restore setup.py
* restore setup.py
* add build base image
* fix style
* numpy version fix
* optimize build image, split base and modelscope image
* fix style issue
* modify example path
* modify examples folder
* add SETUPTOOLS_USE_DISTUTILS=stdlib comments
.dev_scripts/build_base_image.sh (new file, 119 lines)
@@ -0,0 +1,119 @@
#!/bin/bash
# default values.
BASE_CPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:20.04
BASE_GPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:20.04-cuda11.3.0-cudnn8-devel
MODELSCOPE_REPO_ADDRESS=reg.docker.alibaba-inc.com/modelscope/modelscope
python_version=3.7.13
torch_version=1.11.0
cudatoolkit_version=11.3
tensorflow_version=1.15.5
version=None
is_cpu=False
function usage(){
    echo "usage: build.sh "
    echo "       --python=python_version set python version, default: $python_version"
    echo "       --torch=torch_version set pytorch version, default: $torch_version"
    echo "       --tensorflow=tensorflow_version set tensorflow version, default: $tensorflow_version"
    echo "       --version=version set image version, default: $version"
    echo "       --test option for run test before push image, only push on ci test pass"
    echo "       --cpu option for build cpu version"
    echo "       --dsw option for build dsw version"
    echo "       --ci option for build ci version"
    echo "       --push option for push image to remote repo"
}
for i in "$@"; do
    case $i in
        --python=*)
            python_version="${i#*=}"
            shift
            ;;
        --torch=*)
            torch_version="${i#*=}"
            shift # pytorch version
            ;;
        --tensorflow=*)
            tensorflow_version="${i#*=}"
            shift # tensorflow version
            ;;
        --version=*)
            version="${i#*=}"
            shift # version
            ;;
        --cpu)
            is_cpu=True
            shift # is cpu image
            ;;
        --push)
            is_push=True
            shift # option for push image to remote repo
            ;;
        --help)
            usage
            exit 0
            ;;
        -*|--*)
            echo "Unknown option $i"
            usage
            exit 1
            ;;
        *)
            ;;
    esac
done

if [ "$version" == "None" ]; then
    echo "version must be specified!"
    exit 1
fi
if [ "$is_cpu" == "True" ]; then
    export BASE_IMAGE=$BASE_CPU_IMAGE
    base_tag=ubuntu20.04
    export USE_GPU=False
else
    export BASE_IMAGE=$BASE_GPU_IMAGE
    base_tag=ubuntu20.04-cuda11.3.0
    export USE_GPU=True
fi
if [[ $python_version == 3.7* ]]; then
    base_tag=$base_tag-py37
elif [[ $python_version == 3.8* ]]; then
    base_tag=$base_tag-py38
elif [[ $python_version == 3.9* ]]; then
    base_tag=$base_tag-py39
else
    echo "Unsupported python version: $python_version"
    exit 1
fi

target_image_tag=$base_tag-torch$torch_version-tf$tensorflow_version-base-$version
export IMAGE_TO_BUILD=$MODELSCOPE_REPO_ADDRESS:$target_image_tag
export PYTHON_VERSION=$python_version
export TORCH_VERSION=$torch_version
export CUDATOOLKIT_VERSION=$cudatoolkit_version
export TENSORFLOW_VERSION=$tensorflow_version
echo -e "Building image with:\npython$python_version\npytorch$torch_version\ntensorflow:$tensorflow_version\ncudatoolkit:$cudatoolkit_version\ncpu:$is_cpu\n"
docker_file_content=`cat docker/Dockerfile.ubuntu_base`
printf "$docker_file_content" > Dockerfile

while true
do
    docker build -t $IMAGE_TO_BUILD \
        --build-arg USE_GPU \
        --build-arg BASE_IMAGE \
        --build-arg PYTHON_VERSION \
        --build-arg TORCH_VERSION \
        --build-arg CUDATOOLKIT_VERSION \
        --build-arg TENSORFLOW_VERSION \
        -f Dockerfile .
    if [ $? -eq 0 ]; then
        echo "Image build done"
        break
    else
        echo "Running docker build command error, we will retry"
    fi
done

if [ "$is_push" == "True" ]; then
    echo "Pushing image: $IMAGE_TO_BUILD"
    docker push $IMAGE_TO_BUILD
fi
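For reference, a hypothetical invocation of this new base-image build script is sketched below; the argument values are illustrative and not taken from the commit.

    # build a python3.8 base image and push it to the internal registry (illustrative values)
    bash .dev_scripts/build_base_image.sh --python=3.8.13 --torch=1.11.0 --tensorflow=1.15.5 --version=1.6.1 --push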
@@ -1,7 +1,9 @@
 #!/bin/bash
 # default values.
-BASE_CPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:20.04
-BASE_GPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:20.04-cuda11.3.0-cudnn8-devel
+BASE_PY38_CPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-py38-torch1.11.0-tf1.15.5-base-1.6.1
+BASE_PY38_GPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-cuda11.3.0-py38-torch1.11.0-tf1.15.5-base-1.6.1
+BASE_PY37_CPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-py37-torch1.11.0-tf1.15.5-base-1.6.1
+BASE_PY37_GPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-cuda11.3.0-py37-torch1.11.0-tf1.15.5-base-1.6.1
 MODELSCOPE_REPO_ADDRESS=reg.docker.alibaba-inc.com/modelscope/modelscope
 python_version=3.7.13
 torch_version=1.11.0
@@ -86,20 +88,30 @@ if [ "$modelscope_version" == "None" ]; then
     exit 1
 fi
 if [ "$is_cpu" == "True" ]; then
-    export BASE_IMAGE=$BASE_CPU_IMAGE
     base_tag=ubuntu20.04
     export USE_GPU=False
 else
-    export BASE_IMAGE=$BASE_GPU_IMAGE
     base_tag=ubuntu20.04-cuda11.3.0
     export USE_GPU=True
 fi
 if [[ $python_version == 3.7* ]]; then
+    if [ "$is_cpu" == "True" ]; then
+        echo "Building python3.7 cpu image"
+        export BASE_IMAGE=$BASE_PY37_CPU_IMAGE
+    else
+        echo "Building python3.7 gpu image"
+        export BASE_IMAGE=$BASE_PY37_GPU_IMAGE
+    fi
     base_tag=$base_tag-py37
 elif [[ $python_version == 3.8* ]]; then
+    if [ "$is_cpu" == "True" ]; then
+        echo "Building python3.8 cpu image"
+        export BASE_IMAGE=$BASE_PY38_CPU_IMAGE
+    else
+        echo "Building python3.8 gpu image"
+        export BASE_IMAGE=$BASE_PY38_GPU_IMAGE
+    fi
     base_tag=$base_tag-py38
-elif [[ $python_version == 3.9* ]]; then
-    base_tag=$base_tag-py39
 else
     echo "Unsupported python version: $python_version"
     exit 1
@@ -1,102 +1,5 @@
-ARG BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:20.04-cuda11.3.0-cudnn8-devel
+ARG BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-cuda11.3.0-py37-torch1.11.0-tf1.15.5-1.6.1
 FROM $BASE_IMAGE
-ARG DEBIAN_FRONTEND=noninteractive
-ENV TZ=Asia/Shanghai
-ENV CONDA_DIR /opt/conda
-ENV PATH="${CONDA_DIR}/bin:${PATH}"
-ENV arch=x86_64
-SHELL ["/bin/bash", "-c"]
-COPY docker/rcfiles /tmp/resources
-COPY docker/jupyter_plugins /tmp/resources/jupyter_plugins
-RUN apt-get update && apt-get install -y --reinstall ca-certificates && \
-    apt-get clean && \
-    cp /tmp/resources/ubuntu20.04_sources.tuna /etc/apt/sources.list && \
-    apt-get update && \
-    apt-get install -y locales wget git strace gdb sox libopenmpi-dev curl strace vim ffmpeg libsm6 tzdata language-pack-zh-hans ttf-wqy-microhei ttf-wqy-zenhei xfonts-wqy libxext6 build-essential ninja-build && \
-    wget https://packagecloud.io/github/git-lfs/packages/debian/bullseye/git-lfs_3.2.0_amd64.deb/download -O ./git-lfs_3.2.0_amd64.deb && \
-    dpkg -i ./git-lfs_3.2.0_amd64.deb && \
-    rm -f ./git-lfs_3.2.0_amd64.deb && \
-    locale-gen zh_CN && \
-    locale-gen zh_CN.utf8 && \
-    update-locale LANG=zh_CN.UTF-8 LC_ALL=zh_CN.UTF-8 LANGUAGE=zh_CN.UTF-8 && \
-    ln -fs /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
-    dpkg-reconfigure --frontend noninteractive tzdata && \
-    apt-get clean && \
-    rm -rf /var/lib/apt/lists/*
-
-ENV LANG=zh_CN.UTF-8 LANGUAGE=zh_CN.UTF-8 LC_ALL=zh_CN.UTF-8
-
-#install and config python
-ARG PYTHON_VERSION=3.7.13
-RUN wget --quiet https://mirrors.aliyun.com/anaconda/miniconda/Miniconda3-latest-Linux-${arch}.sh -O ./miniconda.sh && \
-    /bin/bash miniconda.sh -b -p /opt/conda && \
-    rm -f miniconda.sh && \
-    ln -s /opt/conda/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \
-    echo ". /opt/conda/etc/profile.d/conda.sh" >> ~/.bashrc && \
-    cp /tmp/resources/conda.tuna ~/.condarc && \
-    source /root/.bashrc && \
-    conda install --yes python==${PYTHON_VERSION} && \
-    pip config set global.index-url https://mirrors.aliyun.com/pypi/simple && \
-    pip config set install.trusted-host mirrors.aliyun.com
-
-ARG USE_GPU=True
-
-# install pytorch
-ARG TORCH_VERSION=1.12.0
-ARG CUDATOOLKIT_VERSION=11.3
-RUN if [ "$USE_GPU" = "True" ] ; then \
-        pip install --no-cache-dir torch==$TORCH_VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu113; \
-    else \
-        pip install --no-cache-dir torch==$TORCH_VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu; \
-    fi
-
-# install tensorflow
-ARG TENSORFLOW_VERSION=1.15.5
-RUN if [ "$USE_GPU" = "True" ] ; then \
-        pip install --no-cache-dir tensorflow==$TENSORFLOW_VERSION -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html; \
-    else \
-        pip install --no-cache-dir tensorflow==$TENSORFLOW_VERSION; \
-    fi
-
-# mmcv-full<=1.7.0 for mmdet3d compatible
-RUN if [ "$USE_GPU" = "True" ] ; then \
-        CUDA_HOME=/usr/local/cuda TORCH_CUDA_ARCH_LIST="5.0 5.2 6.0 6.1 7.0 7.5 8.0 8.6" MMCV_WITH_OPS=1 MAX_JOBS=8 FORCE_CUDA=1 pip install --no-cache-dir 'mmcv-full<=1.7.0' && pip cache purge; \
-    else \
-        MMCV_WITH_OPS=1 MAX_JOBS=8 pip install --no-cache-dir 'mmcv-full<=1.7.0' && pip cache purge; \
-    fi
-
-# default shell bash
-ENV SHELL=/bin/bash
-# install special package
-RUN if [ "$USE_GPU" = "True" ] ; then \
-        pip install --no-cache-dir dgl-cu113 dglgo -f https://data.dgl.ai/wheels/repo.html; \
-    else \
-        pip install --no-cache-dir dgl dglgo -f https://data.dgl.ai/wheels/repo.html; \
-    fi
-
-# copy install scripts
-COPY docker/scripts/install_unifold.sh docker/scripts/install_colmap.sh docker/scripts/install_pytorch3d_nvdiffrast.sh docker/scripts/install_tiny_cuda_nn.sh docker/scripts/install_apex.sh /tmp/
-
-# for uniford
-RUN if [ "$USE_GPU" = "True" ] ; then \
-        bash /tmp/install_unifold.sh; \
-    else \
-        echo 'cpu unsupport uniford'; \
-    fi
-
-RUN if [ "$USE_GPU" = "True" ] ; then \
-        pip install --no-cache-dir git+https://github.com/gxd1994/Pointnet2.PyTorch.git@master#subdirectory=pointnet2; \
-    else \
-        echo 'cpu unsupport Pointnet2'; \
-    fi
-
-RUN pip install --no-cache-dir detectron2==0.3 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html
-
-# 3d supports
-RUN bash /tmp/install_colmap.sh
-RUN bash /tmp/install_tiny_cuda_nn.sh
-RUN bash /tmp/install_pytorch3d_nvdiffrast.sh
-# end of 3D
-
 # install modelscope
 COPY requirements /var/modelscope
@@ -115,12 +18,25 @@ RUN mkdir -p /root/.local/share/jupyter/labextensions/ && \
     cp -r /tmp/resources/jupyter_plugins/* /root/.local/share/jupyter/labextensions/
 
 COPY docker/scripts/modelscope_env_init.sh /usr/local/bin/ms_env_init.sh
-RUN pip install --no-cache-dir xtcocotools==1.12 detectron2==0.3 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html --force
-# speechbrain==0.5.7 for audio compatible
-RUN pip install --no-cache-dir speechbrain==0.5.7 adaseq>=0.5.0 mmcls>=0.21.0 mmdet>=2.25.0 decord>=0.6.0 numpy==1.18.5 wenetruntime==1.11.0 ipykernel fairseq fasttext deepspeed
+# python3.8 pip install git+https://github.com/jin-s13/xtcocoapi.git@v1.13
+# pip install git+https://github.com/gatagat/lap.git@v0.4.0
+RUN pip install --no-cache-dir text2sql_lgesql==1.3.0 \
+    git+https://github.com/jin-s13/xtcocoapi.git@v1.13 \
+    git+https://github.com/gatagat/lap.git@v0.4.0 \
+    detectron2==0.3 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html --force --no-deps
+
+RUN pip install --no-cache-dir mpi4py paint_ldm adaseq>=0.5.0 \
+    mmcls>=0.21.0 mmdet>=2.25.0 decord>=0.6.0 wenetruntime==1.11.0 \
+    ipykernel fairseq fasttext deepspeed -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html
+
+# for cpu install cpu version faiss, faiss depends on blas lib, we install libopenblas TODO rename gpu or cpu version faiss
 RUN if [ "$USE_GPU" = "True" ] ; then \
-    bash /tmp/install_apex.sh; \
+    pip install --no-cache-dir funtextprocessing kwsbp==0.0.6 faiss==1.7.2 safetensors typeguard==2.13.3 scikit-learn 'pandas<1.4.0' pai-easycv librosa==0.9.2 funasr -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html; \
 else \
-    echo 'cpu unsupport apex'; \
+    pip install --no-cache-dir funtextprocessing kwsbp==0.0.6 https://modelscope.oss-cn-beijing.aliyuncs.com/releases/dependencies/faiss-1.7.2-py37-none-linux_x86_64.whl safetensors typeguard==2.13.3 scikit-learn 'pandas<1.4.0' pai-easycv librosa==0.9.2 funasr -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html; \
 fi
 
+COPY examples /modelscope/examples
+
+# for pai-easycv setup compatibility issue
+ENV SETUPTOOLS_USE_DISTUTILS=stdlib
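For context, SETUPTOOLS_USE_DISTUTILS=stdlib tells setuptools to fall back to the standard-library distutils, which older setup scripts such as pai-easycv's may still expect (that is what the Dockerfile comment above refers to). A hypothetical one-off use of the same workaround outside the image would be:

    # apply the setuptools/distutils workaround for a single local install (illustrative)
    SETUPTOOLS_USE_DISTUTILS=stdlib pip install pai-easycv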
docker/Dockerfile.ubuntu_base (new file, 136 lines)
@@ -0,0 +1,136 @@
ARG BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:20.04-cuda11.3.0-cudnn8-devel
FROM $BASE_IMAGE
ARG DEBIAN_FRONTEND=noninteractive
ENV TZ=Asia/Shanghai
ENV CONDA_DIR /opt/conda
ENV PATH="${CONDA_DIR}/bin:${PATH}"
ENV arch=x86_64
SHELL ["/bin/bash", "-c"]
COPY docker/rcfiles /tmp/resources
COPY docker/jupyter_plugins /tmp/resources/jupyter_plugins
RUN apt-get update && apt-get install -y --reinstall ca-certificates && \
    apt-get clean && \
    cp /tmp/resources/sources.list.aliyun /etc/apt/sources.list && \
    apt-get update && \
    apt-get install -y locales wget git strace gdb sox libopenmpi-dev curl \
        libgeos-dev strace vim ffmpeg libsm6 tzdata language-pack-zh-hans \
        ttf-wqy-microhei ttf-wqy-zenhei xfonts-wqy libxext6 build-essential ninja-build && \
    wget https://packagecloud.io/github/git-lfs/packages/debian/bullseye/git-lfs_3.2.0_amd64.deb/download -O ./git-lfs_3.2.0_amd64.deb && \
    dpkg -i ./git-lfs_3.2.0_amd64.deb && \
    rm -f ./git-lfs_3.2.0_amd64.deb && \
    locale-gen zh_CN && \
    locale-gen zh_CN.utf8 && \
    update-locale LANG=zh_CN.UTF-8 LC_ALL=zh_CN.UTF-8 LANGUAGE=zh_CN.UTF-8 && \
    ln -fs /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
    dpkg-reconfigure --frontend noninteractive tzdata && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

ENV LANG=zh_CN.UTF-8 LANGUAGE=zh_CN.UTF-8 LC_ALL=zh_CN.UTF-8

# install and config python
ARG PYTHON_VERSION=3.7.13
# Miniconda3-py37_23.1.0-1-Linux-x86_64.sh is the last python3.7 release
RUN if [ "$PYTHON_VERSION" = "3.7.13" ] ; then \
        wget --quiet https://mirrors.aliyun.com/anaconda/miniconda/Miniconda3-py37_23.1.0-1-Linux-x86_64.sh -O ./miniconda.sh && \
        /bin/bash miniconda.sh -b -p /opt/conda && \
        rm -f miniconda.sh && \
        ln -s /opt/conda/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \
        echo ". /opt/conda/etc/profile.d/conda.sh" >> ~/.bashrc && \
        cp /tmp/resources/conda.tuna ~/.condarc && \
        source /root/.bashrc && \
        conda install --yes python==${PYTHON_VERSION} && \
        pip config set global.index-url https://mirrors.aliyun.com/pypi/simple && \
        pip config set install.trusted-host mirrors.aliyun.com; \
    else \
        wget --quiet https://mirrors.aliyun.com/anaconda/miniconda/Miniconda3-latest-Linux-${arch}.sh -O ./miniconda.sh && \
        /bin/bash miniconda.sh -b -p /opt/conda && \
        rm -f miniconda.sh && \
        ln -s /opt/conda/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \
        echo ". /opt/conda/etc/profile.d/conda.sh" >> ~/.bashrc && \
        cp /tmp/resources/conda.tuna ~/.condarc && \
        source /root/.bashrc && \
        conda install --yes python==${PYTHON_VERSION} && \
        pip config set global.index-url https://mirrors.aliyun.com/pypi/simple && \
        pip config set install.trusted-host mirrors.aliyun.com; \
    fi

ARG USE_GPU=True

# install pytorch
ARG TORCH_VERSION=1.12.0
ARG CUDATOOLKIT_VERSION=cu113
RUN if [ "$USE_GPU" = "True" ] ; then \
        pip install --no-cache-dir torch==$TORCH_VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu113; \
    else \
        pip install --no-cache-dir torch==$TORCH_VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu; \
    fi

# install tensorflow
ARG TENSORFLOW_VERSION=1.15.5
RUN if [ "$USE_GPU" = "True" ] ; then \
        pip install --no-cache-dir tensorflow==$TENSORFLOW_VERSION -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html; \
    else \
        # only python 3.7 has tensorflow 1.15.5
        if [ "$PYTHON_VERSION" = "3.7.13" ] ; then \
            pip install --no-cache-dir tensorflow==$TENSORFLOW_VERSION; \
        else \
            pip install --no-cache-dir numpy==1.18.5 https://modelscope.oss-cn-beijing.aliyuncs.com/releases/dependencies/tensorflow-1.15.5-cp38-cp38-linux_x86_64.whl; \
        fi; \
    fi

# mmcv-full<=1.7.0 for mmdet3d compatible
RUN if [ "$USE_GPU" = "True" ] ; then \
        CUDA_HOME=/usr/local/cuda TORCH_CUDA_ARCH_LIST="5.0 5.2 6.0 6.1 7.0 7.5 8.0 8.6" MMCV_WITH_OPS=1 MAX_JOBS=8 FORCE_CUDA=1 pip install --no-cache-dir 'mmcv-full<=1.7.0' && pip cache purge; \
    else \
        MMCV_WITH_OPS=1 MAX_JOBS=8 pip install --no-cache-dir 'mmcv-full<=1.7.0' && pip cache purge; \
    fi

# default shell bash
ENV SHELL=/bin/bash
# install special package
RUN if [ "$USE_GPU" = "True" ] ; then \
        pip install --no-cache-dir dgl-cu113 dglgo -f https://data.dgl.ai/wheels/repo.html; \
    else \
        pip install --no-cache-dir dgl==0.9.0 dglgo -f https://data.dgl.ai/wheels/repo.html; \
    fi

# copy install scripts
COPY docker/scripts/install_unifold.sh docker/scripts/install_colmap.sh docker/scripts/install_pytorch3d_nvdiffrast.sh docker/scripts/install_tiny_cuda_nn.sh docker/scripts/install_apex.sh /tmp/

# for unifold
RUN if [ "$USE_GPU" = "True" ] ; then \
        bash /tmp/install_unifold.sh; \
    else \
        echo 'cpu unsupport unifold'; \
    fi

RUN if [ "$USE_GPU" = "True" ] ; then \
        pip install --no-cache-dir git+https://github.com/gxd1994/Pointnet2.PyTorch.git@master#subdirectory=pointnet2; \
    else \
        echo 'cpu unsupport Pointnet2'; \
    fi

# 3d supports
RUN if [ "$USE_GPU" = "True" ] ; then \
        bash /tmp/install_colmap.sh; \
    else \
        echo 'cpu unsupport colmap'; \
    fi
RUN if [ "$USE_GPU" = "True" ] ; then \
        bash /tmp/install_tiny_cuda_nn.sh; \
    else \
        echo 'cpu unsupport tiny_cudann'; \
    fi
RUN if [ "$USE_GPU" = "True" ] ; then \
        bash /tmp/install_pytorch3d_nvdiffrast.sh; \
    else \
        echo 'cpu unsupport pytorch3d nvdiffrast'; \
    fi
# end of 3D

# install apex after deepspeed
RUN if [ "$USE_GPU" = "True" ] ; then \
        bash /tmp/install_apex.sh; \
    else \
        echo 'cpu unsupport apex'; \
    fi
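The build_base_image.sh script above drives this Dockerfile (it copies it to Dockerfile and passes the build args). A hand-run equivalent might look roughly like the following; the tag and argument values are illustrative, not prescribed by the commit.

    # build the GPU base image manually instead of via .dev_scripts/build_base_image.sh (illustrative values)
    docker build -f docker/Dockerfile.ubuntu_base \
        --build-arg BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:20.04-cuda11.3.0-cudnn8-devel \
        --build-arg USE_GPU=True \
        --build-arg PYTHON_VERSION=3.7.13 \
        --build-arg TORCH_VERSION=1.11.0 \
        --build-arg CUDATOOLKIT_VERSION=cu113 \
        --build-arg TENSORFLOW_VERSION=1.15.5 \
        -t reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-cuda11.3.0-py37-torch1.11.0-tf1.15.5-base-1.6.1 .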
@@ -1,25 +1,14 @@
-deb http://mirrors.aliyun.com/ubuntu/ bionic main restricted
-# deb-src http://mirrors.aliyun.com/ubuntu/ bionic main restricted
+deb https://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse
+# deb-src https://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse
 
-deb http://mirrors.aliyun.com/ubuntu/ bionic-updates main restricted
-# deb-src http://mirrors.aliyun.com/ubuntu/ bionic-updates main restricted
+deb https://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse
+# deb-src https://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse
 
-deb http://mirrors.aliyun.com/ubuntu/ bionic universe
-# deb-src http://mirrors.aliyun.com/ubuntu/ bionic universe
-deb http://mirrors.aliyun.com/ubuntu/ bionic-updates universe
-# deb-src http://mirrors.aliyun.com/ubuntu/ bionic-updates universe
+deb https://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse
+# deb-src https://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse
 
-deb http://mirrors.aliyun.com/ubuntu/ bionic multiverse
-# deb-src http://mirrors.aliyun.com/ubuntu/ bionic multiverse
-deb http://mirrors.aliyun.com/ubuntu/ bionic-updates multiverse
-# deb-src http://mirrors.aliyun.com/ubuntu/ bionic-updates multiverse
+# deb https://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse
+# deb-src https://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse
 
-deb http://mirrors.aliyun.com/ubuntu/ bionic-backports main restricted universe multiverse
-# deb-src http://mirrors.aliyun.com/ubuntu/ bionic-backports main restricted universe multiverse
-
-deb http://mirrors.aliyun.com/ubuntu bionic-security main restricted
-# deb-src http://mirrors.aliyun.com/ubuntu bionic-security main restricted
-deb http://mirrors.aliyun.com/ubuntu bionic-security universe
-# deb-src http://mirrors.aliyun.com/ubuntu bionic-security universe
-deb http://mirrors.aliyun.com/ubuntu bionic-security multiverse
-# deb-src http://mirrors.aliyun.com/ubuntu bionic-security multiverse
+deb https://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse
+# deb-src https://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse
@@ -1,6 +1,7 @@
 export MAX_JOBS=16 \
 && git clone https://github.com/NVIDIA/apex \
 && cd apex \
-&& TORCH_CUDA_ARCH_LIST="6.0;6.1;6.2;7.0;7.5;8.0;8.6" pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./ \
+&& git checkout 6bd01c4b99a84648ad5e5238a959735e6936c813 \
+&& TORCH_CUDA_ARCH_LIST="6.0;6.1;6.2;7.0;7.5;8.0;8.6" pip install -v --disable-pip-version-check --no-cache --global-option="--cpp_ext" --global-option="--cuda_ext" ./ \
 && cd .. \
 && rm -rf apex
@@ -1,7 +1,6 @@
 kaldiio
 kwsbp>=0.0.6
 matplotlib
-numpy
 py_sound_connect>=0.1
 scipy
 SoundFile>0.10
@@ -2,7 +2,6 @@ hyperpyyaml
 librosa==0.9.2
 MinDAEC
 mir_eval>=0.7
-numpy
 rotary_embedding_torch>=0.1.5
 scipy
 SoundFile>0.10
@@ -16,6 +16,7 @@ fastai>=1.0.51
 ffmpeg>=1.4
 ffmpeg-python>=0.2.0
 ftfy
+fvcore
 imageio>=2.9.0
 imageio-ffmpeg>=0.4.2
 imgaug>=0.4.0
@@ -44,16 +45,19 @@ pandas
 panopticapi
 plyfile>=0.7.4
 psutil
+pyclipper
 PyMCubes
 pytorch-lightning
 regex
-scikit-image>=0.19.3
+# <0.20.0 for compatible python3.7 python3.8
+scikit-image>=0.19.3,<0.20.0
 scikit-learn>=0.20.1
 shapely
 shotdetect_scenedetect_lgss>=0.0.4
 smplx
 tensorflow-estimator>=1.15.1
 tf_slim
+thop
 timm>=0.4.9
 torchmetrics>=0.6.2
 torchsummary>=1.5.1
@@ -7,8 +7,8 @@ gast>=0.2.2
 # for python3.7 python3.8 compatible
 numpy<=1.22.0
 oss2
-# for datasets compatible
-pandas<=1.5.3
+# for datasets compatible and py37 py38 compatible
+pandas<1.4.0
 Pillow>=6.2.0
 # pyarrow 9.0.0 introduced event_loop core dump
 pyarrow>=6.0.0,!=9.0.0
@@ -1,4 +1,5 @@
 boto3
+embeddings
 en_core_web_sm>=2.3.5
 filelock
 ftfy
@@ -18,6 +19,7 @@ scikit_learn
 sentencepiece
 seqeval
 spacy>=2.3.5
+stanza
 subword_nmt>=0.3.8
 termcolor
 tokenizers
@@ -1 +1 @@
-numpy==1.18.5
+numpy<1.20.0
@@ -60,48 +60,19 @@ isolated: # test cases that may require an excessive amount of GPU memory or run a long time
   - test_video_deinterlace.py
   - test_image_inpainting_sdv2.py
   - test_bad_image_detecting.py
-  - test_image_portrait_stylization_trainer.py
   - test_controllable_image_generation.py
   - test_image_colorization_trainer.py
 
 envs:
   default: # default env, cases not in another env run in default, pytorch.
     dependencies: # requirement packages, pip install before test cases run.
-      - numpy>=1.20,<=1.21.0
+      - numpy>=1.20,<=1.22.0
       - protobuf<4,>=3.20.2
 
   tensorflow1x: # cases executed on the tensorflow 1.x framework.
     requirements: # requirements files installed before test cases run.
       - tensorflow1x.txt
     dependencies: # requirement packages, pip install before test cases run.
-      - numpy==1.18.5
+      - numpy<1.20.0
     tests:
-      - test_text_to_speech.py
-      - test_csanmt_translation.py
-      - test_translation_trainer.py
-      - test_translation_evaluation_trainer.py
-      - test_ocr_detection.py
-      - test_automatic_speech_recognition.py
-      - test_image_matting.py
-      - test_person_image_cartoon.py
-      - test_skin_retouching.py
-      - test_image_style_transfer.py
       - test_image_portrait_stylization_trainer.py
-      - test_language_identification.py
-      - test_language_guided_video_summarization_trainer.py
-      - test_motion_generation.py
-      - test_universal_matting.py
-      - test_dialog_modeling.py
-      - test_trainer.py
-      - test_abnormal_object_detection.py
-      - test_image_face_fusion.py
-      - test_ocr_detection_db_trainer.py
-      - test_language_guided_video_summarization.py
-      - test_interactive_translation_pipeline.py
-      - test_image_defrcn_fewshot_trainer.py
-      - test_automatic_post_editing.py
-      - test_human_reconstruction.py
-      - test_nerf_recon_acc_trainer.py
-      - test_nerf_recon_acc.py
-      - test_speech_signal_process.py
-      - test_tensorboard_hook.py
-      - test_efficient_diffusion_tuning_trainer.py