docker file py38 and py37 compatibility merge

Link: https://code.alibaba-inc.com/Ali-MaaS/MaaS-lib/codereview/12722824
* debug

* add missing deps

* add stanza

* fix numpy issue

* numpy version

* tf case

* numpy <= 1.22.0

* fix tf 1.15.5 case

* add python38 and python37 compatible docker file

* cv add fvcore

* add easycv

* remove debug code

* fix style issue

* fix python3.7/python3.8 compatibility issue

* comment

* modify run config

* scipy<=1.7.3

* revert scipy<=1.7.3

* fix compatibility bug

* fix compatibility bug

* add easycv

* update version: 1.6.1rc0
Link: https://code.alibaba-inc.com/Ali-MaaS/MaaS-lib/codereview/12754942
fix python3.7 and python3.8 compatibility issue

* numpy version change

* numpy version

* numpy version

* fix numpy version

* fix numpy version

* update version: 1.6.1rc0
Link: https://code.alibaba-inc.com/Ali-MaaS/MaaS-lib/codereview/12754942
fix python3.7 and python3.8 compatibility issue

* restore setup.py

* restore setup.py

* add build base image

* fix style

* numpy version fix

* optimize build image, split base and modelscope image

* fix style issue

* modify example path

* modify examples folder

* add SETUPTOOLS_USE_DISTUTILS=stdlib comments
Author: mulin.lyh
Date: 2023-06-08 10:01:58 +08:00
Committed by: wenmeng.zwm
Parent: 73f8d39897
Commit: 25a7398aea
13 changed files with 317 additions and 169 deletions

View File

@@ -0,0 +1,119 @@
#!/bin/bash
# default values.
BASE_CPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:20.04
BASE_GPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:20.04-cuda11.3.0-cudnn8-devel
MODELSCOPE_REPO_ADDRESS=reg.docker.alibaba-inc.com/modelscope/modelscope
python_version=3.7.13
torch_version=1.11.0
cudatoolkit_version=11.3
tensorflow_version=1.15.5
version=None
is_cpu=False
function usage(){
echo "usage: build.sh "
echo " --python=python_version set python version, default: $python_version"
echo " --torch=torch_version set pytorch version, fefault: $torch_version"
echo " --tensorflow=tensorflow_version set tensorflow version, default: $tensorflow_version"
echo " --version=version set image version, default: $version"
echo " --test option for run test before push image, only push on ci test pass"
echo " --cpu option for build cpu version"
echo " --dsw option for build dsw version"
echo " --ci option for build ci version"
echo " --push option for push image to remote repo"
}
for i in "$@"; do
case $i in
--python=*)
python_version="${i#*=}"
shift
;;
--torch=*)
torch_version="${i#*=}"
shift # pytorch version
;;
--tensorflow=*)
tensorflow_version="${i#*=}"
shift # tensorflow version
;;
--version=*)
version="${i#*=}"
shift # version
;;
--cpu)
is_cpu=True
shift # is cpu image
;;
--push)
is_push=True
shift # option for push image to remote repo
;;
--help)
usage
exit 0
;;
-*|--*)
echo "Unknown option $i"
usage
exit 1
;;
*)
;;
esac
done
if [ "$version" == "None" ]; then
echo "version must specify!"
exit 1
fi
if [ "$is_cpu" == "True" ]; then
export BASE_IMAGE=$BASE_CPU_IMAGE
base_tag=ubuntu20.04
export USE_GPU=False
else
export BASE_IMAGE=$BASE_GPU_IMAGE
base_tag=ubuntu20.04-cuda11.3.0
export USE_GPU=True
fi
if [[ $python_version == 3.7* ]]; then
base_tag=$base_tag-py37
elif [[ $python_version == 3.8* ]]; then
base_tag=$base_tag-py38
elif [[ $python_version == 3.9* ]]; then
base_tag=$base_tag-py39
else
echo "Unsupport python version: $python_version"
exit 1
fi
target_image_tag=$base_tag-torch$torch_version-tf$tensorflow_version-base-$version
export IMAGE_TO_BUILD=$MODELSCOPE_REPO_ADDRESS:$target_image_tag
export PYTHON_VERSION=$python_version
export TORCH_VERSION=$torch_version
export CUDATOOLKIT_VERSION=$cudatoolkit_version
export TENSORFLOW_VERSION=$tensorflow_version
echo -e "Building image with:\npython$python_version\npytorch$torch_version\ntensorflow:$tensorflow_version\ncudatoolkit:$cudatoolkit_version\ncpu:$is_cpu\n"
docker_file_content=`cat docker/Dockerfile.ubuntu_base`
printf "$docker_file_content" > Dockerfile
while true
do
docker build -t $IMAGE_TO_BUILD \
--build-arg USE_GPU \
--build-arg BASE_IMAGE \
--build-arg PYTHON_VERSION \
--build-arg TORCH_VERSION \
--build-arg CUDATOOLKIT_VERSION \
--build-arg TENSORFLOW_VERSION \
-f Dockerfile .
if [ $? -eq 0 ]; then
echo "Image build done"
break
else
echo "Running docker build command error, we will retry"
fi
done
if [ "$is_push" == "True" ]; then
echo "Pushing image: $IMAGE_TO_BUILD"
docker push $IMAGE_TO_BUILD
fi
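
For reference, a hypothetical invocation of this base-image build script (the script path is assumed, since the diff view omits file names; 3.8.13 is just an example patch version matching the 3.8* branch above):
# would build and, with --push, publish
# reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-cuda11.3.0-py38-torch1.11.0-tf1.15.5-base-1.6.1
bash docker/build_base_image.sh --python=3.8.13 --torch=1.11.0 --tensorflow=1.15.5 --version=1.6.1 --push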

View File

@@ -1,7 +1,9 @@
#!/bin/bash
# default values.
BASE_CPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:20.04
BASE_GPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:20.04-cuda11.3.0-cudnn8-devel
BASE_PY38_CPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-py38-torch1.11.0-tf1.15.5-base-1.6.1
BASE_PY38_GPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-cuda11.3.0-py38-torch1.11.0-tf1.15.5-base-1.6.1
BASE_PY37_CPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-py37-torch1.11.0-tf1.15.5-base-1.6.1
BASE_PY37_GPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-cuda11.3.0-py37-torch1.11.0-tf1.15.5-base-1.6.1
MODELSCOPE_REPO_ADDRESS=reg.docker.alibaba-inc.com/modelscope/modelscope
python_version=3.7.13
torch_version=1.11.0
@@ -86,20 +88,30 @@ if [ "$modelscope_version" == "None" ]; then
exit 1
fi
if [ "$is_cpu" == "True" ]; then
export BASE_IMAGE=$BASE_CPU_IMAGE
base_tag=ubuntu20.04
export USE_GPU=False
else
export BASE_IMAGE=$BASE_GPU_IMAGE
base_tag=ubuntu20.04-cuda11.3.0
export USE_GPU=True
fi
if [[ $python_version == 3.7* ]]; then
if [ "$is_cpu" == "True" ]; then
echo "Building python3.7 cpu image"
export BASE_IMAGE=$BASE_PY37_CPU_IMAGE
else
echo "Building python3.7 gpu image"
export BASE_IMAGE=$BASE_PY37_GPU_IMAGE
fi
base_tag=$base_tag-py37
elif [[ $python_version == 3.8* ]]; then
if [ "$is_cpu" == "True" ]; then
echo "Building python3.8 cpu image"
export BASE_IMAGE=$BASE_PY38_CPU_IMAGE
else
echo "Building python3.8 gpu image"
export BASE_IMAGE=$BASE_PY38_GPU_IMAGE
fi
base_tag=$base_tag-py38
elif [[ $python_version == 3.9* ]]; then
base_tag=$base_tag-py39
else
echo "Unsupport python version: $python_version"
exit 1

View File

@@ -1,102 +1,5 @@
ARG BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:20.04-cuda11.3.0-cudnn8-devel
ARG BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-cuda11.3.0-py37-torch1.11.0-tf1.15.5-1.6.1
FROM $BASE_IMAGE
ARG DEBIAN_FRONTEND=noninteractive
ENV TZ=Asia/Shanghai
ENV CONDA_DIR /opt/conda
ENV PATH="${CONDA_DIR}/bin:${PATH}"
ENV arch=x86_64
SHELL ["/bin/bash", "-c"]
COPY docker/rcfiles /tmp/resources
COPY docker/jupyter_plugins /tmp/resources/jupyter_plugins
RUN apt-get update && apt-get install -y --reinstall ca-certificates && \
apt-get clean && \
cp /tmp/resources/ubuntu20.04_sources.tuna /etc/apt/sources.list && \
apt-get update && \
apt-get install -y locales wget git strace gdb sox libopenmpi-dev curl strace vim ffmpeg libsm6 tzdata language-pack-zh-hans ttf-wqy-microhei ttf-wqy-zenhei xfonts-wqy libxext6 build-essential ninja-build && \
wget https://packagecloud.io/github/git-lfs/packages/debian/bullseye/git-lfs_3.2.0_amd64.deb/download -O ./git-lfs_3.2.0_amd64.deb && \
dpkg -i ./git-lfs_3.2.0_amd64.deb && \
rm -f ./git-lfs_3.2.0_amd64.deb && \
locale-gen zh_CN && \
locale-gen zh_CN.utf8 && \
update-locale LANG=zh_CN.UTF-8 LC_ALL=zh_CN.UTF-8 LANGUAGE=zh_CN.UTF-8 && \
ln -fs /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
dpkg-reconfigure --frontend noninteractive tzdata && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
ENV LANG=zh_CN.UTF-8 LANGUAGE=zh_CN.UTF-8 LC_ALL=zh_CN.UTF-8
# install and configure python
ARG PYTHON_VERSION=3.7.13
RUN wget --quiet https://mirrors.aliyun.com/anaconda/miniconda/Miniconda3-latest-Linux-${arch}.sh -O ./miniconda.sh && \
/bin/bash miniconda.sh -b -p /opt/conda && \
rm -f miniconda.sh && \
ln -s /opt/conda/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \
echo ". /opt/conda/etc/profile.d/conda.sh" >> ~/.bashrc && \
cp /tmp/resources/conda.tuna ~/.condarc && \
source /root/.bashrc && \
conda install --yes python==${PYTHON_VERSION} && \
pip config set global.index-url https://mirrors.aliyun.com/pypi/simple && \
pip config set install.trusted-host mirrors.aliyun.com
ARG USE_GPU=True
# install pytorch
ARG TORCH_VERSION=1.12.0
ARG CUDATOOLKIT_VERSION=11.3
RUN if [ "$USE_GPU" = "True" ] ; then \
pip install --no-cache-dir torch==$TORCH_VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu113; \
else \
pip install --no-cache-dir torch==$TORCH_VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu; \
fi
# install tensorflow
ARG TENSORFLOW_VERSION=1.15.5
RUN if [ "$USE_GPU" = "True" ] ; then \
pip install --no-cache-dir tensorflow==$TENSORFLOW_VERSION -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html; \
else \
pip install --no-cache-dir tensorflow==$TENSORFLOW_VERSION; \
fi
# mmcv-full<=1.7.0 for mmdet3d compatibility
RUN if [ "$USE_GPU" = "True" ] ; then \
CUDA_HOME=/usr/local/cuda TORCH_CUDA_ARCH_LIST="5.0 5.2 6.0 6.1 7.0 7.5 8.0 8.6" MMCV_WITH_OPS=1 MAX_JOBS=8 FORCE_CUDA=1 pip install --no-cache-dir 'mmcv-full<=1.7.0' && pip cache purge; \
else \
MMCV_WITH_OPS=1 MAX_JOBS=8 pip install --no-cache-dir 'mmcv-full<=1.7.0' && pip cache purge; \
fi
# default shell bash
ENV SHELL=/bin/bash
# install special package
RUN if [ "$USE_GPU" = "True" ] ; then \
pip install --no-cache-dir dgl-cu113 dglgo -f https://data.dgl.ai/wheels/repo.html; \
else \
pip install --no-cache-dir dgl dglgo -f https://data.dgl.ai/wheels/repo.html; \
fi
# copy install scripts
COPY docker/scripts/install_unifold.sh docker/scripts/install_colmap.sh docker/scripts/install_pytorch3d_nvdiffrast.sh docker/scripts/install_tiny_cuda_nn.sh docker/scripts/install_apex.sh /tmp/
# for unifold
RUN if [ "$USE_GPU" = "True" ] ; then \
bash /tmp/install_unifold.sh; \
else \
echo 'unifold is not supported on cpu'; \
fi
RUN if [ "$USE_GPU" = "True" ] ; then \
pip install --no-cache-dir git+https://github.com/gxd1994/Pointnet2.PyTorch.git@master#subdirectory=pointnet2; \
else \
echo 'Pointnet2 is not supported on cpu'; \
fi
RUN pip install --no-cache-dir detectron2==0.3 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html
# 3d supports
RUN bash /tmp/install_colmap.sh
RUN bash /tmp/install_tiny_cuda_nn.sh
RUN bash /tmp/install_pytorch3d_nvdiffrast.sh
# end of 3D
# install modelscope
COPY requirements /var/modelscope
@@ -115,12 +18,25 @@ RUN mkdir -p /root/.local/share/jupyter/labextensions/ && \
cp -r /tmp/resources/jupyter_plugins/* /root/.local/share/jupyter/labextensions/
COPY docker/scripts/modelscope_env_init.sh /usr/local/bin/ms_env_init.sh
RUN pip install --no-cache-dir xtcocotools==1.12 detectron2==0.3 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html --force
# python3.8 pip install git+https://github.com/jin-s13/xtcocoapi.git@v1.13
# pip install git+https://github.com/gatagat/lap.git@v0.4.0
RUN pip install --no-cache-dir text2sql_lgesql==1.3.0 \
git+https://github.com/jin-s13/xtcocoapi.git@v1.13 \
git+https://github.com/gatagat/lap.git@v0.4.0 \
detectron2==0.3 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html --force --no-deps
# speechbrain==0.5.7 for audio compatibility
RUN pip install --no-cache-dir speechbrain==0.5.7 adaseq>=0.5.0 mmcls>=0.21.0 mmdet>=2.25.0 decord>=0.6.0 numpy==1.18.5 wenetruntime==1.11.0 ipykernel fairseq fasttext deepspeed
RUN pip install --no-cache-dir mpi4py paint_ldm adaseq>=0.5.0 \
mmcls>=0.21.0 mmdet>=2.25.0 decord>=0.6.0 wenetruntime==1.11.0 \
ipykernel fairseq fasttext deepspeed -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html
# for cpu, install the cpu build of faiss; faiss depends on a BLAS library, so we install libopenblas. TODO: rename the gpu/cpu faiss variants
RUN if [ "$USE_GPU" = "True" ] ; then \
bash /tmp/install_apex.sh; \
pip install --no-cache-dir funtextprocessing kwsbp==0.0.6 faiss==1.7.2 safetensors typeguard==2.13.3 scikit-learn 'pandas<1.4.0' pai-easycv librosa==0.9.2 funasr -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html; \
else \
echo 'cpu unsupport apex'; \
pip install --no-cache-dir funtextprocessing kwsbp==0.0.6 https://modelscope.oss-cn-beijing.aliyuncs.com/releases/dependencies/faiss-1.7.2-py37-none-linux_x86_64.whl safetensors typeguard==2.13.3 scikit-learn 'pandas<1.4.0' pai-easycv librosa==0.9.2 funasr -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html; \
fi
COPY examples /modelscope/examples
# for pai-easycv setup compatibility issue
ENV SETUPTOOLS_USE_DISTUTILS=stdlib
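
Background on the last line: with setuptools >= 60 the bundled "local" distutils is used by default, which can break older setup.py flows (the pai-easycv issue noted above); SETUPTOOLS_USE_DISTUTILS=stdlib falls back to the standard-library distutils. A quick, hypothetical check inside a built image (the image tag here is made up for illustration):
docker run --rm modelscope:py38-test \
    python -c "import distutils, os; print(os.environ.get('SETUPTOOLS_USE_DISTUTILS'), distutils.__file__)"
# expected: "stdlib" and a path under the interpreter's lib/python3.x/distutils/,
# not under .../site-packages/setuptools/_distutils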

View File

@@ -0,0 +1,136 @@
ARG BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:20.04-cuda11.3.0-cudnn8-devel
FROM $BASE_IMAGE
ARG DEBIAN_FRONTEND=noninteractive
ENV TZ=Asia/Shanghai
ENV CONDA_DIR /opt/conda
ENV PATH="${CONDA_DIR}/bin:${PATH}"
ENV arch=x86_64
SHELL ["/bin/bash", "-c"]
COPY docker/rcfiles /tmp/resources
COPY docker/jupyter_plugins /tmp/resources/jupyter_plugins
RUN apt-get update && apt-get install -y --reinstall ca-certificates && \
apt-get clean && \
cp /tmp/resources/sources.list.aliyun /etc/apt/sources.list && \
apt-get update && \
apt-get install -y locales wget git strace gdb sox libopenmpi-dev curl \
libgeos-dev strace vim ffmpeg libsm6 tzdata language-pack-zh-hans \
ttf-wqy-microhei ttf-wqy-zenhei xfonts-wqy libxext6 build-essential ninja-build && \
wget https://packagecloud.io/github/git-lfs/packages/debian/bullseye/git-lfs_3.2.0_amd64.deb/download -O ./git-lfs_3.2.0_amd64.deb && \
dpkg -i ./git-lfs_3.2.0_amd64.deb && \
rm -f ./git-lfs_3.2.0_amd64.deb && \
locale-gen zh_CN && \
locale-gen zh_CN.utf8 && \
update-locale LANG=zh_CN.UTF-8 LC_ALL=zh_CN.UTF-8 LANGUAGE=zh_CN.UTF-8 && \
ln -fs /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
dpkg-reconfigure --frontend noninteractive tzdata && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
ENV LANG=zh_CN.UTF-8 LANGUAGE=zh_CN.UTF-8 LC_ALL=zh_CN.UTF-8
# install and configure python
ARG PYTHON_VERSION=3.7.13
# Miniconda3-py37_23.1.0-1-Linux-x86_64.sh is the last Miniconda release for python3.7
RUN if [ "$PYTHON_VERSION" = "3.7.13" ] ; then \
wget --quiet https://mirrors.aliyun.com/anaconda/miniconda/Miniconda3-py37_23.1.0-1-Linux-x86_64.sh -O ./miniconda.sh && \
/bin/bash miniconda.sh -b -p /opt/conda && \
rm -f miniconda.sh && \
ln -s /opt/conda/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \
echo ". /opt/conda/etc/profile.d/conda.sh" >> ~/.bashrc && \
cp /tmp/resources/conda.tuna ~/.condarc && \
source /root/.bashrc && \
conda install --yes python==${PYTHON_VERSION} && \
pip config set global.index-url https://mirrors.aliyun.com/pypi/simple && \
pip config set install.trusted-host mirrors.aliyun.com;\
else \
wget --quiet https://mirrors.aliyun.com/anaconda/miniconda/Miniconda3-latest-Linux-${arch}.sh -O ./miniconda.sh && \
/bin/bash miniconda.sh -b -p /opt/conda && \
rm -f miniconda.sh && \
ln -s /opt/conda/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \
echo ". /opt/conda/etc/profile.d/conda.sh" >> ~/.bashrc && \
cp /tmp/resources/conda.tuna ~/.condarc && \
source /root/.bashrc && \
conda install --yes python==${PYTHON_VERSION} && \
pip config set global.index-url https://mirrors.aliyun.com/pypi/simple && \
pip config set install.trusted-host mirrors.aliyun.com;\
fi
ARG USE_GPU=True
# install pytorch
ARG TORCH_VERSION=1.12.0
ARG CUDATOOLKIT_VERSION=cu113
RUN if [ "$USE_GPU" = "True" ] ; then \
pip install --no-cache-dir torch==$TORCH_VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu113; \
else \
pip install --no-cache-dir torch==$TORCH_VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu; \
fi
# install tensorflow
ARG TENSORFLOW_VERSION=1.15.5
RUN if [ "$USE_GPU" = "True" ] ; then \
pip install --no-cache-dir tensorflow==$TENSORFLOW_VERSION -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html; \
else \
# only python 3.7 has an official tensorflow 1.15.5 wheel
if [ "$PYTHON_VERSION" = "3.7.13" ] ; then \
pip install --no-cache-dir tensorflow==$TENSORFLOW_VERSION; \
else \
pip install --no-cache-dir numpy==1.18.5 https://modelscope.oss-cn-beijing.aliyuncs.com/releases/dependencies/tensorflow-1.15.5-cp38-cp38-linux_x86_64.whl; \
fi; \
fi
# mmcv-full<=1.7.0 for mmdet3d compatibility
RUN if [ "$USE_GPU" = "True" ] ; then \
CUDA_HOME=/usr/local/cuda TORCH_CUDA_ARCH_LIST="5.0 5.2 6.0 6.1 7.0 7.5 8.0 8.6" MMCV_WITH_OPS=1 MAX_JOBS=8 FORCE_CUDA=1 pip install --no-cache-dir 'mmcv-full<=1.7.0' && pip cache purge; \
else \
MMCV_WITH_OPS=1 MAX_JOBS=8 pip install --no-cache-dir 'mmcv-full<=1.7.0' && pip cache purge; \
fi
# default shell bash
ENV SHELL=/bin/bash
# install special package
RUN if [ "$USE_GPU" = "True" ] ; then \
pip install --no-cache-dir dgl-cu113 dglgo -f https://data.dgl.ai/wheels/repo.html; \
else \
pip install --no-cache-dir dgl==0.9.0 dglgo -f https://data.dgl.ai/wheels/repo.html; \
fi
# copy install scripts
COPY docker/scripts/install_unifold.sh docker/scripts/install_colmap.sh docker/scripts/install_pytorch3d_nvdiffrast.sh docker/scripts/install_tiny_cuda_nn.sh docker/scripts/install_apex.sh /tmp/
# for unifold
RUN if [ "$USE_GPU" = "True" ] ; then \
bash /tmp/install_unifold.sh; \
else \
echo 'unifold is not supported on cpu'; \
fi
RUN if [ "$USE_GPU" = "True" ] ; then \
pip install --no-cache-dir git+https://github.com/gxd1994/Pointnet2.PyTorch.git@master#subdirectory=pointnet2; \
else \
echo 'Pointnet2 is not supported on cpu'; \
fi
# 3d supports
RUN if [ "$USE_GPU" = "True" ] ; then \
bash /tmp/install_colmap.sh; \
else \
echo 'cpu unsupport colmap'; \
fi
RUN if [ "$USE_GPU" = "True" ] ; then \
bash /tmp/install_tiny_cuda_nn.sh \
else \
echo 'cpu unsupport tiny_cudann'; \
fi
RUN if [ "$USE_GPU" = "True" ] ; then \
bash /tmp/install_pytorch3d_nvdiffrast.sh; \
else \
echo 'cpu unsupport pytorch3d nvdiffrast'; \
fi
# end of 3D
# install apex after deepspeed
RUN if [ "$USE_GPU" = "True" ] ; then \
bash /tmp/install_apex.sh; \
else \
echo 'apex is not supported on cpu'; \
fi
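
The build script exports USE_GPU, BASE_IMAGE, PYTHON_VERSION, TORCH_VERSION, CUDATOOLKIT_VERSION and TENSORFLOW_VERSION and forwards them with value-less --build-arg flags, so docker reads the values from the environment. A fully spelled-out, hypothetical equivalent for a py38 GPU base image (the target tag here is chosen only for illustration) would be:
docker build -t modelscope-base:py38-local-test \
    --build-arg BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:20.04-cuda11.3.0-cudnn8-devel \
    --build-arg USE_GPU=True \
    --build-arg PYTHON_VERSION=3.8.13 \
    --build-arg TORCH_VERSION=1.11.0 \
    --build-arg CUDATOOLKIT_VERSION=11.3 \
    --build-arg TENSORFLOW_VERSION=1.15.5 \
    -f docker/Dockerfile.ubuntu_base .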

View File

@@ -1,25 +1,14 @@
deb http://mirrors.aliyun.com/ubuntu/ bionic main restricted
# deb-src http://mirrors.aliyun.com/ubuntu/ bionic main restricted
deb https://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse
# deb-src https://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ bionic-updates main restricted
# deb-src http://mirrors.aliyun.com/ubuntu/ bionic-updates main restricted
deb https://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse
# deb-src https://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ bionic universe
# deb-src http://mirrors.aliyun.com/ubuntu/ bionic universe
deb http://mirrors.aliyun.com/ubuntu/ bionic-updates universe
# deb-src http://mirrors.aliyun.com/ubuntu/ bionic-updates universe
deb https://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse
# deb-src https://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ bionic multiverse
# deb-src http://mirrors.aliyun.com/ubuntu/ bionic multiverse
deb http://mirrors.aliyun.com/ubuntu/ bionic-updates multiverse
# deb-src http://mirrors.aliyun.com/ubuntu/ bionic-updates multiverse
# deb https://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse
# deb-src https://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ bionic-backports main restricted universe multiverse
# deb-src http://mirrors.aliyun.com/ubuntu/ bionic-backports main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu bionic-security main restricted
# deb-src http://mirrors.aliyun.com/ubuntu bionic-security main restricted
deb http://mirrors.aliyun.com/ubuntu bionic-security universe
# deb-src http://mirrors.aliyun.com/ubuntu bionic-security universe
deb http://mirrors.aliyun.com/ubuntu bionic-security multiverse
# deb-src http://mirrors.aliyun.com/ubuntu bionic-security multiverse
deb https://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse
# deb-src https://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse

View File

@@ -1,6 +1,7 @@
export MAX_JOBS=16 \
&& git clone https://github.com/NVIDIA/apex \
&& cd apex \
&& TORCH_CUDA_ARCH_LIST="6.0;6.1;6.2;7.0;7.5;8.0;8.6" pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./ \
&& git checkout 6bd01c4b99a84648ad5e5238a959735e6936c813 \
&& TORCH_CUDA_ARCH_LIST="6.0;6.1;6.2;7.0;7.5;8.0;8.6" pip install -v --disable-pip-version-check --no-cache --global-option="--cpp_ext" --global-option="--cuda_ext" ./ \
&& cd .. \
&& rm -rf apex

View File

@@ -1,7 +1,6 @@
kaldiio
kwsbp>=0.0.6
matplotlib
numpy
py_sound_connect>=0.1
scipy
SoundFile>0.10

View File

@@ -2,7 +2,6 @@ hyperpyyaml
librosa==0.9.2
MinDAEC
mir_eval>=0.7
numpy
rotary_embedding_torch>=0.1.5
scipy
SoundFile>0.10

View File

@@ -16,6 +16,7 @@ fastai>=1.0.51
ffmpeg>=1.4
ffmpeg-python>=0.2.0
ftfy
fvcore
imageio>=2.9.0
imageio-ffmpeg>=0.4.2
imgaug>=0.4.0
@@ -44,16 +45,19 @@ pandas
panopticapi
plyfile>=0.7.4
psutil
pyclipper
PyMCubes
pytorch-lightning
regex
scikit-image>=0.19.3
# <0.20.0 for python3.7/python3.8 compatibility
scikit-image>=0.19.3,<0.20.0
scikit-learn>=0.20.1
shapely
shotdetect_scenedetect_lgss>=0.0.4
smplx
tensorflow-estimator>=1.15.1
tf_slim
thop
timm>=0.4.9
torchmetrics>=0.6.2
torchsummary>=1.5.1

View File

@@ -7,8 +7,8 @@ gast>=0.2.2
# for python3.7/python3.8 compatibility
numpy<=1.22.0
oss2
# for datasets compatibility
pandas<=1.5.3
# for datasets compatibility and py37/py38 compatibility
pandas<1.4.0
Pillow>=6.2.0
# pyarrow 9.0.0 introduced event_loop core dump
pyarrow>=6.0.0,!=9.0.0

View File

@@ -1,4 +1,5 @@
boto3
embeddings
en_core_web_sm>=2.3.5
filelock
ftfy
@@ -18,6 +19,7 @@ scikit_learn
sentencepiece
seqeval
spacy>=2.3.5
stanza
subword_nmt>=0.3.8
termcolor
tokenizers

View File

@@ -1 +1 @@
numpy==1.18.5
numpy<1.20.0

View File

@@ -60,48 +60,19 @@ isolated: # test cases that may require an excessive amount of GPU memory or run for a long time
- test_video_deinterlace.py
- test_image_inpainting_sdv2.py
- test_bad_image_detecting.py
- test_image_portrait_stylization_trainer.py
- test_controllable_image_generation.py
- test_image_colorization_trainer.py
envs:
default: # default env; cases not assigned to another env run here (pytorch).
dependencies: # requirement packages, pip installed before test cases run.
- numpy>=1.20,<=1.21.0
- numpy>=1.20,<=1.22.0
- protobuf<4,>=3.20.2
tensorflow1x: # cases executed with the tensorflow 1.x framework.
requirements: # requirements files installed before test cases run.
- tensorflow1x.txt
dependencies: # requirement packages, pip installed before test cases run.
- numpy==1.18.5
- numpy<1.20.0
tests:
- test_text_to_speech.py
- test_csanmt_translation.py
- test_translation_trainer.py
- test_translation_evaluation_trainer.py
- test_ocr_detection.py
- test_automatic_speech_recognition.py
- test_image_matting.py
- test_person_image_cartoon.py
- test_skin_retouching.py
- test_image_style_transfer.py
- test_image_portrait_stylization_trainer.py
- test_language_identification.py
- test_language_guided_video_summarization_trainer.py
- test_motion_generation.py
- test_universal_matting.py
- test_dialog_modeling.py
- test_trainer.py
- test_abnormal_object_detection.py
- test_image_face_fusion.py
- test_ocr_detection_db_trainer.py
- test_language_guided_video_summarization.py
- test_interactive_translation_pipeline.py
- test_image_defrcn_fewshot_trainer.py
- test_automatic_post_editing.py
- test_human_reconstruction.py
- test_nerf_recon_acc_trainer.py
- test_nerf_recon_acc.py
- test_speech_signal_process.py
- test_tensorboard_hook.py
- test_efficient_diffusion_tuning_trainer.py
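
Read as a recipe, the tensorflow1x env above amounts to roughly the following setup before its listed cases run (a sketch of what the config encodes, not the actual CI runner; file locations and the test command are assumptions):
pip install -r tensorflow1x.txt             # files listed under "requirements:"
pip install "numpy<1.20.0"                  # pins listed under "dependencies:"
python -m pytest test_text_to_speech.py     # then run each case listed under "tests:"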