Mirror of https://github.com/modelscope/modelscope.git (synced 2025-12-22 19:19:21 +01:00)
Merge pull request #667 from modelscope/master-merge-internal20231208
Master merge internal20231208
@@ -1,19 +1,24 @@
 #!/bin/bash
 # default values.
-BASE_CPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:20.04
+BASE_CPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu
 BASE_GPU_CUDA113_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:20.04-cuda11.3.0-cudnn8-devel
 BASE_GPU_CUDA117_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:20.04-cuda11.7.1-cudnn8-devel
 BASE_GPU_CUDA118_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:20.04-cuda11.8.0-cudnn8-devel
+BASE_GPU_CUDA121_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:22.04-cuda11.8.0-cudnn8-devel
+BASE_GPU_CUDA122_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:22.04-cuda11.2.2-cudnn8-devel
 MODELSCOPE_REPO_ADDRESS=reg.docker.alibaba-inc.com/modelscope/modelscope
 python_version=3.7.13
 torch_version=1.11.0
 cuda_version=11.7.1
 cudatoolkit_version=11.3
 tensorflow_version=1.15.5
+os_version=20.04
 version=None
 is_cpu=False
+is_dryrun=False
 function usage(){
 echo "usage: build.sh "
+echo " --os=ubuntu_version set ubuntu os version, default: 20.04"
 echo " --python=python_version set python version, default: $python_version"
 echo " --cuda=cuda_version set cuda version,only[11.3.0, 11.7.1], fefault: $cuda_version"
 echo " --torch=torch_version set pytorch version, fefault: $torch_version"
@@ -21,9 +26,14 @@ function usage(){
 echo " --test option for run test before push image, only push on ci test pass"
 echo " --cpu option for build cpu version"
 echo " --push option for push image to remote repo"
+echo " --dryrun create Dockerfile not build"
 }
 for i in "$@"; do
 case $i in
+--os=*)
+os_version="${i#*=}"
+shift
+;;
 --python=*)
 python_version="${i#*=}"
 shift
@@ -52,6 +62,10 @@ for i in "$@"; do
 is_push=True
 shift # option for push image to remote repo
 ;;
+--dryrun)
+is_dryrun=True
+shift
+;;
 --help)
 usage
 exit 0
@@ -68,7 +82,7 @@ done

 if [ "$cuda_version" == 11.3.0 ]; then
 echo "Building base image cuda11.3.0"
-BASE_GPU_IMAGE=$BASE_GPU_CUDA113_IMAGE
+BASE_GPU_IMAGE=$os_version-$cudatoolkit_version-cudnn8-devel
 cudatoolkit_version=cu113
 elif [ "$cuda_version" == 11.7.1 ]; then
 echo "Building base image cuda11.7.1"
@@ -77,43 +91,55 @@ elif [ "$cuda_version" == 11.7.1 ]; then
 elif [ "$cuda_version" == 11.8.0 ]; then
 echo "Building base image cuda11.8.0"
 cudatoolkit_version=cu118
-BASE_GPU_IMAGE=$BASE_GPU_CUDA118_IMAGE
+BASE_GPU_IMAGE=$MODELSCOPE_REPO_ADDRESS:$os_version-cuda$cuda_version-cudnn8-devel
+elif [ "$cuda_version" == 12.1.0 ]; then
+cudatoolkit_version=cu121
+BASE_GPU_IMAGE=$BASE_GPU_CUDA121_IMAGE
 else
 echo "Unsupport cuda version: $cuda_version"
 exit 1
 fi

 if [ "$is_cpu" == "True" ]; then
-export BASE_IMAGE=$BASE_CPU_IMAGE
+export BASE_IMAGE=$BASE_CPU_IMAGE:$os_version
-base_tag=ubuntu20.04
+base_tag=ubuntu$os_version
 export USE_GPU=False
 else
 export BASE_IMAGE=$BASE_GPU_IMAGE
-base_tag=ubuntu20.04-cuda$cuda_version
+base_tag=ubuntu$os_version-cuda$cuda_version
 export USE_GPU=True
 fi

 if [[ $python_version == 3.7* ]]; then
 base_tag=$base_tag-py37
 elif [[ $python_version == 3.8* ]]; then
 base_tag=$base_tag-py38
+elif [[ $python_version == 3.10* ]]; then
+base_tag=$base_tag-py310
 else
 echo "Unsupport python version: $python_version"
 exit 1
 fi

 target_image_tag=$base_tag-torch$torch_version-tf$tensorflow_version-base
 export IMAGE_TO_BUILD=$MODELSCOPE_REPO_ADDRESS:$target_image_tag
 export PYTHON_VERSION=$python_version
 export TORCH_VERSION=$torch_version
 export CUDATOOLKIT_VERSION=$cudatoolkit_version
 export TENSORFLOW_VERSION=$tensorflow_version
+echo "From: $BASE_IMAGE build: $target_image_tag"
 echo -e "Building image with:\npython$python_version\npytorch$torch_version\ntensorflow:$tensorflow_version\ncudatoolkit:$cudatoolkit_version\ncpu:$is_cpu\n"
 docker_file_content=`cat docker/Dockerfile.ubuntu_base`
 printf "$docker_file_content" > Dockerfile

+if [ "$is_dryrun" == "True" ]; then
+echo 'Dockerfile created'
+exit 0
+fi

+# DOCKER_BUILDKIT=0
 while true
 do
-docker build -t $IMAGE_TO_BUILD \
+DOCKER_BUILDKIT=0 docker build -t $IMAGE_TO_BUILD \
 --build-arg USE_GPU \
 --build-arg BASE_IMAGE \
 --build-arg PYTHON_VERSION \
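For orientation, an invocation of this base-image build script using the options added above might look like the following sketch. The script path is an assumption (it is not named in this diff); only the flags themselves come from the usage text and case statement shown here:

```bash
# Hypothetical invocation: generate a Dockerfile for a Python 3.10 / CUDA 12.1
# base image on Ubuntu 22.04 without building it (--os and --dryrun are new in this change).
bash docker/build_base_image.sh \
    --os=22.04 \
    --python=3.10.13 \
    --cuda=12.1.0 \
    --dryrun
```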
@@ -44,6 +44,8 @@ for i in "$@"; do
 cudatoolkit_version=11.7
 elif [ "$cuda_version" == "11.8.0" ]; then
 cudatoolkit_version=11.8
+elif [ "$cuda_version" == "12.1.0" ]; then
+cudatoolkit_version=12.1
 else
 echo "Unsupport cuda version $cuda_version"
 exit 1
@@ -130,6 +132,17 @@ elif [[ $python_version == 3.8* ]]; then
 export BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-cuda$cuda_version-py38-torch$torch_version-tf$tensorflow_version-base
 fi
 base_tag=$base_tag-py38
+elif [[ $python_version == 3.10* ]]; then
+if [ "$is_cpu" == "True" ]; then
+echo "Building python3.10 cpu image"
+base_tag=ubuntu22.04-py310
+export BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu22.04-py310-torch$torch_version-tf$tensorflow_version-base
+else
+echo "Building python3.10 gpu image"
+base_tag=ubuntu22.04-cuda$cuda_version-py310
+# reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu22.04-cuda12.1.0-py310-torch2.1.0-tf2.14.0-base
+export BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu22.04-cuda$cuda_version-py310-torch$torch_version-tf$tensorflow_version-base
+fi
 else
 echo "Unsupport python version: $python_version"
 exit 1
@@ -150,7 +163,9 @@ echo -e "Building image with:\npython$python_version\npytorch$torch_version\nten
 docker_file_content=`cat docker/Dockerfile.ubuntu`
 if [ "$is_ci_test" != "True" ]; then
 echo "Building ModelScope lib, will install ModelScope lib to image"
-docker_file_content="${docker_file_content} \nRUN pip install --no-cache-dir -U funasr transformers && pip install --no-cache-dir https://modelscope.oss-cn-beijing.aliyuncs.com/releases/build/modelscope-$modelscope_version-py3-none-any.whl "
+docker_file_content="${docker_file_content} \nRUN export COMMIT_ID=$CIS_ENV_COMMIT_ID && pip install --no-cache-dir -U adaseq pai-easycv ms_swift funasr 'transformers<4.35.0'"
+docker_file_content="${docker_file_content} \nRUN pip uninstall modelscope -y && export COMMIT_ID=$CIS_ENV_COMMIT_ID && cd /tmp && GIT_LFS_SKIP_SMUDGE=1 git clone -b $CIS_ENV_BRANCH --single-branch $REPO_URL && cd MaaS-lib && pip install . && cd / && rm -fr /tmp/MaaS-lib"
+MMCV_WITH_OPS=1 MAX_JOBS=32 pip install --no-cache-dir 'mmcv-full<=1.7.0' && pip cache purge; \
 fi
 echo "$is_dsw"
 if [ "$is_dsw" == "False" ]; then
@@ -159,23 +174,22 @@ else
 echo "Building dsw image will need set ModelScope lib cache location."
 docker_file_content="${docker_file_content} \nENV MODELSCOPE_CACHE=/mnt/workspace/.cache/modelscope"
 # pre compile extension
-docker_file_content="${docker_file_content} \nRUN python -c 'from modelscope.utils.pre_compile import pre_compile_all;pre_compile_all()'"
+docker_file_content="${docker_file_content} \nRUN export TORCH_CUDA_ARCH_LIST='6.0;6.1;7.0;7.5;8.0;8.9;9.0;8.6+PTX' && python -c 'from modelscope.utils.pre_compile import pre_compile_all;pre_compile_all()'"
-if [ "$is_cpu" == "True" ]; then
-echo 'build cpu image'
-else
-# fix easycv extension and tinycudann conflict.
-docker_file_content="${docker_file_content} \nRUN bash /tmp/install_tiny_cuda_nn.sh"
-fi
 fi
 if [ "$is_ci_test" == "True" ]; then
 echo "Building CI image, uninstall modelscope"
 docker_file_content="${docker_file_content} \nRUN pip uninstall modelscope -y"
 fi
+docker_file_content="${docker_file_content} \n RUN cp /tmp/resources/conda.aliyun ~/.condarc && \
+pip config set global.index-url https://mirrors.aliyun.com/pypi/simple && \
+pip config set install.trusted-host mirrors.aliyun.com && \
+cp /tmp/resources/ubuntu2204.aliyun /etc/apt/sources.list "

 printf "$docker_file_content" > Dockerfile

 while true
 do
-docker build -t $IMAGE_TO_BUILD \
+DOCKER_BUILDKIT=0 docker build -t $IMAGE_TO_BUILD \
 --build-arg USE_GPU \
 --build-arg BASE_IMAGE \
 --build-arg PYTHON_VERSION \

.github/workflows/publish.yaml (4 lines changed, vendored)
@@ -15,10 +15,10 @@ jobs:
 #if: startsWith(github.event.ref, 'refs/tags')
 steps:
 - uses: actions/checkout@v2
-- name: Set up Python 3.7
+- name: Set up Python 3.10
 uses: actions/setup-python@v2
 with:
-python-version: '3.7'
+python-version: '3.10'
 - name: Install wheel
 run: pip install wheel && pip install -r requirements/framework.txt
 - name: Build ModelScope
@@ -1,10 +1,47 @@
 ARG BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-cuda11.3.0-py37-torch1.11.0-tf1.15.5-base
 FROM $BASE_IMAGE
+RUN apt-get update && \
-RUN apt-get update && apt-get install -y iputils-ping net-tools iproute2 && \
+apt-get install -y libsox-dev unzip zip iputils-ping telnet && \
 apt-get clean && \
 rm -rf /var/lib/apt/lists/*
-# install modelscope
+# install jupyter plugin
+RUN mkdir -p /root/.local/share/jupyter/labextensions/ && \
+cp -r /tmp/resources/jupyter_plugins/* /root/.local/share/jupyter/labextensions/
+
+COPY docker/scripts/modelscope_env_init.sh /usr/local/bin/ms_env_init.sh
+# python3.8 pip install git+https://github.com/jin-s13/xtcocoapi.git@v1.13
+# pip install git+https://github.com/gatagat/lap.git@v0.4.0
+RUN pip install --no-cache-dir numpy 'cython<=0.29.36' funtextprocessing kwsbp==0.0.6 safetensors typeguard==2.13.3 scikit-learn librosa==0.9.2 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html
+
+RUN pip install --no-cache-dir adaseq text2sql_lgesql==1.3.0 \
+git+https://github.com/jin-s13/xtcocoapi.git@v1.14 \
+git+https://github.com/gatagat/lap.git@v0.4.0 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html --force --no-deps
+
+RUN mv /opt/conda/compiler_compat/ld /opt/conda/compiler_compat/ldbk && \
+pip install --no-cache-dir mpi4py paint_ldm \
+mmcls>=0.21.0 mmdet>=2.25.0 decord>=0.6.0 \
+ipykernel fasttext fairseq deepspeed -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html
+
+ARG USE_GPU
+
+RUN if [ "$USE_GPU" = "True" ] ; then \
+CUDA_HOME=/usr/local/cuda TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0 7.5 8.0 8.6 8.9 9.0" pip install --no-cache-dir 'git+https://github.com/facebookresearch/detectron2.git'; \
+else \
+echo 'cpu unsupport detectron2'; \
+fi
+
+# torchmetrics==0.11.4 for ofa
+RUN if [ "$USE_GPU" = "True" ] ; then \
+pip install --no-cache-dir torchsde jupyterlab torchmetrics==0.11.4 tiktoken transformers_stream_generator bitsandbytes basicsr optimum && \
+pip install --no-cache-dir auto-gptq --extra-index-url https://huggingface.github.io/autogptq-index/whl/cu118/ && \
+pip install --no-cache-dir -U xformers --index-url https://download.pytorch.org/whl/cu118 && \
+pip install --no-cache-dir flash_attn==2.3.3+torch2.1cu118 tinycudann==1.7+cu118 vllm==0.2.1+cu118torch2.1 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html; \
+else \
+echo 'cpu unsupport vllm auto-gptq'; \
+fi
+
 COPY requirements /var/modelscope
 RUN pip install --no-cache-dir --upgrade pip && \
 pip install --no-cache-dir -r /var/modelscope/framework.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
@@ -16,47 +53,6 @@ RUN pip install --no-cache-dir --upgrade pip && \
 pip install --no-cache-dir -r /var/modelscope/tests.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
 pip cache purge
-
-# install jupyter plugin
-RUN mkdir -p /root/.local/share/jupyter/labextensions/ && \
-cp -r /tmp/resources/jupyter_plugins/* /root/.local/share/jupyter/labextensions/
-
-COPY docker/scripts/modelscope_env_init.sh /usr/local/bin/ms_env_init.sh
-# python3.8 pip install git+https://github.com/jin-s13/xtcocoapi.git@v1.13
-# pip install git+https://github.com/gatagat/lap.git@v0.4.0
-RUN pip install --no-cache-dir text2sql_lgesql==1.3.0 \
-git+https://github.com/jin-s13/xtcocoapi.git@v1.13 \
-git+https://github.com/gatagat/lap.git@v0.4.0 \
-detectron2==0.3 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html --force --no-deps
-
-RUN pip install --no-cache-dir mpi4py paint_ldm \
-mmcls>=0.21.0 mmdet>=2.25.0 decord>=0.6.0 pai-easycv ms_swift \
-ipykernel fasttext fairseq deepspeed -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html
-
-ARG USE_GPU
-# for cpu install cpu version faiss, faiss depends on blas lib, we install libopenblas TODO rename gpu or cpu version faiss
-RUN if [ "$USE_GPU" = "True" ] ; then \
-pip install --no-cache-dir funtextprocessing kwsbp==0.0.6 faiss==1.7.2 safetensors typeguard==2.13.3 scikit-learn librosa==0.9.2 funasr -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html; \
-else \
-pip install --no-cache-dir funtextprocessing kwsbp==0.0.6 https://modelscope.oss-cn-beijing.aliyuncs.com/releases/dependencies/faiss-1.7.2-py37-none-linux_x86_64.whl safetensors typeguard==2.13.3 scikit-learn librosa==0.9.2 funasr -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html; \
-fi
-
-RUN pip install --no-cache-dir wenetruntime==1.11.0 adaseq --no-deps
 COPY examples /modelscope/examples
-
-# for pai-easycv setup compatiblity issue
 ENV SETUPTOOLS_USE_DISTUTILS=stdlib
+ENV VLLM_USE_MODELSCOPE=True
-RUN if [ "$USE_GPU" = "True" ] ; then \
-CUDA_HOME=/usr/local/cuda TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0 7.5 8.0 8.6" pip install --no-cache-dir 'git+https://github.com/facebookresearch/detectron2.git'; \
-else \
-echo 'cpu unsupport detectron2'; \
-fi
-
-# torchmetrics==0.11.4 for ofa
-RUN pip install --no-cache-dir jupyterlab torchmetrics==0.11.4 tiktoken transformers_stream_generator 'protobuf<=3.20.0' bitsandbytes basicsr
-COPY docker/scripts/install_flash_attension.sh /tmp/install_flash_attension.sh
-RUN if [ "$USE_GPU" = "True" ] ; then \
-bash /tmp/install_flash_attension.sh; \
-else \
-echo 'cpu unsupport flash attention'; \
-fi
@@ -9,10 +9,11 @@ SHELL ["/bin/bash", "-c"]
 COPY docker/rcfiles /tmp/resources
 COPY docker/jupyter_plugins /tmp/resources/jupyter_plugins
 RUN apt-get update && apt-get install -y --reinstall ca-certificates && \
-apt-get clean && \
+apt-get install -y apt-utils openssh-server locales wget git strace gdb sox libopenmpi-dev curl \
-cp /tmp/resources/sources.list.aliyun /etc/apt/sources.list && \
+iputils-ping net-tools iproute2 autoconf automake gperf libre2-dev libssl-dev \
-apt-get update && \
+libtool libcurl4-openssl-dev libb64-dev libgoogle-perftools-dev patchelf \
-apt-get install -y locales wget git strace gdb sox libopenmpi-dev curl \
+rapidjson-dev scons software-properties-common pkg-config unzip zlib1g-dev \
+libarchive-dev libxml2-dev libnuma-dev \
 libgeos-dev strace vim ffmpeg libsm6 tzdata language-pack-zh-hans \
 ttf-wqy-microhei ttf-wqy-zenhei xfonts-wqy libxext6 build-essential ninja-build && \
 wget https://packagecloud.io/github/git-lfs/packages/debian/bullseye/git-lfs_3.2.0_amd64.deb/download -O ./git-lfs_3.2.0_amd64.deb && \
@@ -27,33 +28,17 @@ RUN apt-get update && apt-get install -y --reinstall ca-certificates && \
 rm -rf /var/lib/apt/lists/*

 ENV LANG=zh_CN.UTF-8 LANGUAGE=zh_CN.UTF-8 LC_ALL=zh_CN.UTF-8
+RUN wget -O /tmp/boost.tar.gz https://boostorg.jfrog.io/artifactory/main/release/1.80.0/source/boost_1_80_0.tar.gz && (cd /tmp && tar xzf boost.tar.gz) && mv /tmp/boost_1_80_0/boost /usr/include/boost
+
 #install and config python
-ARG PYTHON_VERSION=3.7.13
+ARG PYTHON_VERSION=3.10.13
 # Miniconda3-py37_23.1.0-1-Linux-x86_64.sh is last python3.7 version
-RUN if [ "$PYTHON_VERSION" = "3.7.13" ] ; then \
+RUN wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-py310_23.9.0-0-Linux-x86_64.sh -O ./miniconda.sh && \
-wget --quiet https://mirrors.aliyun.com/anaconda/miniconda/Miniconda3-py37_23.1.0-1-Linux-x86_64.sh -O ./miniconda.sh && \
 /bin/bash miniconda.sh -b -p /opt/conda && \
 rm -f miniconda.sh && \
 ln -s /opt/conda/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \
 echo ". /opt/conda/etc/profile.d/conda.sh" >> ~/.bashrc && \
-cp /tmp/resources/conda.tuna ~/.condarc && \
+source /root/.bashrc
-source /root/.bashrc && \
-conda install --yes python==${PYTHON_VERSION} && \
-pip config set global.index-url https://mirrors.aliyun.com/pypi/simple && \
-pip config set install.trusted-host mirrors.aliyun.com;\
-else \
-wget --quiet https://mirrors.aliyun.com/anaconda/miniconda/Miniconda3-latest-Linux-${arch}.sh -O ./miniconda.sh && \
-/bin/bash miniconda.sh -b -p /opt/conda && \
-rm -f miniconda.sh && \
-ln -s /opt/conda/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \
-echo ". /opt/conda/etc/profile.d/conda.sh" >> ~/.bashrc && \
-cp /tmp/resources/conda.tuna ~/.condarc && \
-source /root/.bashrc && \
-conda install --yes python==${PYTHON_VERSION} && \
-pip config set global.index-url https://mirrors.aliyun.com/pypi/simple && \
-pip config set install.trusted-host mirrors.aliyun.com;\
-fi

 ARG USE_GPU=True

@@ -85,12 +70,6 @@ RUN if [ "$USE_GPU" = "True" ] ; then \
 fi \
 fi

-# mmcv-full<=1.7.0 for mmdet3d compatible
-RUN if [ "$USE_GPU" = "True" ] ; then \
-CUDA_HOME=/usr/local/cuda TORCH_CUDA_ARCH_LIST="5.0 5.2 6.0 6.1 7.0 7.5 8.0 8.6" MMCV_WITH_OPS=1 MAX_JOBS=8 FORCE_CUDA=1 pip install --no-cache-dir 'mmcv-full<=1.7.0' && pip cache purge; \
-else \
-MMCV_WITH_OPS=1 MAX_JOBS=8 pip install --no-cache-dir 'mmcv-full<=1.7.0' && pip cache purge; \
-fi

 # default shell bash
 ENV SHELL=/bin/bash
@@ -98,12 +77,25 @@ ENV SHELL=/bin/bash
 RUN if [ "$USE_GPU" = "True" ] ; then \
 pip install dgl -f https://data.dgl.ai/wheels/$CUDATOOLKIT_VERSION/repo.html; \
 else \
-pip install --no-cache-dir dgl==0.9.0 dglgo -f https://data.dgl.ai/wheels/repo.html; \
+pip install --no-cache-dir dgl dglgo -f https://data.dgl.ai/wheels/repo.html; \
 fi

 # copy install scripts
 COPY docker/scripts/install_unifold.sh docker/scripts/install_colmap.sh docker/scripts/install_pytorch3d_nvdiffrast.sh docker/scripts/install_tiny_cuda_nn.sh docker/scripts/install_apex.sh /tmp/

+# 3d supports
+RUN if [ "$USE_GPU" = "True" ] ; then \
+bash /tmp/install_colmap.sh; \
+else \
+echo 'cpu unsupport colmap'; \
+fi
+# install pytorch3d
+RUN if [ "$USE_GPU" = "True" ] ; then \
+bash /tmp/install_pytorch3d_nvdiffrast.sh; \
+else \
+echo 'cpu unsupport pytorch3d nvdiffrast'; \
+fi

 # for uniford
 RUN if [ "$USE_GPU" = "True" ] ; then \
 bash /tmp/install_unifold.sh; \
@@ -112,28 +104,11 @@ RUN if [ "$USE_GPU" = "True" ] ; then \
 fi

 RUN if [ "$USE_GPU" = "True" ] ; then \
-export TORCH_CUDA_ARCH_LIST="6.0;6.1;7.0;7.5;8.0;8.6+PTX" && pip install --no-cache-dir git+https://github.com/gxd1994/Pointnet2.PyTorch.git@master#subdirectory=pointnet2; \
+export TORCH_CUDA_ARCH_LIST="6.0;6.1;7.0;7.5;8.0;8.9;9.0;8.6+PTX" && pip install --no-cache-dir git+https://github.com/gxd1994/Pointnet2.PyTorch.git@master#subdirectory=pointnet2; \
 else \
 echo 'cpu unsupport Pointnet2'; \
 fi

-# 3d supports
-RUN if [ "$USE_GPU" = "True" ] ; then \
-bash /tmp/install_colmap.sh; \
-else \
-echo 'cpu unsupport colmap'; \
-fi
-RUN if [ "$USE_GPU" = "True" ] ; then \
-bash /tmp/install_tiny_cuda_nn.sh \
-else \
-echo 'cpu unsupport tiny_cudann'; \
-fi
-RUN if [ "$USE_GPU" = "True" ] ; then \
-bash /tmp/install_pytorch3d_nvdiffrast.sh; \
-else \
-echo 'cpu unsupport pytorch3d nvdiffrast'; \
-fi
-# end of 3D
 # install apex after deepspeed
 RUN if [ "$USE_GPU" = "True" ] ; then \
 bash /tmp/install_apex.sh; \
@@ -141,4 +116,10 @@ RUN if [ "$USE_GPU" = "True" ] ; then \
 echo 'cpu unsupport apex'; \
 fi

+RUN if [ "$USE_GPU" = "True" ] ; then \
+pip install --no-cache-dir https://modelscope.oss-cn-beijing.aliyuncs.com/packages/mmcv_full-1.7.0-cp310-cp310-linux_x86_64.whl; \
+else \
+pip install --no-cache-dir mmcv_full==1.7.0+torch2.1cpu -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html; \
+fi
+RUN conda install imageio-ffmpeg -c conda-forge -y
 ENTRYPOINT []
docker/rcfiles/conda.aliyun (new file, 14 lines)
@@ -0,0 +1,14 @@
+channels:
+- defaults
+show_channel_urls: true
+default_channels:
+- http://mirrors.aliyun.com/anaconda/pkgs/main
+- http://mirrors.aliyun.com/anaconda/pkgs/r
+- http://mirrors.aliyun.com/anaconda/pkgs/msys2
+custom_channels:
+conda-forge: http://mirrors.aliyun.com/anaconda/cloud
+msys2: http://mirrors.aliyun.com/anaconda/cloud
+bioconda: http://mirrors.aliyun.com/anaconda/cloud
+menpo: http://mirrors.aliyun.com/anaconda/cloud
+pytorch: http://mirrors.aliyun.com/anaconda/cloud
+simpleitk: http://mirrors.aliyun.com/anaconda/cloud
@@ -1,15 +0,0 @@
-channels:
-- defaults
-show_channel_urls: true
-default_channels:
-- https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main
-- https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/r
-- https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/msys2
-custom_channels:
-conda-forge: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud
-msys2: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud
-bioconda: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud
-menpo: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud
-pytorch: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud
-pytorch-lts: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud
-simpleitk: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud
@@ -1,2 +0,0 @@
-[global]
-index-url=https://pypi.tuna.tsinghua.edu.cn/simple
docker/rcfiles/ubuntu2204.aliyun (new file, 10 lines)
@@ -0,0 +1,10 @@
+deb http://mirrors.aliyun.com/ubuntu/ jammy main restricted universe multiverse
+#deb-src http://mirrors.aliyun.com/ubuntu/ jammy main restricted universe multiverse
+deb http://mirrors.aliyun.com/ubuntu/ jammy-security main restricted universe multiverse
+#deb-src http://mirrors.aliyun.com/ubuntu/ jammy-security main restricted universe multiverse
+deb http://mirrors.aliyun.com/ubuntu/ jammy-updates main restricted universe multiverse
+#deb-src http://mirrors.aliyun.com/ubuntu/ jammy-updates main restricted universe multiverse
+#deb http://mirrors.aliyun.com/ubuntu/ jammy-proposed main restricted universe multiverse
+#deb-src http://mirrors.aliyun.com/ubuntu/ jammy-proposed main restricted universe multiverse
+deb http://mirrors.aliyun.com/ubuntu/ jammy-backports main restricted universe multiverse
+#deb-src http://mirrors.aliyun.com/ubuntu/ jammy-backports main restricted universe multiverse
@@ -2,6 +2,6 @@ export MAX_JOBS=16 \
 && git clone https://github.com/NVIDIA/apex \
 && cd apex \
 && git checkout 6bd01c4b99a84648ad5e5238a959735e6936c813 \
-&& TORCH_CUDA_ARCH_LIST="6.0;6.1;6.2;7.0;7.5;8.0;8.6" pip install -v --disable-pip-version-check --no-cache --global-option="--cpp_ext" --global-option="--cuda_ext" ./ \
+&& TORCH_CUDA_ARCH_LIST="6.0;6.1;6.2;7.0;7.5;8.0;8.9;9.0;8.6+PTX" pip install -v --disable-pip-version-check --no-cache --global-option="--cpp_ext" --global-option="--cuda_ext" ./ \
 && cd .. \
 && rm -rf apex
@@ -8,7 +8,7 @@ wget -q https://cmake.org/files/v3.25/cmake-3.25.2-linux-x86_64.sh \
 && export CMAKE_BUILD_PARALLEL_LEVEL=36 \
 && export MAX_JOBS=16 \
 && export CUDA_ARCHITECTURES="all" \
-&& git clone --depth 1 --branch 3.8 https://github.com/colmap/colmap.git \
+&& git clone https://github.com/colmap/colmap.git \
 && cd colmap \
 && mkdir build \
 && cd build \
@@ -1,4 +1,4 @@
-git clone -b v2.3.2 https://github.com/Dao-AILab/flash-attention && \
+git clone -b v2.3.3 https://github.com/Dao-AILab/flash-attention && \
-cd flash-attention && python setup.py install && \
+cd flash-attention && MAX_JOBS=46 python setup.py install && \
 cd .. && \
 rm -rf flash-attention
@@ -1,6 +1,7 @@
 export CMAKE_BUILD_PARALLEL_LEVEL=36 \
 && export MAX_JOBS=36 \
-&& export CMAKE_CUDA_ARCHITECTURES="50;52;60;61;70;75;80;86" \
+&& export CMAKE_CUDA_ARCHITECTURES="50;52;60;61;70;75;80;8.6+PTX;87;89;90" \
+&& export TORCH_CUDA_ARCH_LIST="5.0;5.2;6.0;6.1;7.0;7.5;8.0;8.6+PTX;8.7;8.9;9.0" \
 && git clone --branch 2.1.0 --recursive https://github.com/NVIDIA/thrust.git \
 && cd thrust \
 && mkdir build \
@@ -10,7 +11,11 @@ export CMAKE_BUILD_PARALLEL_LEVEL=36 \
 && cd ../.. \
 && rm -rf thrust \
 && pip install --no-cache-dir fvcore iopath \
-&& pip install "git+https://github.com/facebookresearch/pytorch3d.git@stable" \
+&& curl -LO https://github.com/NVIDIA/cub/archive/2.1.0.tar.gz \
+&& tar xzf 2.1.0.tar.gz \
+&& export CUB_HOME=$PWD/cub-2.1.0 \
+&& FORCE_CUDA=1 pip install "git+https://github.com/facebookresearch/pytorch3d.git@stable" \
+&& rm -fr 2.1.0.tar.gz $PWD/cub-2.1.0 \
 && apt-get update \
 && apt-get install -y --no-install-recommends pkg-config libglvnd0 libgl1 libglx0 libegl1 libgles2 libglvnd-dev libgl1-mesa-dev libegl1-mesa-dev libgles2-mesa-dev -y \
 && git clone https://github.com/NVlabs/nvdiffrast.git \
@@ -1,7 +1,6 @@
-export CMAKE_BUILD_PARALLEL_LEVEL=36 && export MAX_JOBS=36 && export TCNN_CUDA_ARCHITECTURES="50;52;60;61;70;75;80;86" \
+export CMAKE_BUILD_PARALLEL_LEVEL=36 && export MAX_JOBS=36 && export TCNN_CUDA_ARCHITECTURES="50;52;60;61;70;75;80;89;90;86" \
 && git clone --recursive https://github.com/nvlabs/tiny-cuda-nn \
 && cd tiny-cuda-nn \
-&& git checkout v1.6 \
 && cd bindings/torch \
 && python setup.py install \
 && cd ../../.. \
docs/source/server.md (new file, 41 lines)
@@ -0,0 +1,41 @@
+# Using the modelscope server
+## 1. General service
+The modelscope library includes a simple model service built on FastAPI; most models can be brought up with a single command.
+Usage:
+
+```bash
+modelscope server --model_id=modelscope/Llama-2-7b-chat-ms --revision=v1.0.5
+```
+The official image can also start the service with a single command (the image is not finished yet):
+```bash
+docker run --rm --name maas_dev --shm-size=50gb --gpus='"device=0"' -e MODELSCOPE_CACHE=/modelscope_cache -v /host_path_to_modelscope_cache:/modelscope_cache -p 8000:8000 reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu22.04-cuda11.8.0-py310-torch2.1.0-tf2.14.0-1.9.5-server modelscope server --model_id=modelscope/Llama-2-7b-chat-ms --revision=v1.0.5
+```
+The service listens on port 8000 by default; the port can be changed with --port. Two endpoints are provided by default, and their API documentation is available at
+http://ip:port/docs
+The describe endpoint returns the service's input/output schema together with sample input data (screenshot not preserved in this mirror).
+To call the inference endpoint, the example data returned by describe can be copied directly as the request body (screenshot not preserved in this mirror).
+
+## 2. vllm inference for large models
+For LLMs, vllm-based inference is provided; currently only some models support vllm.
+
+### 2.1 Using ModelScope models directly with vllm
+Setting an environment variable makes vllm download models from www.modelscope.cn.
+
+Start the plain API server:
+```bash
+VLLM_USE_MODELSCOPE=True python -m vllm.entrypoints.api_server --model="damo/nlp_gpt2_text-generation_english-base" --revision="v1.0.0"
+```
+Start the OpenAI-compatible server:
+```bash
+VLLM_USE_MODELSCOPE=True python -m vllm.entrypoints.openai.api_server --model="damo/nlp_gpt2_text-generation_english-base" --revision="v1.0.0"
+```
+
+If the model already exists in the modelscope cache directory, the cached copy is used directly; otherwise the model is downloaded from www.modelscope.cn.
+
+Start vllm from the official modelscope image, with the port set to 9090:
+
+```bash
+docker run --rm --name maas_dev --shm-size=50gb --gpus='"device=0"' -e MODELSCOPE_CACHE=/modelscope_cache -v /host_path_to_modelscope_cache:/modelscope_cache -p 9090:9090 reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu22.04-cuda11.8.0-py310-torch2.1.0-tf2.14.0-1.9.5-server python -m vllm.entrypoints.api_server --model "modelscope/Llama-2-7b-chat-ms" --revision "v1.0.5" --port 9090
+```
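As a rough illustration of the workflow described in the document above, the two default endpoints could be exercised with curl once the server is up. This is a sketch only: the host, port and route prefix are assumptions, the exact paths are listed at http://ip:port/docs, and the real request body should be copied from the describe response:

```bash
# Fetch the input/output schema and a sample request body (hypothetical root prefix).
curl http://127.0.0.1:8000/describe

# Call the inference endpoint with a body copied from the describe sample;
# binary fields (images, audio, video) must be base64 encoded.
curl -X POST http://127.0.0.1:8000/call \
     -H "Content-Type: application/json" \
     -d '{"input": {"text": "hello"}}'   # placeholder body, copy the real one from describe
```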
@@ -6,6 +6,7 @@ from modelscope.cli.download import DownloadCMD
 from modelscope.cli.modelcard import ModelCardCMD
 from modelscope.cli.pipeline import PipelineCMD
 from modelscope.cli.plugins import PluginsCMD
+from modelscope.cli.server import ServerCMD


 def run_cmd():
@@ -17,6 +18,7 @@ def run_cmd():
 PluginsCMD.define_args(subparsers)
 PipelineCMD.define_args(subparsers)
 ModelCardCMD.define_args(subparsers)
+ServerCMD.define_args(subparsers)

 args = parser.parse_args()
modelscope/cli/server.py (new file, 40 lines)
@@ -0,0 +1,40 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
+import os
+from argparse import ArgumentParser
+from string import Template
+
+import uvicorn
+
+from modelscope.cli.base import CLICommand
+from modelscope.server.api_server import add_server_args, get_app
+from modelscope.utils.logger import get_logger
+
+logger = get_logger()
+
+current_path = os.path.dirname(os.path.abspath(__file__))
+template_path = os.path.join(current_path, 'template')
+
+
+def subparser_func(args):
+    """ Function which will be called for a specific sub parser.
+    """
+    return ServerCMD(args)
+
+
+class ServerCMD(CLICommand):
+    name = 'server'
+
+    def __init__(self, args):
+        self.args = args
+
+    @staticmethod
+    def define_args(parsers: ArgumentParser):
+        """ define args for create pipeline template command.
+        """
+        parser = parsers.add_parser(ServerCMD.name)
+        add_server_args(parser)
+        parser.set_defaults(func=subparser_func)
+
+    def execute(self):
+        app = get_app(self.args)
+        uvicorn.run(app, host=self.args.host, port=self.args.port)
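As a usage sketch, the new `modelscope server` sub-command above feeds its parsed arguments into uvicorn via `execute()`. Following the documentation in docs/source/server.md, an invocation might look like the line below; the --port value is illustrative, and any additional flags registered by add_server_args are not shown in this diff:

```bash
# Start the FastAPI model service on a non-default port.
modelscope server --model_id=modelscope/Llama-2-7b-chat-ms --revision=v1.0.5 --port 8001
```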
@@ -493,8 +493,9 @@ class HubApi:
 if len(revisions) > 0:
 revision = revisions[0]  # use latest revision before release time.
 else:
+revision = MASTER_MODEL_BRANCH
 vl = '[%s]' % ','.join(all_revisions)
-raise NoValidRevisionError('Model revision should be specified from revisions: %s' % (vl))
+logger.warning('Model revision should be specified from revisions: %s' % (vl))
 logger.warning('Model revision not specified, use revision: %s' % revision)
 else:
 # use user-specified revision
@@ -600,7 +601,7 @@ class HubApi:
 cookies = ModelScopeConfig.get_cookies()
 r = self.session.get(datahub_url, cookies=cookies)
 resp = r.json()
-datahub_raise_on_error(datahub_url, resp)
+datahub_raise_on_error(datahub_url, resp, r)
 dataset_id = resp['Data']['Id']
 dataset_type = resp['Data']['Type']
 return dataset_id, dataset_type
@@ -613,7 +614,7 @@ class HubApi:
 cookies=cookies,
 headers=self.builder_headers(self.headers))
 resp = r.json()
-datahub_raise_on_error(datahub_url, resp)
+datahub_raise_on_error(datahub_url, resp, r)
 file_list = resp['Data']
 if file_list is None:
 raise NotExistError(
@@ -866,7 +867,7 @@ class HubApi:
 cookies=cookies,
 headers={'user-agent': ModelScopeConfig.get_user_agent()})
 resp = r.json()
-datahub_raise_on_error(url, resp)
+datahub_raise_on_error(url, resp, r)
 return resp['Data']

 def dataset_download_statistics(self, dataset_name: str, namespace: str, use_streaming: bool) -> None:
@@ -117,12 +117,13 @@ def raise_on_error(rsp):
 raise RequestError(rsp['Message'])


-def datahub_raise_on_error(url, rsp):
+def datahub_raise_on_error(url, rsp, http_response: requests.Response):
 """If response error, raise exception

 Args:
 url (str): The request url
 rsp (HTTPResponse): The server response.
+http_response: the origin http response.

 Raises:
 RequestError: the http request error.
@@ -133,7 +134,7 @@ def datahub_raise_on_error(url, rsp):
 if rsp.get('Code') == HTTPStatus.OK:
 return True
 else:
-request_id = get_request_id(rsp)
+request_id = get_request_id(http_response)
 raise RequestError(
 f"Url = {url}, Request id={request_id} Message = {rsp.get('Message')},\
 Please specify correct dataset_name and namespace.")
@@ -19,6 +19,7 @@ from modelscope.metainfo import Models
 from modelscope.models import MODELS, TorchModel
 from modelscope.models.audio.sv.fusion import AFF
 from modelscope.utils.constant import Tasks
+from modelscope.utils.device import create_device


 class ReLU(nn.Hardtanh):
@@ -314,6 +315,7 @@ class SpeakerVerificationERes2Net(TorchModel):
 self.m_channels = self.model_config['channels']
 self.other_config = kwargs
 self.feature_dim = 80
+self.device = create_device(self.other_config['device'])

 self.embedding_model = ERes2Net(
 embed_dim=self.embed_dim, m_channels=self.m_channels)
@@ -321,6 +323,7 @@ class SpeakerVerificationERes2Net(TorchModel):
 pretrained_model_name = kwargs['pretrained_model']
 self.__load_check_point(pretrained_model_name)

+self.embedding_model.to(self.device)
 self.embedding_model.eval()

 def forward(self, audio):
@@ -333,7 +336,7 @@ class SpeakerVerificationERes2Net(TorchModel):
 ) == 2, 'modelscope error: the shape of input audio to model needs to be [N, T]'
 # audio shape: [N, T]
 feature = self.__extract_feature(audio)
-embedding = self.embedding_model(feature)
+embedding = self.embedding_model(feature.to(self.device))

 return embedding.detach().cpu()
@@ -19,6 +19,7 @@ from modelscope.metainfo import Models
 from modelscope.models import MODELS, TorchModel
 from modelscope.models.audio.sv.fusion import AFF
 from modelscope.utils.constant import Tasks
+from modelscope.utils.device import create_device


 class ReLU(nn.Hardtanh):
@@ -308,12 +309,13 @@ class SpeakerVerificationERes2Net(TorchModel):
 self.model_config = model_config
 self.other_config = kwargs
 self.feature_dim = 80
+self.device = create_device(self.other_config['device'])
 self.embedding_model = ERes2Net_aug()

 pretrained_model_name = kwargs['pretrained_model']
 self.__load_check_point(pretrained_model_name)

+self.embedding_model.to(self.device)
 self.embedding_model.eval()

 def forward(self, audio):
@@ -326,7 +328,7 @@ class SpeakerVerificationERes2Net(TorchModel):
 ) == 2, 'modelscope error: the shape of input audio to model needs to be [N, T]'
 # audio shape: [N, T]
 feature = self.__extract_feature(audio)
-embedding = self.embedding_model(feature)
+embedding = self.embedding_model(feature.to(self.device))

 return embedding.detach().cpu()
@@ -767,6 +767,7 @@ def align_img(img, lm, lm3D, mask=None, target_size=224., rescale_factor=102.):

 # calculate translation and scale factors using 5 facial landmarks and standard landmarks of a 3D face
 t, s = POS(lm5p.transpose(), lm3D.transpose())
+t = t.squeeze()
 s = rescale_factor / s

 # processing the image
@@ -109,7 +109,7 @@ class ReferYoutubeVOSPostProcess(nn.Module):
 1)  # remove the padding
 # resize the masks back to their original frames dataset size for evaluation:
 original_frames_size = video_metadata['original_frame_size']
-tuple_size = tuple(original_frames_size.cpu().numpy())
+tuple_size = tuple(original_frames_size.cpu())
 video_pred_masks = F.interpolate(
 video_pred_masks.float(), size=tuple_size, mode='nearest')
 video_pred_masks = video_pred_masks.to(torch.uint8).cpu()
@@ -72,7 +72,7 @@ class Human3DAnimationPipeline(Pipeline):
 (case_name, action_name))
 exec_path = os.path.join(self.model_dir, 'skinning.py')

-cmd = f'blender -b -P {exec_path} -- --input {self.case_dir}' \
+cmd = f'{self.blender} -b -P {exec_path} -- --input {self.case_dir}' \
 f' --gltf_path {gltf_path} --action {self.action}'
 os.system(cmd)
 return gltf_path
@@ -83,9 +83,6 @@ class Human3DAnimationPipeline(Pipeline):
 mesh = read_obj(mesh_path)
 tex = cv2.imread(tex_path)
 vertices = mesh['vertices']
-cent = (vertices.max(axis=0) + vertices.min(axis=0)) / 2
-new_cent = (0, 1.8 / 2, 0)
-vertices -= (cent - new_cent)
 mesh['vertices'] = vertices
 mesh['texture_map'] = tex
 write_obj(mesh_path, mesh)
@@ -108,6 +105,11 @@ class Human3DAnimationPipeline(Pipeline):
 else:
 save_dir = None

+if 'blender' in input:
+self.blender = input['blender']
+else:
+self.blender = 'blender'
+
 if case_id.endswith('.obj'):
 mesh_path = case_id
 else:
@@ -68,6 +68,8 @@ class Human3DRenderPipeline(Pipeline):
|
|||||||
|
|
||||||
     def format_nvdiffrast_format(self, mesh, tex):
         vert = mesh['vertices']
+        cent = (vert.max(axis=0) + vert.min(axis=0)) / 2
+        vert -= cent
         tri = mesh['faces']
         tri = tri - 1 if tri.min() == 1 else tri
         vert_uv = mesh['uvs']
@@ -81,7 +83,7 @@ class Human3DRenderPipeline(Pipeline):
         tex = torch.from_numpy(tex.astype(np.float32) / 255.0).cuda()
         return vtx_pos, pos_idx, vtx_uv, uv_idx, tex

-    def render_scene(self, mesh_path):
+    def render_scene(self, mesh_path, resolution=512):
         if not os.path.exists(mesh_path):
             logger.info('can not found %s, use default one' % mesh_path)
             mesh_path = os.path.join(self.model_dir, '3D-assets',
@@ -99,8 +101,8 @@ class Human3DRenderPipeline(Pipeline):
         frames_normals = []
         for i in tqdm.tqdm(range(frame_length)):
             proj = projection(x=0.4, n=1.0, f=200.0)
-            a_rot = np.matmul(rotate_x(-0.1), rotate_y(ang))
-            a_mv = np.matmul(translate(0, 0, -2.5), a_rot)
+            a_rot = np.matmul(rotate_x(0.0), rotate_y(ang))
+            a_mv = np.matmul(translate(0, 0, -2.7), a_rot)
             r_mvp = np.matmul(proj, a_mv).astype(np.float32)
             pred_img, pred_mask, normal = render(
                 glctx,
@@ -110,7 +112,7 @@ class Human3DRenderPipeline(Pipeline):
                 vtx_uv,
                 uv_idx,
                 tex,
-                resolution=512,
+                resolution=resolution,
                 enable_mip=False,
                 max_mip_level=9)
             color = np.clip(
@@ -123,7 +125,7 @@ class Human3DRenderPipeline(Pipeline):
             frames_normals.append(normals)
             ang = ang + step

-        logger.info('load case %s done'
+        logger.info('render case %s done'
                     % os.path.basename(os.path.dirname(mesh_path)))

         return mesh, frames_color, frames_normals
@@ -131,6 +133,10 @@ class Human3DRenderPipeline(Pipeline):
     def forward(self, input: Dict[str, Any]) -> Dict[str, Any]:
         dataset_id = input['dataset_id']
         case_id = input['case_id']
+        if 'resolution' in input:
+            resolution = input['resolution']
+        else:
+            resolution = 512
         if case_id.endswith('.obj'):
             mesh_path = case_id
         else:
@@ -142,7 +148,7 @@ class Human3DRenderPipeline(Pipeline):
             case_dir = os.path.join(data_dir, case_id)
             mesh_path = os.path.join(case_dir, 'body.obj')

-        mesh, colors, normals = self.render_scene(mesh_path)
+        mesh, colors, normals = self.render_scene(mesh_path, resolution)

         results = {
             'mesh': mesh,
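For illustration, a minimal sketch of exercising the new optional 'resolution' key; 'human3d' is assumed to be an already-constructed Human3DRenderPipeline instance, and the dataset/case ids are copied from the unit test further down in this change:

# Sketch only: 'human3d' is an assumed pipeline instance, not defined in this diff.
input = {
    'dataset_id': 'damo/3DHuman_synthetic_dataset',
    'case_id': '3f2a7538253e42a8',
    'resolution': 1024,  # optional; forward() falls back to 512 when omitted
}
output = human3d(input)  # contains the mesh plus the rendered colors and normals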
modelscope/server/__init__.py (new file, 0 lines)
modelscope/server/api/__init__.py (new file, 0 lines)
modelscope/server/api/routers/__init__.py (new file, 0 lines)
modelscope/server/api/routers/health.py (new file, 14 lines)
@@ -0,0 +1,14 @@
+from faulthandler import disable
+from http import HTTPStatus
+from typing import Any, Dict
+
+from fastapi import APIRouter
+
+from modelscope.server.models.output import ApiResponse
+
+router = APIRouter()
+
+
+@router.get('', response_model=ApiResponse[Dict], status_code=200)
+def health() -> Any:
+    return ApiResponse[Dict](Data={}, Code=HTTPStatus.OK, Success=True)
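A hedged client-side sketch of the route above; it assumes a running modelscope_server on the argparse defaults (0.0.0.0:8000) and that health.router is mounted under '/health' as in router.py below:

# Sketch only: assumes the server added below is already running locally on port 8000.
import requests

resp = requests.get('http://127.0.0.1:8000/health')
print(resp.status_code)  # expected: 200
print(resp.json())       # ApiResponse fields: Code, Success, RequestId, Message, Data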
modelscope/server/api/routers/model_router.py (new file, 45 lines)
@@ -0,0 +1,45 @@
+from fastapi import APIRouter, Body
+from pydantic import BaseModel
+from starlette.requests import Request
+
+from modelscope.utils.input_output import \
+    pipeline_output_to_service_base64_output  # noqa E125
+from modelscope.utils.input_output import call_pipeline_with_json
+
+router = APIRouter()
+
+
+@router.post('/call')
+async def inference(
+        request: Request,
+        body: BaseModel = Body(examples=[{
+            'usage': 'copy body from describe'
+        }])):  # noqa E125
+    """Inference general interface.
+
+    For image, video, audio etc binary data, need encoded with base64.
+
+    Args:
+        request (Request): The request object.
+        request_info (ModelScopeRequest): The post body.
+
+    Returns:
+        ApiResponse: For binary field, encoded with base64
+    """
+    pipeline_service = request.app.state.pipeline
+    pipeline_info = request.app.state.pipeline_info
+    request_json = await request.json()
+    result = call_pipeline_with_json(pipeline_info, pipeline_service,
+                                     request_json)
+    # convert output to json, if binary field, we need encoded.
+    output = pipeline_output_to_service_base64_output(
+        pipeline_info['task_name'], result)
+    return output
+
+
+@router.get('/describe')
+async def describe(request: Request):
+    info = {}
+    info['schema'] = request.app.state.pipeline_info
+    info['sample'] = request.app.state.pipeline_sample
+    return info
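A hedged client sketch for the two routes above; the host and port are assumptions taken from the server defaults in api_server.py, and the sample returned by /describe is assumed to be directly postable to /call:

# Sketch only: minimal client for /describe and /call on a locally running server.
import requests

base = 'http://127.0.0.1:8000'
desc = requests.get(base + '/describe').json()
print(desc['schema'])        # input/output schema of the loaded pipeline
sample = desc.get('sample')  # registered task input example, if any

resp = requests.post(base + '/call', json=sample)
print(resp.json())           # binary fields come back base64-encoded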
modelscope/server/api/routers/router.py (new file, 8 lines)
@@ -0,0 +1,8 @@
+from fastapi import APIRouter
+from starlette.routing import Route, WebSocketRoute
+
+from modelscope.server.api.routers import health, model_router
+
+api_router = APIRouter()
+api_router.include_router(model_router.router, tags=['prediction'], prefix='')
+api_router.include_router(health.router, tags=['health'], prefix='/health')
modelscope/server/api_server.py (new file, 45 lines)
@@ -0,0 +1,45 @@
+import argparse
+
+import uvicorn
+from fastapi import FastAPI
+
+from modelscope.server.api.routers.router import api_router
+from modelscope.server.core.event_handlers import (start_app_handler,
+                                                   stop_app_handler)
+
+
+def get_app(args) -> FastAPI:
+    app = FastAPI(
+        title='modelscope_server',
+        version='0.1',
+        debug=True,
+        swagger_ui_parameters={'tryItOutEnabled': True})
+    app.state.args = args
+    app.include_router(api_router)
+
+    app.add_event_handler('startup', start_app_handler(app))
+    app.add_event_handler('shutdown', stop_app_handler(app))
+    return app
+
+
+def add_server_args(parser):
+    parser.add_argument(
+        '--model_id', required=True, type=str, help='The target model id')
+    parser.add_argument(
+        '--revision', required=True, type=str, help='Model revision')
+    parser.add_argument('--host', default='0.0.0.0', help='Host to listen')
+    parser.add_argument('--port', type=int, default=8000, help='Server port')
+    parser.add_argument('--debug', default='debug', help='Set debug level.')
+    parser.add_argument(
+        '--llm_first',
+        type=bool,
+        default=True,
+        help='Use LLMPipeline first for llm models.')
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser('modelscope_server')
+    add_server_args(parser)
+    args = parser.parse_args()
+    app = get_app(args)
+    uvicorn.run(app, host=args.host, port=args.port)
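Besides the command-line entry point (python modelscope/server/api_server.py --model_id <model_id> --revision <revision>), the app can be started programmatically; a hedged sketch, with <model_id>/<revision> as placeholders and the remaining fields mirroring the argparse defaults above:

# Sketch only: programmatic launch; placeholder model id and revision.
import argparse

import uvicorn

from modelscope.server.api_server import get_app

args = argparse.Namespace(
    model_id='<model_id>', revision='<revision>',
    host='0.0.0.0', port=8000, debug='debug', llm_first=True)
uvicorn.run(get_app(args), host=args.host, port=args.port)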
modelscope/server/core/__init__.py (new file, 0 lines)
modelscope/server/core/event_handlers.py (new file, 47 lines)
@@ -0,0 +1,47 @@
+from typing import Callable
+
+from fastapi import FastAPI
+
+from modelscope.utils.input_output import (  # yapf: disable
+    create_pipeline, get_pipeline_information_by_pipeline,
+    get_task_input_examples, get_task_schemas)
+from modelscope.utils.logger import get_logger
+
+# control the model start stop
+
+logger = get_logger()
+
+
+def _startup_model(app: FastAPI) -> None:
+    logger.info('download model and create pipeline')
+    app.state.pipeline = create_pipeline(app.state.args.model_id,
+                                         app.state.args.revision,
+                                         app.state.args.llm_first)
+    info = {}
+    info['task_name'] = app.state.pipeline.group_key
+    info['schema'] = get_task_schemas(app.state.pipeline.group_key)
+    app.state.pipeline_info = info
+    app.state.pipeline_sample = get_task_input_examples(
+        app.state.pipeline.group_key)
+    logger.info('pipeline created.')
+
+
+def _shutdown_model(app: FastAPI) -> None:
+    app.state.pipeline = None
+    logger.info('shutdown model service')
+
+
+def start_app_handler(app: FastAPI) -> Callable:
+
+    def startup() -> None:
+        _startup_model(app)
+
+    return startup
+
+
+def stop_app_handler(app: FastAPI) -> Callable:
+
+    def shutdown() -> None:
+        _shutdown_model(app)
+
+    return shutdown
modelscope/server/models/__init__.py (new file, 0 lines)
modelscope/server/models/input.py (new file, 8 lines)
@@ -0,0 +1,8 @@
+from pydantic import BaseModel
+
+
+class ModelScopeRequest(BaseModel):
+
+    def __init__(self, input: object, parameters: object):
+        self.input = input
+        self.parameters = parameters
modelscope/server/models/output.py (new file, 34 lines)
@@ -0,0 +1,34 @@
+import datetime
+from http import HTTPStatus
+from typing import Generic, Optional, Type, TypeVar
+
+import json
+from pydantic.generics import GenericModel
+
+ResultType = TypeVar('ResultType')
+
+
+class ApiResponse(GenericModel, Generic[ResultType]):
+    Code: Optional[int] = HTTPStatus.OK
+    Success: Optional[bool] = True
+    RequestId: Optional[str] = ''
+    Message: Optional[str] = 'success'
+    Data: Optional[ResultType] = {}
+    """
+    ResultType (_type_): The response data type.
+    Failed: {'Code': 10010101004, 'Message': 'get model info failed, err: unauthorized permission',
+            'RequestId': '', 'Success': False}
+    Success: {'Code': 200, 'Data': {}, 'Message': 'success', 'RequestId': '', 'Success': True}
+
+
+
+    def set_data(self, data=Type[ResultType]):
+        self.Data = data
+
+    def set_message(self, message):
+        self.Message = message
+
+    def toJSON(self):
+        return json.dumps(self, default=lambda o: o.isoformat() if (isinstance(o, datetime.datetime))
+                          else o.__dict__, sort_keys=True, indent=4)
+    """
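For context, the generic response model is parameterised by the payload type; a hedged sketch mirroring its use in health.py above:

# Sketch only: build and serialise an ApiResponse the way the health route does.
from http import HTTPStatus
from typing import Dict

from modelscope.server.models.output import ApiResponse

resp = ApiResponse[Dict](Data={'status': 'ok'}, Code=HTTPStatus.OK, Success=True)
print(resp.dict())  # pydantic serialisation: Code, Success, RequestId, Message, Data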
modelscope/utils/deploy_checker.py (new file, 90 lines)
@@ -0,0 +1,90 @@
+import argparse
+import os
+import traceback
+from typing import List, Union
+
+import json
+
+from modelscope.hub.api import HubApi
+from modelscope.hub.file_download import model_file_download
+from modelscope.hub.utils.utils import get_cache_dir
+from modelscope.pipelines import pipeline
+from modelscope.utils.config import Config
+from modelscope.utils.constant import ModelFile
+from modelscope.utils.input_output import (
+    call_pipeline_with_json, get_pipeline_information_by_pipeline,
+    get_task_input_examples, pipeline_output_to_service_base64_output)
+from modelscope.utils.logger import get_logger
+
+logger = get_logger()
+
+
+class DeployChecker:
+
+    def __init__(self):
+        self.api = HubApi()
+
+    def check_model(self, model_id: str, model_revision=None):
+        # get model_revision & task info
+        if not model_revision:
+            model_revisions = self.api.list_model_revisions(model_id)
+            logger.info(
+                f'All model_revisions of `{model_id}`: {model_revisions}')
+            if len(model_revisions):
+                model_revision = model_revisions[0]
+            else:
+                logger.error(f'{model_id} has no revision.')
+
+        configuration_file = model_file_download(
+            model_id=model_id,
+            file_path=ModelFile.CONFIGURATION,
+            revision=model_revision)
+        cfg = Config.from_file(configuration_file)
+        task = cfg.safe_get('task')
+
+        # init pipeline
+        ppl = pipeline(
+            task=task,
+            model=model_id,
+            model_revision=model_revision,
+            llm_first=True)
+        pipeline_info = get_pipeline_information_by_pipeline(ppl)
+
+        # call pipeline
+        data = get_task_input_examples(task)
+
+        infer_result = call_pipeline_with_json(pipeline_info, ppl, data)
+        result = pipeline_output_to_service_base64_output(task, infer_result)
+        return result
+
+
+def check_deploy(models: Union[str, List], revisions: Union[str, List] = None):
+    if not isinstance(models, list):
+        models = [models]
+    if not isinstance(revisions, list):
+        revisions = [revisions] * (1 if revisions else len(models))
+
+    if len(models) != len(revisions):
+        logger.error(
+            f'The number of models and revisions need to be equal: The number of models'
+            f' is {len(models)} while the number of revisions is {len(revisions)}.'
+        )
+
+    checker = DeployChecker()
+    for model, revision in zip(models, revisions):
+        try:
+            res = checker.check_model(model, revision)
+            logger.info(f'{model} {revision}: Deploy pre-check pass. {res}\n')
+        except BaseException as e:
+            logger.info(
+                f'{model} {revision}: Deploy pre-check failed: {e}. {traceback.print_exc()}\n'
+            )
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--model_id', type=str)
+    parser.add_argument('--revision', type=str, default=None)
+    args = parser.parse_args()
+
+    check_deploy(args.model_id, args.revision)
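A hedged usage sketch for the checker; the model id is a placeholder and, when the revision is omitted, the latest revision reported by the hub is used, as in the code above:

# Sketch only: run the deploy pre-check from Python; '<model_id>' is a placeholder.
from modelscope.utils.deploy_checker import check_deploy

check_deploy('<model_id>')                     # latest revision
check_deploy(['<model_id>'], ['<revision>'])   # explicit revision per model
# Equivalent CLI: python modelscope/utils/deploy_checker.py --model_id <model_id>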
@@ -36,16 +36,18 @@ decodes relevant fields.
 Example:
     # create pipeine instance and pipeline information, save it to app
     pipeline_instance = create_pipeline('damo/cv_gpen_image-portrait-enhancement', 'v1.0.0')
+    # get pipeline information, input,output, request example.
     pipeline_info = get_pipeline_information_by_pipeline(pipeline_instance)
+    # save the pipeline and info to the app for use in subsequent request processing
     app.state.pipeline = pipeline_instance
     app.state.pipeline_info = pipeline_info

-    # for service schema request.
-    pipeline_info = request.app.state.pipeline_info
-    return pipeline_info.schema
-    # for service call request.
-    def inference(request: Request):
+    # for inference request, use call_pipeline_with_json to decode input and
+    # call pipeline, call pipeline_output_to_service_base64_output
+    # to encode necessary fields, and return the result.
+    # request and response are json format.
+    @router.post('/call')
+    async def inference(request: Request):
         pipeline_service = request.app.state.pipeline
         pipeline_info = request.app.state.pipeline_info
         request_json = await request.json()
@@ -55,19 +57,30 @@ Example:
     # convert output to json, if binary field, we need encoded.
     output = pipeline_output_to_service_base64_output(pipeline_info.task_name, result)
     return output
+
+    # Inference service input and output and sample information can be obtained through the docs interface
+    @router.get('/describe')
+    async def index(request: Request):
+        pipeline_info = request.app.state.pipeline_info
+        return pipeline_info.schema
+
 Todo:
     * Support more service input type, such as form.

 """


-def create_pipeline(model_id: str, revision: str):
+def create_pipeline(model_id: str, revision: str, llm_first: bool = True):
     model_configuration_file = model_file_download(
         model_id=model_id,
         file_path=ModelFile.CONFIGURATION,
         revision=revision)
     cfg = Config.from_file(model_configuration_file)
-    return pipeline(task=cfg.task, model=model_id, model_revision=revision)
+    return pipeline(
+        task=cfg.task,
+        model=model_id,
+        model_revision=revision,
+        llm_first=llm_first)


 def get_class_user_attributes(cls):
@@ -632,7 +645,7 @@ def call_pipeline_with_json(pipeline_info: PipelineInfomation,
     #     result = pipeline(**pipeline_inputs)
     # else:
     pipeline_inputs, parameters = service_base64_input_to_pipeline_input(
-        pipeline_info.task_name, body)
+        pipeline_info['task_name'], body)
     result = pipeline(pipeline_inputs, **parameters)

     return result
@@ -18,10 +18,11 @@ def pre_compile_megatron_util():

 def pre_compile_all():
     if torch.cuda.is_available():  # extension require cuda.
-        pre_compile_megatron_util()
         # pre compile pai-easycv
         from easycv.thirdparty.deformable_attention.functions import ms_deform_attn_func
+        pre_compile_megatron_util()
     # extension for all platform.
+    pre_compile_megatron_util()


 if __name__ == '__main__':
@@ -1,6 +1,6 @@
 hdbscan
 hyperpyyaml
-librosa==0.9.2
+librosa==0.10.1
 MinDAEC
 mir_eval>=0.7
 rotary_embedding_torch>=0.1.5
@@ -3,7 +3,7 @@ greenlet>=1.1.2
 inflect
 jedi>=0.18.1
 kantts
-librosa==0.9.2
+librosa==0.10.1
 lxml
 matplotlib
 msgpack>=1.0.4
@@ -17,7 +17,8 @@ ffmpeg>=1.4
 ffmpeg-python>=0.2.0
 ftfy
 fvcore
-healpy
+# remove for windows support
+# healpy
 imageio>=2.9.0
 imageio-ffmpeg>=0.4.2
 imgaug>=0.4.0
@@ -1,6 +1,6 @@
 addict
 attrs
-datasets>=2.8.0,<=2.13.0
+datasets>=2.14.5
 einops
 filelock>=3.3.0
 gast>=0.2.2
@@ -4,7 +4,7 @@ decord>=0.6.0
 diffusers>=0.19.0
 fairseq
 ftfy>=6.0.3
-librosa==0.9.2
+librosa==0.10.1
 opencv-python
 pycocoevalcap>=1.2
 pycocotools>=2.0.4
requirements/svr.txt (new file, 4 lines)
@@ -0,0 +1,4 @@
+fastapi
+requests
+sse-starlette
+uvicorn
@@ -1 +1 @@
-numpy<1.20.0
+numpy<=1.18.5
@@ -21,6 +21,7 @@ class Human3DAnimationTest(unittest.TestCase):
             'action_dataset': 'damo/3DHuman_action_dataset',
             'action': 'SwingDancing',
             'save_dir': 'outputs',
+            'blender': 'blender',
         }
         output = human3d(input)
         print('saved animation file to %s' % output)
@@ -45,6 +45,7 @@ class Human3DRenderTest(unittest.TestCase):
         input = {
             'dataset_id': 'damo/3DHuman_synthetic_dataset',
             'case_id': '3f2a7538253e42a8',
+            'resolution': 1024,
         }
         output = human3d(input)
         self.save_results(output, './human3d_results')