Mirror of https://github.com/modelscope/modelscope.git
Refactor Dockerfile (#1036)
@@ -1,168 +0,0 @@
#!/bin/bash
# default values.
BASE_CPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu
BASE_GPU_CUDA113_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:20.04-cuda11.3.0-cudnn8-devel
BASE_GPU_CUDA117_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:20.04-cuda11.7.1-cudnn8-devel
BASE_GPU_CUDA118_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:20.04-cuda11.8.0-cudnn8-devel
BASE_GPU_CUDA121_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:22.04-cuda12.1.0-cudnn8-devel
BASE_GPU_CUDA122_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:22.04-cuda11.2.2-cudnn8-devel
MODELSCOPE_REPO_ADDRESS=reg.docker.alibaba-inc.com/modelscope/modelscope
python_version=3.7.13
torch_version=1.11.0
cuda_version=11.7.1
cudatoolkit_version=11.3
tensorflow_version=1.15.5
os_version=20.04
version=None
is_cpu=False
is_dryrun=False

function usage(){
    echo "usage: build.sh "
    echo "       --os=ubuntu_version set ubuntu os version, default: 20.04"
    echo "       --python=python_version set python version, default: $python_version"
    echo "       --cuda=cuda_version set cuda version, only [11.3.0, 11.7.1], default: $cuda_version"
    echo "       --torch=torch_version set pytorch version, default: $torch_version"
    echo "       --tensorflow=tensorflow_version set tensorflow version, default: $tensorflow_version"
    echo "       --test option to run tests before pushing the image; push only if the CI tests pass"
    echo "       --cpu option to build the cpu version"
    echo "       --push option to push the image to the remote repo"
    echo "       --dryrun create the Dockerfile but do not build"
}

for i in "$@"; do
    case $i in
        --os=*)
            os_version="${i#*=}"
            shift
            ;;
        --python=*)
            python_version="${i#*=}"
            shift
            ;;
        --cuda=*)
            cuda_version="${i#*=}"
            shift # cuda version
            ;;
        --torch=*)
            torch_version="${i#*=}"
            shift # pytorch version
            ;;
        --tensorflow=*)
            tensorflow_version="${i#*=}"
            shift # tensorflow version
            ;;
        --version=*)
            version="${i#*=}"
            shift # version
            ;;
        --cpu)
            is_cpu=True
            shift # is cpu image
            ;;
        --push)
            is_push=True
            shift # option to push the image to the remote repo
            ;;
        --dryrun)
            is_dryrun=True
            shift
            ;;
        --help)
            usage
            exit 0
            ;;
        -*|--*)
            echo "Unknown option $i"
            usage
            exit 1
            ;;
        *)
            ;;
    esac
done

if [ "$cuda_version" == 11.3.0 ]; then
    echo "Building base image cuda11.3.0"
    BASE_GPU_IMAGE=$os_version-$cudatoolkit_version-cudnn8-devel
    cudatoolkit_version=cu113
elif [ "$cuda_version" == 11.7.1 ]; then
    echo "Building base image cuda11.7.1"
    cudatoolkit_version=cu117
    BASE_GPU_IMAGE=$BASE_GPU_CUDA117_IMAGE
elif [ "$cuda_version" == 11.8.0 ]; then
    echo "Building base image cuda11.8.0"
    cudatoolkit_version=cu118
    BASE_GPU_IMAGE=$MODELSCOPE_REPO_ADDRESS:$os_version-cuda$cuda_version-cudnn8-devel
elif [ "$cuda_version" == 12.1.0 ]; then
    cudatoolkit_version=cu121
    BASE_GPU_IMAGE=$BASE_GPU_CUDA121_IMAGE
else
    echo "Unsupported cuda version: $cuda_version"
    exit 1
fi

if [ "$is_cpu" == "True" ]; then
    export BASE_IMAGE=$BASE_CPU_IMAGE:$os_version
    base_tag=ubuntu$os_version
    export USE_GPU=False
else
    export BASE_IMAGE=$BASE_GPU_IMAGE
    base_tag=ubuntu$os_version-cuda$cuda_version
    export USE_GPU=True
fi

if [[ $python_version == 3.7* ]]; then
    base_tag=$base_tag-py37
elif [[ $python_version == 3.8* ]]; then
    base_tag=$base_tag-py38
elif [[ $python_version == 3.10* ]]; then
    base_tag=$base_tag-py310
else
    echo "Unsupported python version: $python_version"
    exit 1
fi
# target_image_tag=$base_tag-torch$torch_version-tf$tensorflow_version-base
# the cpu image has no tensorflow
if [ "$is_cpu" == "True" ]; then
    target_image_tag=$base_tag-torch$torch_version-base
else
    target_image_tag=$base_tag-torch$torch_version-tf$tensorflow_version-base
fi

export IMAGE_TO_BUILD=$MODELSCOPE_REPO_ADDRESS:$target_image_tag
export PYTHON_VERSION=$python_version
export TORCH_VERSION=$torch_version
export CUDATOOLKIT_VERSION=$cudatoolkit_version
export TENSORFLOW_VERSION=$tensorflow_version
echo "From: $BASE_IMAGE build: $target_image_tag"
echo -e "Building image with:\npython$python_version\npytorch$torch_version\ntensorflow:$tensorflow_version\ncudatoolkit:$cudatoolkit_version\ncpu:$is_cpu\n"
docker_file_content=`cat docker/Dockerfile.ubuntu_base`
printf "$docker_file_content" > Dockerfile

if [ "$is_dryrun" == "True" ]; then
    echo 'Dockerfile created'
    exit 0
fi

# DOCKER_BUILDKIT=0
while true
do
    DOCKER_BUILDKIT=0 docker build -t $IMAGE_TO_BUILD \
        --build-arg USE_GPU \
        --build-arg BASE_IMAGE \
        --build-arg PYTHON_VERSION \
        --build-arg TORCH_VERSION \
        --build-arg CUDATOOLKIT_VERSION \
        --build-arg TENSORFLOW_VERSION \
        -f Dockerfile .
    if [ $? -eq 0 ]; then
        echo "Image build done"
        break
    else
        echo "docker build command failed, will retry"
    fi
done

if [ "$is_push" == "True" ]; then
    echo "Pushing image: $IMAGE_TO_BUILD"
    docker push $IMAGE_TO_BUILD
fi
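For reference, the usage text above implies the removed base-image script was invoked roughly as follows (a hypothetical example; the script name build.sh is taken from its own usage string and the flag values are arbitrary):

bash build.sh --python=3.8.16 --cuda=11.7.1 --torch=1.13.1 --dryrun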
@@ -1,204 +0,0 @@
#!/bin/bash
# default values.
#BASE_PY37_CPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-py37-torch1.11.0-tf1.15.5-base
#BASE_PY38_CPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-py38-torch1.11.0-tf1.15.5-base
#BASE_PY38_GPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-cuda11.3.0-py38-torch1.11.0-tf1.15.5-base
#BASE_PY38_GPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-cuda11.7.1-py38-torch2.0.1-tf1.15.5-base
#BASE_PY38_GPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-cuda11.7.1-py38-torch1.13.1-tf2.6.0-base
#BASE_PY37_GPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-cuda11.3.0-py37-torch1.11.0-tf1.15.5-base
MODELSCOPE_REPO_ADDRESS=reg.docker.alibaba-inc.com/modelscope/modelscope
python_version=3.7.13
torch_version=1.11.0
cudatoolkit_version=11.7
tensorflow_version=1.15.5
modelscope_version=None
cuda_version=11.7.1
is_dsw=False
is_cpu=False
build_branch='master'

function usage(){
    echo "usage: build.sh "
    echo "       --python=python_version set python version, default: $python_version"
    echo "       --cuda=cuda_version set cuda version, only [11.3.0, 11.7.1], default: $cuda_version"
    echo "       --torch=torch_version set pytorch version, default: $torch_version"
    echo "       --tensorflow=tensorflow_version set tensorflow version, default: $tensorflow_version"
    echo "       --modelscope=modelscope_version set modelscope version, default: $modelscope_version"
    echo "       --branch=build_branch set modelscope build branch, default: $build_branch"
    echo "       --cpu option to build the cpu version"
    echo "       --dsw option to build the dsw version"
    echo "       --push option to push the image to the remote repo"
}

for i in "$@"; do
    case $i in
        --python=*)
            python_version="${i#*=}"
            shift
            ;;
        --cuda=*)
            cuda_version="${i#*=}"
            if [ "$cuda_version" == "11.3.0" ]; then
                cudatoolkit_version=11.3
            elif [ "$cuda_version" == "11.7.1" ]; then
                cudatoolkit_version=11.7
            elif [ "$cuda_version" == "11.8.0" ]; then
                cudatoolkit_version=11.8
            elif [ "$cuda_version" == "12.1.0" ]; then
                cudatoolkit_version=12.1
            else
                echo "Unsupported cuda version $cuda_version"
                exit 1
            fi
            shift # cuda version
            ;;
        --torch=*)
            torch_version="${i#*=}"
            shift # pytorch version
            ;;
        --tensorflow=*)
            tensorflow_version="${i#*=}"
            shift # tensorflow version
            ;;
        --cudatoolkit=*)
            cudatoolkit_version="${i#*=}"
            shift # cudatoolkit for pytorch
            ;;
        --modelscope=*)
            modelscope_version="${i#*=}"
            shift # modelscope version
            ;;
        --branch=*)
            build_branch="${i#*=}"
            shift # build branch
            ;;
        --cpu)
            is_cpu=True
            shift # is cpu image
            ;;
        --dsw)
            is_dsw=True
            shift # is dsw, will set the dsw cache location
            ;;
        --push)
            is_push=True
            shift # option to push the image to the remote repo
            ;;
        --help)
            usage
            exit 0
            ;;
        -*|--*)
            echo "Unknown option $i"
            usage
            exit 1
            ;;
        *)
            ;;
    esac
done

if [ "$modelscope_version" == "None" ]; then
    echo "The ModelScope version must be specified!"
    exit 1
fi
if [ "$is_cpu" == "True" ]; then
    base_tag=ubuntu20.04
    export USE_GPU=False
else
    base_tag=ubuntu20.04-cuda$cuda_version
    export USE_GPU=True
fi

if [[ $python_version == 3.7* ]]; then
    if [ "$is_cpu" == "True" ]; then
        echo "Building python3.7 cpu image"
        export BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-py37-torch$torch_version-tf$tensorflow_version-base
    else
        echo "Building python3.7 gpu image"
        export BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-cuda$cuda_version-py37-torch$torch_version-tf$tensorflow_version-base
    fi
    base_tag=$base_tag-py37
elif [[ $python_version == 3.8* ]]; then
    if [ "$is_cpu" == "True" ]; then
        echo "Building python3.8 cpu image"
        export BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-py38-torch$torch_version-tf$tensorflow_version-base
    else
        echo "Building python3.8 gpu image"
        export BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-cuda$cuda_version-py38-torch$torch_version-tf$tensorflow_version-base
    fi
    base_tag=$base_tag-py38
elif [[ $python_version == 3.10* ]]; then
    if [ "$is_cpu" == "True" ]; then
        echo "Building python3.10 cpu image"
        base_tag=ubuntu22.04-py310
        export BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu22.04-py310-torch$torch_version-base
    else
        echo "Building python3.10 gpu image"
        base_tag=ubuntu22.04-cuda$cuda_version-py310
        # reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu22.04-cuda12.1.0-py310-torch2.1.0-tf2.14.0-base
        export BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu22.04-cuda$cuda_version-py310-torch$torch_version-tf$tensorflow_version-base
    fi
else
    echo "Unsupported python version: $python_version"
    exit 1
fi
# the cpu image does not install tensorflow
# target_image_tag=$base_tag-torch$torch_version-tf$tensorflow_version-$modelscope_version-test
if [ "$is_cpu" == "True" ]; then
    target_image_tag=$base_tag-torch$torch_version-$modelscope_version-test
else
    target_image_tag=$base_tag-torch$torch_version-tf$tensorflow_version-$modelscope_version-test
fi
export IMAGE_TO_BUILD=$MODELSCOPE_REPO_ADDRESS:$target_image_tag
export PYTHON_VERSION=$python_version
export TORCH_VERSION=$torch_version
export CUDATOOLKIT_VERSION=$cudatoolkit_version
export TENSORFLOW_VERSION=$tensorflow_version
echo -e "Building image with:\npython$python_version\npytorch$torch_version\ntensorflow:$tensorflow_version\ncudatoolkit:$cudatoolkit_version\ncpu:$is_cpu\nis_ci:$is_ci_test\nis_dsw:$is_dsw\n"
echo -e "Base image: $BASE_IMAGE"
docker_file_content=`cat docker/Dockerfile.ubuntu`

BUILD_HASH_ID=$(git rev-parse HEAD)
# install third-party libraries
docker_file_content="${docker_file_content} \nRUN export COMMIT_ID=$BUILD_HASH_ID && pip install --no-cache-dir -U adaseq pai-easycv && pip install --no-cache-dir -U 'ms-swift' 'decord' 'qwen_vl_utils' 'pyav' 'librosa' 'funasr' autoawq 'timm>0.9.5' 'transformers' 'accelerate' 'peft' 'optimum' 'trl' 'outlines<0.1'"

docker_file_content="${docker_file_content} \nRUN pip uninstall modelscope -y && export COMMIT_ID=$BUILD_HASH_ID && cd /tmp && GIT_LFS_SKIP_SMUDGE=1 git clone -b $build_branch --single-branch $REPO_URL && cd modelscope && pip install . && cd / && rm -fr /tmp/modelscope && pip cache purge;"

echo "$is_dsw"
if [ "$is_dsw" == "False" ]; then
    echo "Not a DSW image"
else
    echo "Building a DSW image requires setting the ModelScope lib cache location."
    docker_file_content="${docker_file_content} \nENV MODELSCOPE_CACHE=/mnt/workspace/.cache/modelscope"
    # pre-compile extensions
    docker_file_content="${docker_file_content} \nRUN pip uninstall -y tb-nightly tensorboard && pip install --no-cache-dir -U tensorboard && TORCH_CUDA_ARCH_LIST='6.0 6.1 7.0 7.5 8.0 8.9 9.0 8.6+PTX' python -c 'from modelscope.utils.pre_compile import pre_compile_all;pre_compile_all()'"
fi

docker_file_content="${docker_file_content} \n RUN pip config set global.index-url https://mirrors.aliyun.com/pypi/simple && \
pip config set install.trusted-host mirrors.aliyun.com && \
cp /tmp/resources/ubuntu2204.aliyun /etc/apt/sources.list "

printf "$docker_file_content" > Dockerfile

while true
do
    docker build --progress=plain -t $IMAGE_TO_BUILD \
        --build-arg USE_GPU \
        --build-arg BASE_IMAGE \
        --build-arg PYTHON_VERSION \
        --build-arg TORCH_VERSION \
        --build-arg CUDATOOLKIT_VERSION \
        --build-arg TENSORFLOW_VERSION \
        -f Dockerfile .
    if [ $? -eq 0 ]; then
        echo "Image build done"
        break
    else
        echo "docker build command failed, will retry"
    fi
done

if [ "$is_push" == "True" ]; then
    echo "Pushing image: $IMAGE_TO_BUILD"
    docker push $IMAGE_TO_BUILD
fi
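Likewise, the second removed script refused to run without an explicit ModelScope version; a hypothetical invocation (all values arbitrary, script name taken from its usage string):

bash build.sh --python=3.10.13 --cuda=12.1.0 --torch=2.1.0 --modelscope=1.9.0 --branch=master --dsw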
@@ -1,7 +0,0 @@
#sudo docker run --name zwm_maas -v /home/wenmeng.zwm/workspace:/home/wenmeng.zwm/workspace --net host -ti reg.docker.alibaba-inc.com/pai-dlc/tensorflow-training:2.3-gpu-py36-cu101-ubuntu18.04 bash
#sudo docker run --name zwm_maas_pytorch -v /home/wenmeng.zwm/workspace:/home/wenmeng.zwm/workspace --net host -ti reg.docker.alibaba-inc.com/pai-dlc/pytorch-training:1.10PAI-gpu-py36-cu113-ubuntu18.04 bash
CONTAINER_NAME=modelscope-dev
IMAGE_NAME=registry.cn-shanghai.aliyuncs.com/modelscope/modelscope
IMAGE_VERSION=v0.1.1-16-g62856fa-devel
MOUNT_DIR=/home/wenmeng.zwm/workspace
sudo docker run --name $CONTAINER_NAME -v $MOUNT_DIR:$MOUNT_DIR --net host -ti ${IMAGE_NAME}:${IMAGE_VERSION} bash
.github/workflows/docker-image.yml (vendored, new file, 48 lines)
@@ -0,0 +1,48 @@
name: Build Docker Images

on:
  workflow_dispatch:
    inputs:
      modelscope_branch:
        description: 'ModelScope branch to build from'
        required: true
      image_type:
        description: 'The image type to build'
        required: true
      modelscope_version:
        description: 'ModelScope version to use'
        required: true
      swift_branch:
        description: 'SWIFT branch to use'
        required: true
      torch_version:
        description: 'Torch version to use'
        required: false
      torchvision_version:
        description: 'Torchvision version to use'
        required: false
      cuda_version:
        description: 'CUDA version to use'
        required: false
      torchaudio_version:
        description: 'TorchAudio version to use'
        required: false
      vllm_version:
        description: 'VLLM version to use'
        required: false
      lmdeploy_version:
        description: 'LMDeploy version to use'
        required: false

jobs:
  build:
    runs-on: [modelscope-self-hosted-us]

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event.inputs.modelscope_branch }}

      - name: Build Docker Image
        run: python docker/build_image.py --image_type ${{ github.event.inputs.image_type }} --modelscope_branch ${{ github.event.inputs.modelscope_branch }} --modelscope_version ${{ github.event.inputs.modelscope_version }} --swift_branch ${{ github.event.inputs.swift_branch }} --torch_version ${{ github.event.inputs.torch_version }} --torchvision_version ${{ github.event.inputs.torchvision_version }} --cuda_version ${{ github.event.inputs.cuda_version }} --torchaudio_version ${{ github.event.inputs.torchaudio_version }} --vllm_version ${{ github.event.inputs.vllm_version }} --lmdeploy_version ${{ github.event.inputs.lmdeploy_version }}
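Because the workflow is triggered by workflow_dispatch only, it has to be started manually; a minimal sketch using the GitHub CLI (assuming gh is installed and authenticated; the input values below are placeholders):

gh workflow run docker-image.yml \
  -f modelscope_branch=master \
  -f image_type=gpu \
  -f modelscope_version=9.99.0 \
  -f swift_branch=main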
@@ -1,4 +1,3 @@
-*.sh
 *.md
 *.dockerfile
 *.zip
docker/Dockerfile.extra_install (new file, 141 lines)
@@ -0,0 +1,141 @@
ENV TZ=Asia/Shanghai
ENV arch=x86_64
SHELL ["/bin/bash", "-c"]
COPY docker/rcfiles /tmp/resources
RUN apt-get update && apt-get upgrade -y && apt-get install -y --reinstall ca-certificates && \
    apt-get install -y make apt-utils openssh-server locales wget git strace gdb sox libopenmpi-dev curl \
    iputils-ping net-tools iproute2 autoconf automake gperf libre2-dev libssl-dev \
    libtool libcurl4-openssl-dev libb64-dev libgoogle-perftools-dev patchelf \
    rapidjson-dev scons software-properties-common pkg-config unzip zlib1g-dev \
    libbz2-dev libreadline-dev libsqlite3-dev llvm libncurses5-dev libncursesw5-dev xz-utils tk-dev liblzma-dev \
    libarchive-dev libxml2-dev libnuma-dev cmake \
    libgeos-dev strace vim ffmpeg libsm6 tzdata language-pack-zh-hans \
    ttf-wqy-microhei ttf-wqy-zenhei xfonts-wqy libxext6 build-essential ninja-build \
    libjpeg-dev libpng-dev && \
    wget https://packagecloud.io/github/git-lfs/packages/debian/bullseye/git-lfs_3.2.0_amd64.deb/download -O ./git-lfs_3.2.0_amd64.deb && \
    dpkg -i ./git-lfs_3.2.0_amd64.deb && \
    rm -f ./git-lfs_3.2.0_amd64.deb && \
    locale-gen zh_CN && \
    locale-gen zh_CN.utf8 && \
    update-locale LANG=zh_CN.UTF-8 LC_ALL=zh_CN.UTF-8 LANGUAGE=zh_CN.UTF-8 && \
    ln -fs /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
    dpkg-reconfigure --frontend noninteractive tzdata && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

ENV LANG=zh_CN.UTF-8 LANGUAGE=zh_CN.UTF-8 LC_ALL=zh_CN.UTF-8
RUN wget -O /tmp/boost.tar.gz https://boostorg.jfrog.io/artifactory/main/release/1.80.0/source/boost_1_80_0.tar.gz && \
    cd /tmp && tar xzf boost.tar.gz && \
    mv /tmp/boost_1_80_0/boost /usr/include/boost && \
    rm -rf /tmp/boost_1_80_0 && rm -rf boost.tar.gz

#install and config python copy from https://github.com/docker-library/python/blob/1b7a1106674a21e699b155cbd53bf39387284cca/3.10/bookworm/Dockerfile
ARG PYTHON_VERSION={python_version}
ENV PATH /usr/local/bin:$PATH
ENV GPG_KEY A035C8C19219BA821ECEA86B64E628F8D684696D
ENV PYTHON_VERSION {python_version}

#install and config python copy from https://github.com/docker-library/python/blob/1b7a1106674a21e699b155cbd53bf39387284cca/3.10/bookworm/Dockerfile
ARG PYTHON_VERSION={python_version}
ENV PATH /usr/local/bin:$PATH
ENV GPG_KEY A035C8C19219BA821ECEA86B64E628F8D684696D
ENV PYTHON_VERSION {python_version}

RUN set -eux; \
    \
    wget -O python.tar.xz "https://www.python.org/ftp/python/${PYTHON_VERSION%%[a-z]*}/Python-$PYTHON_VERSION.tar.xz"; \
    wget -O python.tar.xz.asc "https://www.python.org/ftp/python/${PYTHON_VERSION%%[a-z]*}/Python-$PYTHON_VERSION.tar.xz.asc"; \
    GNUPGHOME="$(mktemp -d)"; export GNUPGHOME; \
    gpg --batch --keyserver hkps://keys.openpgp.org --recv-keys "$GPG_KEY"; \
    gpg --batch --verify python.tar.xz.asc python.tar.xz; \
    gpgconf --kill all; \
    rm -rf "$GNUPGHOME" python.tar.xz.asc; \
    mkdir -p /usr/src/python; \
    tar --extract --directory /usr/src/python --strip-components=1 --file python.tar.xz; \
    rm python.tar.xz; \
    \
    cd /usr/src/python; \
    gnuArch="$(dpkg-architecture --query DEB_BUILD_GNU_TYPE)"; \
    ./configure \
        --build="$gnuArch" \
        --enable-loadable-sqlite-extensions \
        --enable-optimizations \
        --enable-option-checking=fatal \
        --enable-shared \
        --with-lto \
        --with-system-expat \
        --without-ensurepip \
    ; \
    nproc="$(nproc)"; \
    EXTRA_CFLAGS="$(dpkg-buildflags --get CFLAGS)"; \
    LDFLAGS="$(dpkg-buildflags --get LDFLAGS)"; \
    make -j "$nproc" \
        "EXTRA_CFLAGS=${EXTRA_CFLAGS:-}" \
        "LDFLAGS=${LDFLAGS:-}" \
        "PROFILE_TASK=${PROFILE_TASK:-}" \
    ; \
    rm python; \
    make -j "$nproc" \
        "EXTRA_CFLAGS=${EXTRA_CFLAGS:-}" \
        "LDFLAGS=${LDFLAGS:--Wl},-rpath='\$\$ORIGIN/../lib'" \
        "PROFILE_TASK=${PROFILE_TASK:-}" \
        python \
    ; \
    make install; \
    \
    bin="$(readlink -ve /usr/local/bin/python3)"; \
    dir="$(dirname "$bin")"; \
    mkdir -p "/usr/share/gdb/auto-load/$dir"; \
    cp -vL Tools/gdb/libpython.py "/usr/share/gdb/auto-load/$bin-gdb.py"; \
    \
    cd /; \
    rm -rf /usr/src/python; \
    \
    find /usr/local -depth \
        \( \
            \( -type d -a \( -name test -o -name tests -o -name idle_test \) \) \
            -o \( -type f -a \( -name '*.pyc' -o -name '*.pyo' -o -name 'libpython*.a' \) \) \
        \) -exec rm -rf '{}' + \
    ; \
    \
    ldconfig; \
    \
    python3 --version

# make some useful symlinks that are expected to exist ("/usr/local/bin/python" and friends)
RUN set -eux; \
    for src in idle3 pydoc3 python3 python3-config; do \
        dst="$(echo "$src" | tr -d 3)"; \
        [ -s "/usr/local/bin/$src" ]; \
        [ ! -e "/usr/local/bin/$dst" ]; \
        ln -svT "$src" "/usr/local/bin/$dst"; \
    done

# if this is called "PIP_VERSION", pip explodes with "ValueError: invalid truth value '<VERSION>'"
ENV PYTHON_PIP_VERSION 23.0.1
# https://github.com/docker-library/python/issues/365
ENV PYTHON_SETUPTOOLS_VERSION 65.5.1
# https://github.com/pypa/get-pip
ENV PYTHON_GET_PIP_URL https://github.com/pypa/get-pip/raw/dbf0c85f76fb6e1ab42aa672ffca6f0a675d9ee4/public/get-pip.py
ENV PYTHON_GET_PIP_SHA256 dfe9fd5c28dc98b5ac17979a953ea550cec37ae1b47a5116007395bfacff2ab9

RUN set -eux; \
    \
    wget -O get-pip.py "$PYTHON_GET_PIP_URL"; \
    echo "$PYTHON_GET_PIP_SHA256 *get-pip.py" | sha256sum -c -; \
    \
    export PYTHONDONTWRITEBYTECODE=1; \
    \
    python get-pip.py \
        --disable-pip-version-check \
        --no-cache-dir \
        --no-compile \
        "pip==$PYTHON_PIP_VERSION" \
        "setuptools==$PYTHON_SETUPTOOLS_VERSION" \
    ; \
    rm -f get-pip.py; \
    \
    pip --version
# end of install python

RUN pip install tf-keras -i https://mirrors.aliyun.com/pypi/simple
@@ -1,68 +1,28 @@
-ARG BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-cuda11.3.0-py37-torch1.11.0-tf1.15.5-base
-FROM $BASE_IMAGE
+FROM {base_image}
+ARG DEBIAN_FRONTEND=noninteractive
+ENV TZ=Asia/Shanghai
+ENV arch=x86_64

+COPY docker/scripts/modelscope_env_init.sh /usr/local/bin/ms_env_init.sh
 RUN apt-get update && \
-    apt-get install -y libsox-dev unzip libaio-dev zip iputils-ping telnet sudo && \
+    apt-get install -y libsox-dev unzip libaio-dev zip iputils-ping telnet sudo git net-tools && \
     apt-get clean && \
     rm -rf /var/lib/apt/lists/*

-ARG CUDA_VERSION=cu121
-# install jupyter plugin
-RUN mkdir -p /root/.local/share/jupyter/labextensions/ && \
-    cp -r /tmp/resources/jupyter_plugins/* /root/.local/share/jupyter/labextensions/
-# install ollama
-RUN curl -fsSL https://ollama.com/install.sh | sh
+{extra_content}

-COPY docker/scripts/modelscope_env_init.sh /usr/local/bin/ms_env_init.sh
-# python3.8 pip install git+https://github.com/jin-s13/xtcocoapi.git@v1.13
-# pip install git+https://github.com/gatagat/lap.git@v0.4.0
-RUN pip install --no-cache-dir numpy 'cython<=0.29.36' funtextprocessing kwsbp==0.0.6 safetensors typeguard==2.13.3 scikit-learn librosa==0.9.2 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html
+RUN pip config set global.index-url https://mirrors.aliyun.com/pypi/simple && \
+    pip config set install.trusted-host mirrors.aliyun.com && \
+    cp /tmp/resources/ubuntu2204.aliyun /etc/apt/sources.list

-RUN pip install --no-cache-dir adaseq text2sql_lgesql==1.3.0 \
-    git+https://github.com/jin-s13/xtcocoapi.git@v1.14 \
-    git+https://github.com/gatagat/lap.git@v0.4.0 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html --force --no-deps
+RUN echo "cache bust $(date +%Y%m%d%H%M%S)"

-RUN pip install --no-cache-dir mpi4py paint_ldm \
-    mmcls>=0.21.0 mmdet>=2.25.0 decord>=0.6.0 \
-    ipykernel fasttext fairseq deepspeed apex -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html
+COPY {meta_file} /tmp/install.sh

-ARG USE_GPU
+RUN sh /tmp/install.sh {version_args}

-RUN if [ "$USE_GPU" = "True" ] ; then \
-        CUDA_HOME=/usr/local/cuda TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0 7.5 8.0 8.6 8.9 9.0" pip install --no-cache-dir 'git+https://github.com/facebookresearch/detectron2.git'; \
-    else \
-        echo 'cpu unsupport detectron2'; \
-    fi

-# install dependencies
-COPY requirements /var/modelscope
-RUN pip install --no-cache-dir -r /var/modelscope/framework.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
-    pip install --no-cache-dir -r /var/modelscope/audio.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
-    pip install --no-cache-dir -r /var/modelscope/cv.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
-    pip install --no-cache-dir -r /var/modelscope/multi-modal.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
-    pip install --no-cache-dir -r /var/modelscope/nlp.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
-    pip install --no-cache-dir -r /var/modelscope/science.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
-    pip install --no-cache-dir -r /var/modelscope/tests.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
-    pip install --no-cache-dir -r /var/modelscope/server.txt && \
-    pip install --no-cache-dir https://modelscope.oss-cn-beijing.aliyuncs.com/packages/imageio_ffmpeg-0.4.9-py3-none-any.whl --force && \
-    pip install --no-cache-dir 'scipy<1.13.0' && \
-    pip cache purge
-# 'scipy<1.13.0' for cannot import name 'kaiser' from 'scipy.signal'
-COPY examples /modelscope/examples
-# torchmetrics==0.11.4 for ofa
-# tinycudann for cuda12.1.0 pytorch 2.1.2
-RUN if [ "$USE_GPU" = "True" ] ; then \
-        pip install --no-cache-dir torchsde jupyterlab torchmetrics==0.11.4 tiktoken transformers_stream_generator bitsandbytes basicsr optimum && \
-        pip install --no-cache-dir flash_attn==2.5.9.post1 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
-        pip install --no-cache-dir auto-gptq --extra-index-url https://huggingface.github.io/autogptq-index/whl/cu121/ && \
-        pip install --no-cache-dir -U 'xformers<0.0.27' --index-url https://download.pytorch.org/whl/cu121 && \
-        pip install --no-cache-dir --force tinycudann==1.7 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
-        pip uninstall -y torch-scatter && TORCH_CUDA_ARCH_LIST="6.0;6.1;6.2;7.0;7.5;8.0;8.6;8.9;9.0" pip install --no-cache-dir -U torch-scatter && \
-        pip install --no-cache-dir -U triton 'vllm==0.5.2' https://modelscope.oss-cn-beijing.aliyuncs.com/packages/lmdeploy-0.5.0-cp310-cp310-linux_x86_64.whl; \
-    else \
-        echo 'cpu unsupport vllm auto-gptq'; \
-    fi

 ENV SETUPTOOLS_USE_DISTUTILS=stdlib
 ENV VLLM_USE_MODELSCOPE=True
 ENV LMDEPLOY_USE_MODELSCOPE=True
+ENV MODELSCOPE_CACHE=/mnt/workspace/.cache/modelscope
|||||||
@@ -1,11 +1,10 @@
-ARG BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:20.04-cuda11.3.0-cudnn8-devel
+ARG BASE_IMAGE={base_image}
 FROM $BASE_IMAGE
 ARG DEBIAN_FRONTEND=noninteractive
 ENV TZ=Asia/Shanghai
 ENV arch=x86_64
 SHELL ["/bin/bash", "-c"]
 COPY docker/rcfiles /tmp/resources
-COPY docker/jupyter_plugins /tmp/resources/jupyter_plugins
 RUN apt-get update && apt-get upgrade -y && apt-get install -y --reinstall ca-certificates && \
     apt-get install -y make apt-utils openssh-server locales wget git strace gdb sox libopenmpi-dev curl \
     iputils-ping net-tools iproute2 autoconf automake gperf libre2-dev libssl-dev \
@@ -34,10 +33,10 @@ RUN wget -O /tmp/boost.tar.gz https://boostorg.jfrog.io/artifactory/main/release
     rm -rf /tmp/boost_1_80_0 && rm -rf boost.tar.gz

 #install and config python copy from https://github.com/docker-library/python/blob/1b7a1106674a21e699b155cbd53bf39387284cca/3.10/bookworm/Dockerfile
-ARG PYTHON_VERSION=3.10.14
+ARG PYTHON_VERSION={python_version}
 ENV PATH /usr/local/bin:$PATH
 ENV GPG_KEY A035C8C19219BA821ECEA86B64E628F8D684696D
-ENV PYTHON_VERSION 3.10.14
+ENV PYTHON_VERSION {python_version}

 RUN set -eux; \
     \
@@ -139,15 +138,14 @@ RUN set -eux; \
     pip --version
 # end of install python

-ARG USE_GPU=True
+ARG USE_GPU={use_gpu}

 # install pytorch
-ARG TORCH_VERSION=2.3.0
-ARG CUDATOOLKIT_VERSION=cu121
+ARG TORCH_VERSION={torch_version}
+ARG CUDATOOLKIT_VERSION={cudatoolkit_version}

 RUN if [ "$USE_GPU" = "True" ] ; then \
-        pip install --no-cache-dir "torch==2.3.0" -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
-        pip install --no-cache-dir torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121; \
+        pip install --no-cache-dir torch==$TORCH_VERSION torchvision torchaudio; \
     else \
         pip install --no-cache-dir torch==$TORCH_VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu; \
     fi
@@ -192,7 +190,7 @@ RUN if [ "$USE_GPU" = "True" ] ; then \
     fi

-ARG TENSORFLOW_VERSION=1.15.5
+ARG TENSORFLOW_VERSION={tf_version}
 RUN if [ "$USE_GPU" = "True" ] ; then \
         pip install --no-cache-dir tensorflow==$TENSORFLOW_VERSION; \
     else \
@@ -200,9 +198,9 @@ ARG TENSORFLOW_VERSION=1.15.5
     fi

 RUN if [ "$USE_GPU" = "True" ] ; then \
-        pip install --no-cache-dir "https://modelscope.oss-cn-beijing.aliyuncs.com/packages/mmcv/mmcv_full-1.7.0-cp310-cp310-linux_x86_64.whl"; \
+        cd /tmp && git clone -b ms_build --single-branch https://github.com/tastelikefeet/mmcv.git && cd mmcv && TORCH_CUDA_ARCH_LIST="6.0;6.1;7.0;7.5;8.0;8.9;9.0;8.6+PTX" MMCV_WITH_OPS=1 MAX_JOBS=32 FORCE_CUDA=1 python setup.py bdist_wheel && cd / && rm -fr /tmp/mmcv && pip cache purge; \
     else \
-        pip install --no-cache-dir mmcv_full==1.7.0+cputorch230 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html; \
+        cd /tmp && git clone -b ms_build --single-branch https://github.com/tastelikefeet/mmcv.git && cd mmcv && MMCV_WITH_OPS=1 MAX_JOBS=32 python setup.py bdist_wheel && cd / && rm -fr /tmp/mmcv && pip cache purge; \
     fi

 ENTRYPOINT []
docker/build_image.py (new file, 284 lines)
@@ -0,0 +1,284 @@
import argparse
import os
from typing import Any

docker_registry = os.environ['DOCKER_REGISTRY']
assert docker_registry, 'You must pass a valid DOCKER_REGISTRY'


class Builder:

    def __init__(self, args: Any, dry_run: bool):
        self.args = self.init_args(args)
        self.dry_run = dry_run
        self.args.cudatoolkit_version = self._generate_cudatoolkit_version(
            args.cuda_version)
        self.args.python_tag = self._generate_python_tag(args.python_version)

    def init_args(self, args: Any) -> Any:
        if not args.base_image:
            # A mirrored image of nvidia/cuda:12.4.0-devel-ubuntu22.04
            args.base_image = (
                'modelscope-image-registry.cn-wulanchabu.cr.aliyuncs.com/'
                'modelscope/mirror:12.4.0-devel-ubuntu22.04')
        if not args.torch_version:
            args.torch_version = '2.3.0'
            args.torchaudio_version = '2.3.0'
            args.torchvision_version = '0.18.0'
        if not args.tf_version:
            args.tf_version = '2.16.1'
        if not args.cuda_version:
            args.cuda_version = '12.1.0'
        if not args.vllm_version:
            args.vllm_version = '0.5.1'
        if not args.lmdeploy_version:
            args.lmdeploy_version = '0.5.0'
        if not args.autogptq_version:
            args.autogptq_version = '0.7.1'
        return args

    def _generate_cudatoolkit_version(self, cuda_version: str) -> str:
        cuda_version = cuda_version[:cuda_version.rfind('.')]
        return 'cu' + cuda_version.replace('.', '')

    def _generate_python_tag(self, python_version: str) -> str:
        python_version = python_version[:python_version.rfind('.')]
        return 'py' + python_version.replace('.', '')

    def generate_dockerfile(self) -> str:
        raise NotImplementedError

    def _save_dockerfile(self, content: str) -> None:
        if os.path.exists('./Dockerfile'):
            os.remove('./Dockerfile')
        with open('./Dockerfile', 'w') as f:
            f.write(content)

    def build(self) -> int:
        pass

    def push(self) -> int:
        pass

    def __call__(self):
        content = self.generate_dockerfile()
        self._save_dockerfile(content)
        if not self.dry_run:
            ret = self.build()
            if ret != 0:
                raise RuntimeError(f'Docker build error with errno: {ret}')
            ret = self.push()
            if ret != 0:
                raise RuntimeError(f'Docker push error with errno: {ret}')


class BaseCPUImageBuilder(Builder):

    def generate_dockerfile(self) -> str:
        with open('docker/Dockerfile.ubuntu_base', 'r') as f:
            content = f.read()
        content = content.replace('{base_image}', self.args.base_image)
        content = content.replace('{use_gpu}', 'False')
        content = content.replace('{python_version}', self.args.python_version)
        content = content.replace('{torch_version}', self.args.torch_version)
        content = content.replace('{cudatoolkit_version}',
                                  self.args.cudatoolkit_version)
        content = content.replace('{tf_version}', self.args.tf_version)
        return content

    def build(self):
        image_tag = f'{docker_registry}:ubuntu{self.args.ubuntu_version}-torch{self.args.torch_version}-base'
        return os.system(
            f'DOCKER_BUILDKIT=0 docker build -t {image_tag} -f Dockerfile .')

    def push(self):
        image_tag = f'{docker_registry}:ubuntu{self.args.ubuntu_version}-torch{self.args.torch_version}-base'
        return os.system(f'docker push {image_tag}')


class BaseGPUImageBuilder(Builder):

    def generate_dockerfile(self) -> str:
        with open('docker/Dockerfile.ubuntu_base', 'r') as f:
            content = f.read()
        content = content.replace('{base_image}', self.args.base_image)
        content = content.replace('{use_gpu}', 'True')
        content = content.replace('{python_version}', self.args.python_version)
        content = content.replace('{torch_version}', self.args.torch_version)
        content = content.replace('{cudatoolkit_version}',
                                  self.args.cudatoolkit_version)
        content = content.replace('{tf_version}', self.args.tf_version)
        return content

    def build(self) -> int:
        image_tag = (
            f'{docker_registry}:ubuntu{self.args.ubuntu_version}-cuda{self.args.cuda_version}-'
            f'torch{self.args.torch_version}-tf{self.args.tf_version}-base')
        return os.system(
            f'DOCKER_BUILDKIT=0 docker build -t {image_tag} -f Dockerfile .')

    def push(self):
        image_tag = (
            f'{docker_registry}:ubuntu{self.args.ubuntu_version}-cuda{self.args.cuda_version}-'
            f'torch{self.args.torch_version}-tf{self.args.tf_version}-base')
        return os.system(f'docker push {image_tag}')


class CPUImageBuilder(Builder):

    def generate_dockerfile(self) -> str:
        meta_file = './docker/install_cpu.sh'
        version_args = (
            f'{self.args.torch_version} {self.args.torchvision_version} '
            f'{self.args.torchaudio_version} {self.args.modelscope_branch} {self.args.swift_branch}'
        )
        base_image = f'{docker_registry}:ubuntu{self.args.ubuntu_version}-torch{self.args.torch_version}-base'
        extra_content = """\nRUN pip install adaseq\nRUN pip install pai-easycv"""

        with open('docker/Dockerfile.ubuntu', 'r') as f:
            content = f.read()
        content = content.replace('{base_image}', base_image)
        content = content.replace('{extra_content}', extra_content)
        content = content.replace('{meta_file}', meta_file)
        content = content.replace('{version_args}', version_args)
        return content

    def build(self) -> int:
        image_tag = (
            f'{docker_registry}:ubuntu{self.args.ubuntu_version}-{self.args.python_tag}-'
            f'torch{self.args.torch_version}-{self.args.modelscope_version}-test'
        )
        return os.system(f'docker build -t {image_tag} -f Dockerfile .')

    def push(self):
        image_tag = (
            f'{docker_registry}:ubuntu{self.args.ubuntu_version}-{self.args.python_tag}-'
            f'torch{self.args.torch_version}-{self.args.modelscope_version}-test'
        )
        return os.system(f'docker push {image_tag}')


class GPUImageBuilder(Builder):

    def generate_dockerfile(self) -> str:
        meta_file = './docker/install.sh'
        extra_content = """\nRUN pip install adaseq\nRUN pip install pai-easycv"""
        version_args = (
            f'{self.args.torch_version} {self.args.torchvision_version} {self.args.torchaudio_version} '
            f'{self.args.vllm_version} {self.args.lmdeploy_version} {self.args.autogptq_version} '
            f'{self.args.modelscope_branch} {self.args.swift_branch}')
        base_image = (
            f'{docker_registry}:ubuntu{self.args.ubuntu_version}-cuda{self.args.cuda_version}-'
            f'torch{self.args.torch_version}-tf{self.args.tf_version}-base')
        with open('docker/Dockerfile.ubuntu', 'r') as f:
            content = f.read()
        content = content.replace('{base_image}', base_image)
        content = content.replace('{extra_content}', extra_content)
        content = content.replace('{meta_file}', meta_file)
        content = content.replace('{version_args}', version_args)
        return content

    def build(self) -> int:
        image_tag = (
            f'{docker_registry}:ubuntu{self.args.ubuntu_version}-cuda{self.args.cuda_version}-'
            f'{self.args.python_tag}-torch{self.args.torch_version}-tf{self.args.tf_version}-'
            f'{self.args.modelscope_version}-test')
        return os.system(f'docker build -t {image_tag} -f Dockerfile .')

    def push(self):
        image_tag = (
            f'{docker_registry}:ubuntu{self.args.ubuntu_version}-cuda{self.args.cuda_version}-'
            f'{self.args.python_tag}-torch{self.args.torch_version}-tf{self.args.tf_version}-'
            f'{self.args.modelscope_version}-test')
        return os.system(f'docker push {image_tag}')


class LLMImageBuilder(Builder):

    def init_args(self, args) -> Any:
        if not args.base_image:
            # A mirrored image of nvidia/cuda:12.4.0-devel-ubuntu22.04
            args.base_image = (
                'modelscope-image-registry.cn-wulanchabu.cr.aliyuncs.com/modelscope/'
                'mirror:12.4.0-devel-ubuntu22.04')
        if not args.torch_version:
            args.torch_version = '2.4.0'
            args.torchaudio_version = '2.4.0'
            args.torchvision_version = '0.19.0'
        if not args.cuda_version:
            args.cuda_version = '12.4.0'
        if not args.vllm_version:
            args.vllm_version = '0.6.0'
        if not args.lmdeploy_version:
            args.lmdeploy_version = '0.6.1'
        if not args.autogptq_version:
            args.autogptq_version = '0.7.1'
        return args

    def generate_dockerfile(self) -> str:
        meta_file = './docker/install.sh'
        with open('docker/Dockerfile.extra_install', 'r') as f:
            extra_content = f.read()
        extra_content = extra_content.replace('{python_version}',
                                              self.args.python_version)
        version_args = (
            f'{self.args.torch_version} {self.args.torchvision_version} {self.args.torchaudio_version} '
            f'{self.args.vllm_version} {self.args.lmdeploy_version} {self.args.autogptq_version} '
            f'{self.args.modelscope_branch} {self.args.swift_branch}')
        with open('docker/Dockerfile.ubuntu', 'r') as f:
            content = f.read()
        content = content.replace('{base_image}', self.args.base_image)
        content = content.replace('{extra_content}', extra_content)
        content = content.replace('{meta_file}', meta_file)
        content = content.replace('{version_args}', version_args)
        return content

    def build(self) -> int:
        image_tag = (
            f'{docker_registry}:ubuntu{self.args.ubuntu_version}-cuda{self.args.cuda_version}-'
            f'{self.args.python_tag}-torch{self.args.torch_version}-{self.args.modelscope_version}-LLM-test'
        )
        return os.system(f'docker build -t {image_tag} -f Dockerfile .')

    def push(self):
        image_tag = (
            f'{docker_registry}:ubuntu{self.args.ubuntu_version}-cuda{self.args.cuda_version}-'
            f'{self.args.python_tag}-torch{self.args.torch_version}-{self.args.modelscope_version}-LLM-test'
        )
        return os.system(f'docker push {image_tag}')


parser = argparse.ArgumentParser()
parser.add_argument('--base_image', type=str, default=None)
parser.add_argument('--image_type', type=str)
parser.add_argument('--python_version', type=str, default='3.10.14')
parser.add_argument('--ubuntu_version', type=str, default='22.04')
parser.add_argument('--torch_version', type=str, default=None)
parser.add_argument('--torchvision_version', type=str, default=None)
parser.add_argument('--cuda_version', type=str, default=None)
parser.add_argument('--torchaudio_version', type=str, default=None)
parser.add_argument('--tf_version', type=str, default=None)
parser.add_argument('--vllm_version', type=str, default=None)
parser.add_argument('--lmdeploy_version', type=str, default=None)
parser.add_argument('--autogptq_version', type=str, default=None)
parser.add_argument('--modelscope_branch', type=str, default='master')
parser.add_argument('--modelscope_version', type=str, default='9.99.0')
parser.add_argument('--swift_branch', type=str, default='main')
parser.add_argument('--dry_run', type=int, default=0)

args = parser.parse_args()

if args.image_type.lower() == 'base_cpu':
    builder_cls = BaseCPUImageBuilder
elif args.image_type.lower() == 'base_gpu':
    builder_cls = BaseGPUImageBuilder
elif args.image_type.lower() == 'cpu':
    builder_cls = CPUImageBuilder
elif args.image_type.lower() == 'gpu':
    builder_cls = GPUImageBuilder
elif args.image_type.lower() == 'llm':
    builder_cls = LLMImageBuilder
else:
    raise ValueError(f'Unsupported image_type: {args.image_type}')

builder_cls(args, args.dry_run)()
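The workflow above is the intended entry point, but build_image.py can also be run locally; a rough sketch under the assumption that DOCKER_REGISTRY points at a registry you can push to (the value below is a placeholder), with --dry_run 1 so only the Dockerfile is generated and no build or push happens:

export DOCKER_REGISTRY=registry.example.com/modelscope/modelscope   # placeholder registry
python docker/build_image.py --image_type gpu --modelscope_branch master \
    --modelscope_version 9.99.0 --swift_branch main --dry_run 1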
docker/install.sh (new file, 50 lines)
@@ -0,0 +1,50 @@
#!/bin/bash

torch_version=${1:-2.4.0}
torchvision_version=${2:-0.19.0}
torchaudio_version=${3:-2.4.0}
vllm_version=${4:-0.6.0}
lmdeploy_version=${5:-0.6.1}
autogptq_version=${6:-0.7.1}
modelscope_branch=${7:-master}
swift_branch=${8:-main}

pip install --no-cache-dir funtextprocessing typeguard==2.13.3 scikit-learn -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html

# curl -fsSL https://ollama.com/install.sh | sh

pip install --no-cache-dir -U funasr

pip install --no-cache-dir -U qwen_vl_utils pyav librosa autoawq timm transformers accelerate peft optimum trl safetensors

pip install --no-cache-dir torchsde jupyterlab torchmetrics==0.11.4 tiktoken transformers_stream_generator bitsandbytes basicsr

pip install --no-cache-dir text2sql_lgesql==1.3.0 git+https://github.com/jin-s13/xtcocoapi.git@v1.14 git+https://github.com/gatagat/lap.git@v0.4.0 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html --force --no-deps

pip install --no-cache-dir mpi4py paint_ldm 'mmcls>=0.21.0' 'mmdet>=2.25.0' 'decord>=0.6.0' ipykernel fasttext fairseq deepspeed apex -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html

CUDA_HOME=/usr/local/cuda TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0 7.5 8.0 8.6 8.9 9.0" pip install --no-cache-dir 'git+https://github.com/facebookresearch/detectron2.git';

# pip install https://github.com/Dao-AILab/flash-attention/releases/download/v2.6.3/flash_attn-2.6.3+cu123torch2.4cxx11abiTRUE-cp310-cp310-linux_x86_64.whl
# find on: https://github.com/Dao-AILab/flash-attention/releases
# cd /tmp && git clone https://github.com/Dao-AILab/flash-attention.git && cd flash-attention && python setup.py install && cd / && rm -fr /tmp/flash-attention && pip cache purge;

pip install --no-cache-dir auto-gptq==$autogptq_version

pip install --no-cache-dir --force tinycudann==1.7 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html

# pip uninstall -y torch-scatter && TORCH_CUDA_ARCH_LIST="6.0;6.1;6.2;7.0;7.5;8.0;8.6;8.9;9.0" pip install --no-cache-dir -U torch-scatter

pip install --no-cache-dir -U triton

pip install vllm==$vllm_version -U

pip install --no-cache-dir -U lmdeploy==$lmdeploy_version --no-deps

pip install --no-cache-dir -U torch==$torch_version torchvision==$torchvision_version torchaudio==$torchaudio_version

pip uninstall ms-swift modelscope -y

cd /tmp && GIT_LFS_SKIP_SMUDGE=1 git clone -b $modelscope_branch --single-branch https://github.com/modelscope/modelscope.git && cd modelscope && pip install .[all] && cd / && rm -fr /tmp/modelscope && pip cache purge;

cd /tmp && GIT_LFS_SKIP_SMUDGE=1 git clone -b $swift_branch --single-branch https://github.com/modelscope/ms-swift.git && cd ms-swift && pip install .[all] && cd / && rm -fr /tmp/ms-swift && pip cache purge;
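install.sh reads its versions as positional arguments in the order declared at the top of the script; inside the GPU image build it is copied to /tmp/install.sh and invoked like this (the values shown are simply the script's own defaults):

sh /tmp/install.sh 2.4.0 0.19.0 2.4.0 0.6.0 0.6.1 0.7.1 master main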
docker/install_cpu.sh (new file, 29 lines)
@@ -0,0 +1,29 @@
#!/bin/bash

torch_version=${1:-2.4.0}
torchvision_version=${2:-0.19.0}
torchaudio_version=${3:-2.4.0}
modelscope_branch=${4:-master}
swift_branch=${5:-main}

pip install --no-cache-dir funtextprocessing typeguard==2.13.3 scikit-learn -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html

curl -fsSL https://ollama.com/install.sh | sh

pip install --no-cache-dir -U funasr

pip install --no-cache-dir -U qwen_vl_utils pyav librosa timm transformers accelerate peft trl safetensors

pip install --no-cache-dir text2sql_lgesql==1.3.0 git+https://github.com/jin-s13/xtcocoapi.git@v1.14 git+https://github.com/gatagat/lap.git@v0.4.0 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html --force --no-deps

pip install --no-cache-dir mpi4py paint_ldm 'mmcls>=0.21.0' 'mmdet>=2.25.0' 'decord>=0.6.0' ipykernel fasttext fairseq -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html

pip uninstall -y torch torchvision torchaudio

pip install --no-cache-dir -U torch==$torch_version torchvision==$torchvision_version torchaudio==$torchaudio_version --index-url https://download.pytorch.org/whl/cpu

pip uninstall ms-swift modelscope -y

cd /tmp && GIT_LFS_SKIP_SMUDGE=1 git clone -b $modelscope_branch --single-branch https://github.com/modelscope/modelscope.git && cd modelscope && pip install .[all] && cd / && rm -fr /tmp/modelscope && pip cache purge;

cd /tmp && GIT_LFS_SKIP_SMUDGE=1 git clone -b $swift_branch --single-branch https://github.com/modelscope/ms-swift.git && cd ms-swift && pip install .[all] && cd / && rm -fr /tmp/ms-swift && pip cache purge;
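The CPU variant takes a shorter positional argument list (no vLLM, LMDeploy or AutoGPTQ versions); in the CPU image build it is likewise copied to /tmp/install.sh and run as follows (defaults shown):

sh /tmp/install.sh 2.4.0 0.19.0 2.4.0 master main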
@@ -1,99 +0,0 @@
{
  "name": "jupyterlab_active_log",
  "version": "0.1.0",
  "description": "A JupyterLab extension.",
  "keywords": [
    "jupyter",
    "jupyterlab",
    "jupyterlab-extension"
  ],
  "homepage": "https://github.com/github_username/jupyterlab_active_log",
  "bugs": {
    "url": "https://github.com/github_username/jupyterlab_active_log/issues"
  },
  "license": "BSD-3-Clause",
  "files": [
    "lib/**/*.{d.ts,eot,gif,html,jpg,js,js.map,json,png,svg,woff2,ttf}",
    "style/**/*.{css,js,eot,gif,html,jpg,json,png,svg,woff2,ttf}"
  ],
  "main": "lib/index.js",
  "types": "lib/index.d.ts",
  "style": "style/index.css",
  "repository": {
    "type": "git",
    "url": "https://github.com/github_username/jupyterlab_active_log.git"
  },
  "scripts": {
    "build": "jlpm build:lib && jlpm build:labextension:dev",
    "build:prod": "jlpm clean && jlpm build:lib && jlpm build:labextension",
    "build:labextension": "jupyter labextension build .",
    "build:labextension:dev": "jupyter labextension build --development True .",
    "build:lib": "tsc",
    "clean": "jlpm clean:lib",
    "clean:lib": "rimraf lib tsconfig.tsbuildinfo",
    "clean:lintcache": "rimraf .eslintcache .stylelintcache",
    "clean:labextension": "rimraf jupyterlab_active_log/labextension",
    "clean:all": "jlpm clean:lib && jlpm clean:labextension && jlpm clean:lintcache",
    "eslint": "jlpm eslint:check --fix",
    "eslint:check": "eslint . --cache --ext .ts,.tsx",
    "install:extension": "jlpm build",
    "lint": "jlpm stylelint && jlpm prettier && jlpm eslint",
    "lint:check": "jlpm stylelint:check && jlpm prettier:check && jlpm eslint:check",
    "prettier": "jlpm prettier:base --write --list-different",
    "prettier:base": "prettier \"**/*{.ts,.tsx,.js,.jsx,.css,.json,.md}\"",
    "prettier:check": "jlpm prettier:base --check",
    "stylelint": "jlpm stylelint:check --fix",
    "stylelint:check": "stylelint --cache \"style/**/*.css\"",
    "watch": "run-p watch:src watch:labextension",
    "watch:src": "tsc -w",
    "watch:labextension": "jupyter labextension watch ."
  },
  "dependencies": {
    "@jupyterlab/application": "^3.1.0"
  },
  "devDependencies": {
    "@jupyterlab/builder": "^3.1.0",
    "@typescript-eslint/eslint-plugin": "^4.8.1",
    "@typescript-eslint/parser": "^4.8.1",
    "eslint": "^7.14.0",
    "eslint-config-prettier": "^6.15.0",
    "eslint-plugin-prettier": "^3.1.4",
    "npm-run-all": "^4.1.5",
    "prettier": "^2.1.1",
    "rimraf": "^3.0.2",
    "stylelint": "^14.3.0",
    "stylelint-config-prettier": "^9.0.3",
    "stylelint-config-recommended": "^6.0.0",
    "stylelint-config-standard": "~24.0.0",
    "stylelint-prettier": "^2.0.0",
    "typescript": "~4.1.3"
  },
  "sideEffects": [
    "style/*.css",
    "style/index.js"
  ],
  "styleModule": "style/index.js",
  "publishConfig": {
    "access": "public"
  },
  "jupyterlab": {
    "extension": true,
    "outputDir": "jupyterlab_active_log/labextension",
    "_build": {
      "load": "static/remoteEntry.eb3177c3791d7658cc12.js",
      "extension": "./extension",
      "style": "./style"
    }
  },
  "jupyter-releaser": {
    "hooks": {
      "before-build-npm": [
        "python -m pip install jupyterlab~=3.1",
        "jlpm"
      ],
      "before-build-python": [
        "jlpm clean:all"
      ]
    }
  }
}
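The removed package.json above describes a JupyterLab 3.x source extension; its scripts and jupyter-releaser hooks suggest a build flow roughly like the following (a hypothetical sketch assuming Node.js and jlpm are available, not a command sequence taken from this repository):

python -m pip install jupyterlab~=3.1
jlpm install
jlpm build:prod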
@@ -1 +0,0 @@
"use strict";(self.webpackChunkjupyterlab_active_log=self.webpackChunkjupyterlab_active_log||[]).push([[568],{568:(t,e,a)=>{a.r(e),a.d(e,{default:()=>i});const i={id:"jupyterlab_active_log:plugin",autoStart:!0,activate:t=>{console.log("JupyterLab extension jupyterlab_active_log is activated!"),window.consts=Object.assign(Object.assign({},window.consts),{recordUrl:"https://modelscope.cn/api/v1/notebooks/activelog",timerDuration:1e4,timerParams:function(){const t=location.pathname.split("/");let e;return t.length>=2&&(e=t[1]),{site:"dsw",id:e,ext:{pathname:location.pathname}}}});const e=document.body,a=e.insertBefore(document.createElement("script"),e.firstChild);a.setAttribute("id","timer-sdk"),a.setAttribute("src","https://g.alicdn.com/alifanyi/translate-js-sdk/timer.js ")}}}}]);
@@ -1 +0,0 @@
"use strict";(self.webpackChunkjupyterlab_active_log=self.webpackChunkjupyterlab_active_log||[]).push([[747],{150:(e,n,t)=>{t.d(n,{Z:()=>a});var r=t(645),o=t.n(r)()((function(e){return e[1]}));o.push([e.id,"/*\n See the JupyterLab Developer Guide for useful CSS Patterns:\n\n https://jupyterlab.readthedocs.io/en/stable/developer/css.html\n*/\n",""]);const a=o},645:e=>{e.exports=function(e){var n=[];return n.toString=function(){return this.map((function(n){var t=e(n);return n[2]?"@media ".concat(n[2]," {").concat(t,"}"):t})).join("")},n.i=function(e,t,r){"string"==typeof e&&(e=[[null,e,""]]);var o={};if(r)for(var a=0;a<this.length;a++){var i=this[a][0];null!=i&&(o[i]=!0)}for(var c=0;c<e.length;c++){var s=[].concat(e[c]);r&&o[s[0]]||(t&&(s[2]?s[2]="".concat(t," and ").concat(s[2]):s[2]=t),n.push(s))}},n}},379:(e,n,t)=>{var r,o=function(){var e={};return function(n){if(void 0===e[n]){var t=document.querySelector(n);if(window.HTMLIFrameElement&&t instanceof window.HTMLIFrameElement)try{t=t.contentDocument.head}catch(e){t=null}e[n]=t}return e[n]}}(),a=[];function i(e){for(var n=-1,t=0;t<a.length;t++)if(a[t].identifier===e){n=t;break}return n}function c(e,n){for(var t={},r=[],o=0;o<e.length;o++){var c=e[o],s=n.base?c[0]+n.base:c[0],u=t[s]||0,l="".concat(s," ").concat(u);t[s]=u+1;var f=i(l),d={css:c[1],media:c[2],sourceMap:c[3]};-1!==f?(a[f].references++,a[f].updater(d)):a.push({identifier:l,updater:v(d,n),references:1}),r.push(l)}return r}function s(e){var n=document.createElement("style"),r=e.attributes||{};if(void 0===r.nonce){var a=t.nc;a&&(r.nonce=a)}if(Object.keys(r).forEach((function(e){n.setAttribute(e,r[e])})),"function"==typeof e.insert)e.insert(n);else{var i=o(e.insert||"head");if(!i)throw new Error("Couldn't find a style target. This probably means that the value for the 'insert' parameter is invalid.");i.appendChild(n)}return n}var u,l=(u=[],function(e,n){return u[e]=n,u.filter(Boolean).join("\n")});function f(e,n,t,r){var o=t?"":r.media?"@media ".concat(r.media," {").concat(r.css,"}"):r.css;if(e.styleSheet)e.styleSheet.cssText=l(n,o);else{var a=document.createTextNode(o),i=e.childNodes;i[n]&&e.removeChild(i[n]),i.length?e.insertBefore(a,i[n]):e.appendChild(a)}}function d(e,n,t){var r=t.css,o=t.media,a=t.sourceMap;if(o?e.setAttribute("media",o):e.removeAttribute("media"),a&&"undefined"!=typeof btoa&&(r+="\n/*# sourceMappingURL=data:application/json;base64,".concat(btoa(unescape(encodeURIComponent(JSON.stringify(a))))," */")),e.styleSheet)e.styleSheet.cssText=r;else{for(;e.firstChild;)e.removeChild(e.firstChild);e.appendChild(document.createTextNode(r))}}var p=null,h=0;function v(e,n){var t,r,o;if(n.singleton){var a=h++;t=p||(p=s(n)),r=f.bind(null,t,a,!1),o=f.bind(null,t,a,!0)}else t=s(n),r=d.bind(null,t,n),o=function(){!function(e){if(null===e.parentNode)return!1;e.parentNode.removeChild(e)}(t)};return r(e),function(n){if(n){if(n.css===e.css&&n.media===e.media&&n.sourceMap===e.sourceMap)return;r(e=n)}else o()}}e.exports=function(e,n){(n=n||{}).singleton||"boolean"==typeof n.singleton||(n.singleton=(void 0===r&&(r=Boolean(window&&document&&document.all&&!window.atob)),r));var t=c(e=e||[],n);return function(e){if(e=e||[],"[object Array]"===Object.prototype.toString.call(e)){for(var r=0;r<t.length;r++){var o=i(t[r]);a[o].references--}for(var s=c(e,n),u=0;u<t.length;u++){var l=i(t[u]);0===a[l].references&&(a[l].updater(),a.splice(l,1))}t=s}}}},747:(e,n,t)=>{t.r(n);var r=t(379),o=t.n(r),a=t(150);o()(a.Z,{insert:"head",singleton:!1}),a.Z.locals}}]);
@@ -1 +0,0 @@
var _JUPYTERLAB;(()=>{"use strict";var e,r,t={293:(e,r,t)=>{var o={"./index":()=>t.e(568).then((()=>()=>t(568))),"./extension":()=>t.e(568).then((()=>()=>t(568))),"./style":()=>t.e(747).then((()=>()=>t(747)))},a=(e,r)=>(t.R=r,r=t.o(o,e)?o[e]():Promise.resolve().then((()=>{throw new Error('Module "'+e+'" does not exist in container.')})),t.R=void 0,r),n=(e,r)=>{if(t.S){var o="default",a=t.S[o];if(a&&a!==e)throw new Error("Container initialization failed as it has already been initialized with a different share scope");return t.S[o]=e,t.I(o,r)}};t.d(r,{get:()=>a,init:()=>n})}},o={};function a(e){var r=o[e];if(void 0!==r)return r.exports;var n=o[e]={id:e,exports:{}};return t[e](n,n.exports,a),n.exports}a.m=t,a.c=o,a.n=e=>{var r=e&&e.__esModule?()=>e.default:()=>e;return a.d(r,{a:r}),r},a.d=(e,r)=>{for(var t in r)a.o(r,t)&&!a.o(e,t)&&Object.defineProperty(e,t,{enumerable:!0,get:r[t]})},a.f={},a.e=e=>Promise.all(Object.keys(a.f).reduce(((r,t)=>(a.f[t](e,r),r)),[])),a.u=e=>e+"."+{568:"a92ae44b87625ab09aed",747:"63b4c3d22bfe458b352b"}[e]+".js?v="+{568:"a92ae44b87625ab09aed",747:"63b4c3d22bfe458b352b"}[e],a.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),a.o=(e,r)=>Object.prototype.hasOwnProperty.call(e,r),e={},r="jupyterlab_active_log:",a.l=(t,o,n,i)=>{if(e[t])e[t].push(o);else{var l,u;if(void 0!==n)for(var c=document.getElementsByTagName("script"),d=0;d<c.length;d++){var s=c[d];if(s.getAttribute("src")==t||s.getAttribute("data-webpack")==r+n){l=s;break}}l||(u=!0,(l=document.createElement("script")).charset="utf-8",l.timeout=120,a.nc&&l.setAttribute("nonce",a.nc),l.setAttribute("data-webpack",r+n),l.src=t),e[t]=[o];var p=(r,o)=>{l.onerror=l.onload=null,clearTimeout(f);var a=e[t];if(delete e[t],l.parentNode&&l.parentNode.removeChild(l),a&&a.forEach((e=>e(o))),r)return r(o)},f=setTimeout(p.bind(null,void 0,{type:"timeout",target:l}),12e4);l.onerror=p.bind(null,l.onerror),l.onload=p.bind(null,l.onload),u&&document.head.appendChild(l)}},a.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},(()=>{a.S={};var e={},r={};a.I=(t,o)=>{o||(o=[]);var n=r[t];if(n||(n=r[t]={}),!(o.indexOf(n)>=0)){if(o.push(n),e[t])return e[t];a.o(a.S,t)||(a.S[t]={});var i=a.S[t],l="jupyterlab_active_log",u=[];return"default"===t&&((e,r,t,o)=>{var n=i[e]=i[e]||{},u=n[r];(!u||!u.loaded&&(1!=!u.eager?o:l>u.from))&&(n[r]={get:()=>a.e(568).then((()=>()=>a(568))),from:l,eager:!1})})("jupyterlab_active_log","0.1.0"),e[t]=u.length?Promise.all(u).then((()=>e[t]=1)):1}}})(),(()=>{var e;a.g.importScripts&&(e=a.g.location+"");var r=a.g.document;if(!e&&r&&(r.currentScript&&(e=r.currentScript.src),!e)){var t=r.getElementsByTagName("script");t.length&&(e=t[t.length-1].src)}if(!e)throw new Error("Automatic publicPath is not supported in this browser");e=e.replace(/#.*$/,"").replace(/\?.*$/,"").replace(/\/[^\/]+$/,"/"),a.p=e})(),(()=>{var e={346:0};a.f.j=(r,t)=>{var o=a.o(e,r)?e[r]:void 0;if(0!==o)if(o)t.push(o[2]);else{var n=new Promise(((t,a)=>o=e[r]=[t,a]));t.push(o[2]=n);var i=a.p+a.u(r),l=new Error;a.l(i,(t=>{if(a.o(e,r)&&(0!==(o=e[r])&&(e[r]=void 0),o)){var n=t&&("load"===t.type?"missing":t.type),i=t&&t.target&&t.target.src;l.message="Loading chunk "+r+" failed.\n("+n+": "+i+")",l.name="ChunkLoadError",l.type=n,l.request=i,o[1](l)}}),"chunk-"+r,r)}};var r=(r,t)=>{var 
o,n,[i,l,u]=t,c=0;if(i.some((r=>0!==e[r]))){for(o in l)a.o(l,o)&&(a.m[o]=l[o]);u&&u(a)}for(r&&r(t);c<i.length;c++)n=i[c],a.o(e,n)&&e[n]&&e[n][0](),e[n]=0},t=self.webpackChunkjupyterlab_active_log=self.webpackChunkjupyterlab_active_log||[];t.forEach(r.bind(null,0)),t.push=r.bind(null,t.push.bind(t))})(),a.nc=void 0;var n=a(293);(_JUPYTERLAB=void 0===_JUPYTERLAB?{}:_JUPYTERLAB).jupyterlab_active_log=n})();
@@ -1,4 +0,0 @@
/* This is a generated file of CSS imports */
/* It was generated by @jupyterlab/builder in Build.ensureAssets() */

import 'jupyterlab_active_log/style/index.js';
@@ -1,16 +0,0 @@
{
"packages": [
{
"name": "css-loader",
"versionInfo": "5.2.7",
"licenseId": "MIT",
"extractedText": "Copyright JS Foundation and other contributors\n\nPermission is hereby granted, free of charge, to any person obtaining\na copy of this software and associated documentation files (the\n'Software'), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish,\ndistribute, sublicense, and/or sell copies of the Software, and to\npermit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be\nincluded in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\nIN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\nCLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\nTORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\nSOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"
},
{
"name": "style-loader",
"versionInfo": "2.0.0",
"licenseId": "MIT",
"extractedText": "Copyright JS Foundation and other contributors\n\nPermission is hereby granted, free of charge, to any person obtaining\na copy of this software and associated documentation files (the\n'Software'), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish,\ndistribute, sublicense, and/or sell copies of the Software, and to\npermit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be\nincluded in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\nIN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\nCLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\nTORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\nSOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"
}
]
}
@@ -1,54 +0,0 @@
# syntax = docker/dockerfile:experimental
#
# NOTE: To build this you will need a docker version > 18.06 with
# experimental enabled and DOCKER_BUILDKIT=1
#
# If you do not use buildkit you are not going to have a good time
#
# For reference:
#   https://docs.docker.com/develop/develop-images/build_enhancements/

# ARG BASE_IMAGE=reg.docker.alibaba-inc.com/pai-dlc/pytorch-training:1.10PAI-gpu-py36-cu113-ubuntu18.04
# FROM ${BASE_IMAGE} as dev-base

# FROM reg.docker.alibaba-inc.com/pai-dlc/pytorch-training:1.10PAI-gpu-py36-cu113-ubuntu18.04 as dev-base
FROM pytorch/pytorch:1.10.0-cuda11.3-cudnn8-devel
# FROM pytorch/pytorch:1.10.0-cuda11.3-cudnn8-runtime
# config pip source
RUN mkdir /root/.pip
COPY docker/rcfiles/pip.conf.tsinghua /root/.pip/pip.conf
COPY docker/rcfiles/sources.list.aliyun /etc/apt/sources.list

# Install essential Ubuntu packages
RUN apt-get update &&\
    apt-get install -y software-properties-common \
    build-essential \
    git \
    wget \
    vim \
    curl \
    zip \
    zlib1g-dev \
    unzip \
    pkg-config \
    libsndfile1

# install modelscope and its python env
WORKDIR /opt/modelscope
COPY . .
RUN pip install -r requirements.txt
# RUN --mount=type=cache,target=/opt/ccache \
#     python setup.py install

# opencv-python-headless conflicts with the installed opencv-python
RUN python setup.py install \
    && pip uninstall -y opencv-python-headless

# prepare modelscope libs
COPY docker/scripts/install_libs.sh /tmp/
RUN bash /tmp/install_libs.sh && \
    rm -rf /tmp/install_libs.sh

ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/modelscope/lib64

WORKDIR /workspace
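Per the NOTE at the top of the removed Dockerfile, it was intended to be built with BuildKit enabled; a hypothetical invocation would be along these lines (the file path and image tag are placeholders, not taken from the commit):

DOCKER_BUILDKIT=1 docker build -f <path-to-this-Dockerfile> -t modelscope:dev .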
@@ -1,14 +0,0 @@
deb https://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse
# deb-src https://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse

deb https://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse
# deb-src https://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse

deb https://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse
# deb-src https://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse

# deb https://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse
# deb-src https://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse

deb https://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse
# deb-src https://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse
@@ -1,13 +0,0 @@
# Source mirrors are commented out by default to speed up apt update; uncomment them if needed
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-updates main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-updates main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-backports main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-backports main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-security main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-security main restricted universe multiverse

# Pre-release repository, not recommended to enable
# deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-proposed main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-proposed main restricted universe multiverse
@@ -1,10 +0,0 @@
set nocompatible
set encoding=utf-8
set hlsearch
set smartindent
set ruler
set number
set ts=2
set sw=2
set expandtab
autocmd FileType make setlocal noexpandtab
@@ -1,4 +0,0 @@
git clone -b v2.3.3 https://github.com/Dao-AILab/flash-attention && \
    cd flash-attention && MAX_JOBS=46 python setup.py install && \
    cd .. && \
    rm -rf flash-attention
@@ -1,14 +0,0 @@
export CMAKE_BUILD_PARALLEL_LEVEL=36 && export MAX_JOBS=4 && export CMAKE_CUDA_ARCHITECTURES="50;52;60;61;70;75;80;86" \
    && pip install --no-cache-dir fvcore iopath \
    && curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz \
    && tar xzf 1.10.0.tar.gz \
    && export CUB_HOME=$PWD/cub-1.10.0 \
    && pip install "git+https://github.com/facebookresearch/pytorch3d.git@stable" \
    && rm -fr 1.10.0.tar.gz cub-1.10.0 \
    && apt-get update \
    && apt-get install -y --no-install-recommends pkg-config libglvnd0 libgl1 libglx0 libegl1 libgles2 libglvnd-dev libgl1-mesa-dev libegl1-mesa-dev libgles2-mesa-dev -y \
    && git clone https://github.com/NVlabs/nvdiffrast.git \
    && cd nvdiffrast \
    && pip install --no-cache-dir . \
    && cd .. \
    && rm -rf nvdiffrast