diff --git a/.dev_scripts/build_base_image.sh b/.dev_scripts/build_base_image.sh deleted file mode 100644 index c338d6a6..00000000 --- a/.dev_scripts/build_base_image.sh +++ /dev/null @@ -1,168 +0,0 @@ -#!/bin/bash -# default values. -BASE_CPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu -BASE_GPU_CUDA113_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:20.04-cuda11.3.0-cudnn8-devel -BASE_GPU_CUDA117_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:20.04-cuda11.7.1-cudnn8-devel -BASE_GPU_CUDA118_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:20.04-cuda11.8.0-cudnn8-devel -BASE_GPU_CUDA121_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:22.04-cuda12.1.0-cudnn8-devel -BASE_GPU_CUDA122_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:22.04-cuda11.2.2-cudnn8-devel -MODELSCOPE_REPO_ADDRESS=reg.docker.alibaba-inc.com/modelscope/modelscope -python_version=3.7.13 -torch_version=1.11.0 -cuda_version=11.7.1 -cudatoolkit_version=11.3 -tensorflow_version=1.15.5 -os_version=20.04 -version=None -is_cpu=False -is_dryrun=False -function usage(){ - echo "usage: build.sh " - echo " --os=ubuntu_version set ubuntu os version, default: 20.04" - echo " --python=python_version set python version, default: $python_version" - echo " --cuda=cuda_version set cuda version,only[11.3.0, 11.7.1], fefault: $cuda_version" - echo " --torch=torch_version set pytorch version, fefault: $torch_version" - echo " --tensorflow=tensorflow_version set tensorflow version, default: $tensorflow_version" - echo " --test option for run test before push image, only push on ci test pass" - echo " --cpu option for build cpu version" - echo " --push option for push image to remote repo" - echo " --dryrun create Dockerfile not build" -} -for i in "$@"; do - case $i in - --os=*) - os_version="${i#*=}" - shift - ;; - --python=*) - python_version="${i#*=}" - shift - ;; - --cuda=*) - cuda_version="${i#*=}" - shift # pytorch version - ;; - --torch=*) - torch_version="${i#*=}" - shift # pytorch version - ;; - --tensorflow=*) - tensorflow_version="${i#*=}" - shift # tensorflow version - ;; - --version=*) - version="${i#*=}" - shift # version - ;; - --cpu) - is_cpu=True - shift # is cpu image - ;; - --push) - is_push=True - shift # option for push image to remote repo - ;; - --dryrun) - is_dryrun=True - shift - ;; - --help) - usage - exit 0 - ;; - -*|--*) - echo "Unknown option $i" - usage - exit 1 - ;; - *) - ;; - esac -done - -if [ "$cuda_version" == 11.3.0 ]; then - echo "Building base image cuda11.3.0" - BASE_GPU_IMAGE=$os_version-$cudatoolkit_version-cudnn8-devel - cudatoolkit_version=cu113 -elif [ "$cuda_version" == 11.7.1 ]; then - echo "Building base image cuda11.7.1" - cudatoolkit_version=cu117 - BASE_GPU_IMAGE=$BASE_GPU_CUDA117_IMAGE -elif [ "$cuda_version" == 11.8.0 ]; then - echo "Building base image cuda11.8.0" - cudatoolkit_version=cu118 - BASE_GPU_IMAGE=$MODELSCOPE_REPO_ADDRESS:$os_version-cuda$cuda_version-cudnn8-devel -elif [ "$cuda_version" == 12.1.0 ]; then - cudatoolkit_version=cu121 - BASE_GPU_IMAGE=$BASE_GPU_CUDA121_IMAGE -else - echo "Unsupport cuda version: $cuda_version" - exit 1 -fi - -if [ "$is_cpu" == "True" ]; then - export BASE_IMAGE=$BASE_CPU_IMAGE:$os_version - base_tag=ubuntu$os_version - export USE_GPU=False -else - export BASE_IMAGE=$BASE_GPU_IMAGE - base_tag=ubuntu$os_version-cuda$cuda_version - export USE_GPU=True -fi - -if [[ $python_version == 3.7* ]]; then - base_tag=$base_tag-py37 -elif [[ $python_version == 3.8* ]]; then - base_tag=$base_tag-py38 -elif [[ $python_version == 3.10* ]]; then 
- base_tag=$base_tag-py310 -else - echo "Unsupport python version: $python_version" - exit 1 -fi -# target_image_tag=$base_tag-torch$torch_version-tf$tensorflow_version-base -# cpu no tensorflow -if [ "$is_cpu" == "True" ]; then - target_image_tag=$base_tag-torch$torch_version-base -else - target_image_tag=$base_tag-torch$torch_version-tf$tensorflow_version-base -fi - -export IMAGE_TO_BUILD=$MODELSCOPE_REPO_ADDRESS:$target_image_tag -export PYTHON_VERSION=$python_version -export TORCH_VERSION=$torch_version -export CUDATOOLKIT_VERSION=$cudatoolkit_version -export TENSORFLOW_VERSION=$tensorflow_version -echo "From: $BASE_IMAGE build: $target_image_tag" -echo -e "Building image with:\npython$python_version\npytorch$torch_version\ntensorflow:$tensorflow_version\ncudatoolkit:$cudatoolkit_version\ncpu:$is_cpu\n" -docker_file_content=`cat docker/Dockerfile.ubuntu_base` -printf "$docker_file_content" > Dockerfile - -if [ "$is_dryrun" == "True" ]; then - echo 'Dockerfile created' - exit 0 -fi - -# DOCKER_BUILDKIT=0 -while true -do - DOCKER_BUILDKIT=0 docker build -t $IMAGE_TO_BUILD \ - --build-arg USE_GPU \ - --build-arg BASE_IMAGE \ - --build-arg PYTHON_VERSION \ - --build-arg TORCH_VERSION \ - --build-arg CUDATOOLKIT_VERSION \ - --build-arg TENSORFLOW_VERSION \ - -f Dockerfile . - if [ $? -eq 0 ]; then - echo "Image build done" - break - else - echo "Running docker build command error, we will retry" - fi -done - -if [ "$is_push" == "True" ]; then - echo "Pushing image: $IMAGE_TO_BUILD" - docker push $IMAGE_TO_BUILD -fi diff --git a/.dev_scripts/build_image.sh b/.dev_scripts/build_image.sh deleted file mode 100644 index f22d70cd..00000000 --- a/.dev_scripts/build_image.sh +++ /dev/null @@ -1,204 +0,0 @@ -#!/bin/bash -# default values. -#BASE_PY37_CPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-py37-torch1.11.0-tf1.15.5-base -#BASE_PY38_CPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-py38-torch1.11.0-tf1.15.5-base -#BASE_PY38_GPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-cuda11.3.0-py38-torch1.11.0-tf1.15.5-base -#BASE_PY38_GPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-cuda11.7.1-py38-torch2.0.1-tf1.15.5-base -#BASE_PY38_GPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-cuda11.7.1-py38-torch1.13.1-tf2.6.0-base -#BASE_PY37_GPU_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-cuda11.3.0-py37-torch1.11.0-tf1.15.5-base -MODELSCOPE_REPO_ADDRESS=reg.docker.alibaba-inc.com/modelscope/modelscope -python_version=3.7.13 -torch_version=1.11.0 -cudatoolkit_version=11.7 -tensorflow_version=1.15.5 -modelscope_version=None -cuda_version=11.7.1 -is_dsw=False -is_cpu=False -build_branch='master' -function usage(){ - echo "usage: build.sh " - echo " --python=python_version set python version, default: $python_version" - echo " --cuda=cuda_version set cuda version,only[11.3.0, 11.7.1], fefault: $cuda_version" - echo " --torch=torch_version set pytorch version, fefault: $torch_version" - echo " --tensorflow=tensorflow_version set tensorflow version, default: $tensorflow_version" - echo " --modelscope=modelscope_version set modelscope version, default: $modelscope_version" - echo " --branch=build_branch set modelscope build branch, default: $build_branch" - echo " --cpu option for build cpu version" - echo " --dsw option for build dsw version" - echo " --push option for push image to remote repo" -} -for i in "$@"; do - case $i in - --python=*) - python_version="${i#*=}" - shift - ;; 
- --cuda=*) - cuda_version="${i#*=}" - if [ "$cuda_version" == "11.3.0" ]; then - cudatoolkit_version=11.3 - elif [ "$cuda_version" == "11.7.1" ]; then - cudatoolkit_version=11.7 - elif [ "$cuda_version" == "11.8.0" ]; then - cudatoolkit_version=11.8 - elif [ "$cuda_version" == "12.1.0" ]; then - cudatoolkit_version=12.1 - else - echo "Unsupport cuda version $cuda_version" - exit 1 - fi - shift # pytorch version - ;; - --torch=*) - torch_version="${i#*=}" - shift # pytorch version - ;; - --tensorflow=*) - tensorflow_version="${i#*=}" - shift # tensorflow version - ;; - --cudatoolkit=*) - cudatoolkit_version="${i#*=}" - shift # cudatoolkit for pytorch - ;; - --modelscope=*) - modelscope_version="${i#*=}" - shift # modelscope version - ;; - --branch=*) - build_branch="${i#*=}" - shift # build branch - ;; - --cpu) - is_cpu=True - shift # is cpu image - ;; - --dsw) - is_dsw=True - shift # is dsw, will set dsw cache location - ;; - --push) - is_push=True - shift # option for push image to remote repo - ;; - --help) - usage - exit 0 - ;; - -*|--*) - echo "Unknown option $i" - usage - exit 1 - ;; - *) - ;; - esac -done - -if [ "$modelscope_version" == "None" ]; then - echo "ModelScope version must specify!" - exit 1 -fi -if [ "$is_cpu" == "True" ]; then - base_tag=ubuntu20.04 - export USE_GPU=False -else - base_tag=ubuntu20.04-cuda$cuda_version - export USE_GPU=True -fi - -if [[ $python_version == 3.7* ]]; then - if [ "$is_cpu" == "True" ]; then - echo "Building python3.7 cpu image" - export BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-py37-torch$torch_version-tf$tensorflow_version-base - else - echo "Building python3.7 gpu image" - export BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-cuda$cuda_version-py37-torch$torch_version-tf$tensorflow_version-base - fi - base_tag=$base_tag-py37 -elif [[ $python_version == 3.8* ]]; then - if [ "$is_cpu" == "True" ]; then - echo "Building python3.8 cpu image" - export BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-py38-torch$torch_version-tf$tensorflow_version-base - else - echo "Building python3.8 gpu image" - export BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-cuda$cuda_version-py38-torch$torch_version-tf$tensorflow_version-base - fi - base_tag=$base_tag-py38 -elif [[ $python_version == 3.10* ]]; then - if [ "$is_cpu" == "True" ]; then - echo "Building python3.10 cpu image" - base_tag=ubuntu22.04-py310 - export BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu22.04-py310-torch$torch_version-base - else - echo "Building python3.10 gpu image" - base_tag=ubuntu22.04-cuda$cuda_version-py310 - # reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu22.04-cuda12.1.0-py310-torch2.1.0-tf2.14.0-base - export BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu22.04-cuda$cuda_version-py310-torch$torch_version-tf$tensorflow_version-base - fi -else - echo "Unsupport python version: $python_version" - exit 1 -fi -# cpu not intall tensorflow -# target_image_tag=$base_tag-torch$torch_version-tf$tensorflow_version-$modelscope_version-test -if [ "$is_cpu" == "True" ]; then - target_image_tag=$base_tag-torch$torch_version-$modelscope_version-test -else - target_image_tag=$base_tag-torch$torch_version-tf$tensorflow_version-$modelscope_version-test -fi -export IMAGE_TO_BUILD=$MODELSCOPE_REPO_ADDRESS:$target_image_tag -export PYTHON_VERSION=$python_version -export TORCH_VERSION=$torch_version -export CUDATOOLKIT_VERSION=$cudatoolkit_version 
-export TENSORFLOW_VERSION=$tensorflow_version -echo -e "Building image with:\npython$python_version\npytorch$torch_version\ntensorflow:$tensorflow_version\ncudatoolkit:$cudatoolkit_version\ncpu:$is_cpu\nis_ci:$is_ci_test\nis_dsw:$is_dsw\n" -echo -e "Base iamge: $BASE_IMAGE" -docker_file_content=`cat docker/Dockerfile.ubuntu` - -BUILD_HASH_ID=$(git rev-parse HEAD) -# install thrid part library -docker_file_content="${docker_file_content} \nRUN export COMMIT_ID=$BUILD_HASH_ID && pip install --no-cache-dir -U adaseq pai-easycv && pip install --no-cache-dir -U 'ms-swift' 'decord' 'qwen_vl_utils' 'pyav' 'librosa' 'funasr' autoawq 'timm>0.9.5' 'transformers' 'accelerate' 'peft' 'optimum' 'trl' 'outlines<0.1'" - -docker_file_content="${docker_file_content} \nRUN pip uninstall modelscope -y && export COMMIT_ID=$BUILD_HASH_ID && cd /tmp && GIT_LFS_SKIP_SMUDGE=1 git clone -b $build_branch --single-branch $REPO_URL && cd modelscope && pip install . && cd / && rm -fr /tmp/modelscope && pip cache purge;" - -echo "$is_dsw" -if [ "$is_dsw" == "False" ]; then - echo "Not DSW image" -else - echo "Building dsw image will need set ModelScope lib cache location." - docker_file_content="${docker_file_content} \nENV MODELSCOPE_CACHE=/mnt/workspace/.cache/modelscope" - # pre compile extension - docker_file_content="${docker_file_content} \nRUN pip uninstall -y tb-nightly tensorboard && pip install --no-cache-dir -U tensorboard && TORCH_CUDA_ARCH_LIST='6.0 6.1 7.0 7.5 8.0 8.9 9.0 8.6+PTX' python -c 'from modelscope.utils.pre_compile import pre_compile_all;pre_compile_all()'" -fi - - -docker_file_content="${docker_file_content} \n RUN pip config set global.index-url https://mirrors.aliyun.com/pypi/simple && \ - pip config set install.trusted-host mirrors.aliyun.com && \ - cp /tmp/resources/ubuntu2204.aliyun /etc/apt/sources.list " - -printf "$docker_file_content" > Dockerfile - -while true -do - docker build --progress=plain -t $IMAGE_TO_BUILD \ - --build-arg USE_GPU \ - --build-arg BASE_IMAGE \ - --build-arg PYTHON_VERSION \ - --build-arg TORCH_VERSION \ - --build-arg CUDATOOLKIT_VERSION \ - --build-arg TENSORFLOW_VERSION \ - -f Dockerfile . - if [ $? 
-eq 0 ]; then - echo "Image build done" - break - else - echo "Running docker build command error, we will retry" - fi -done - -if [ "$is_push" == "True" ]; then - echo "Pushing image: $IMAGE_TO_BUILD" - docker push $IMAGE_TO_BUILD -fi diff --git a/.dev_scripts/run_docker.sh b/.dev_scripts/run_docker.sh deleted file mode 100644 index 8999458a..00000000 --- a/.dev_scripts/run_docker.sh +++ /dev/null @@ -1,7 +0,0 @@ -#sudo docker run --name zwm_maas -v /home/wenmeng.zwm/workspace:/home/wenmeng.zwm/workspace --net host -ti reg.docker.alibaba-inc.com/pai-dlc/tensorflow-training:2.3-gpu-py36-cu101-ubuntu18.04 bash -#sudo docker run --name zwm_maas_pytorch -v /home/wenmeng.zwm/workspace:/home/wenmeng.zwm/workspace --net host -ti reg.docker.alibaba-inc.com/pai-dlc/pytorch-training:1.10PAI-gpu-py36-cu113-ubuntu18.04 bash -CONTAINER_NAME=modelscope-dev -IMAGE_NAME=registry.cn-shanghai.aliyuncs.com/modelscope/modelscope -IMAGE_VERSION=v0.1.1-16-g62856fa-devel -MOUNT_DIR=/home/wenmeng.zwm/workspace -sudo docker run --name $CONTAINER_NAME -v $MOUNT_DIR:$MOUNT_DIR --net host -ti ${IMAGE_NAME}:${IMAGE_VERSION} bash diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml new file mode 100644 index 00000000..0966e294 --- /dev/null +++ b/.github/workflows/docker-image.yml @@ -0,0 +1,48 @@ +name: Build Docker Images + +on: + workflow_dispatch: + inputs: + modelscope_branch: + description: 'ModelScope branch to build from' + required: true + image_type: + description: 'The image type to build' + required: true + modelscope_version: + description: 'ModelScope version to use' + required: true + swift_branch: + description: 'SWIFT branch to use' + required: true + torch_version: + description: 'Torch version to use' + required: false + torchvision_version: + description: 'Torchvision version to use' + required: false + cuda_version: + description: 'CUDA version to use' + required: false + torchaudio_version: + description: 'TorchAudio version to use' + required: false + vllm_version: + description: 'VLLM version to use' + required: false + lmdeploy_version: + description: 'LMDeploy version to use' + required: false + +jobs: + build: + runs-on: [modelscope-self-hosted-us] + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.modelscope_branch }} + + - name: Build Docker Image + run: python docker/build_image.py --image_type ${{ github.event.inputs.image_type }} --modelscope_branch ${{ github.event.inputs.modelscope_branch }} --modelscope_version ${{ github.event.inputs.modelscope_version }} --swift_branch ${{ github.event.inputs.swift_branch }} --torch_version ${{ github.event.inputs.torch_version }} --torchvision_version ${{ github.event.inputs.torchvision_version }} --cuda_version ${{ github.event.inputs.cuda_version }} --torchaudio_version ${{ github.event.inputs.torchaudio_version }} --vllm_version ${{ github.event.inputs.vllm_version }} --lmdeploy_version ${{ github.event.inputs.lmdeploy_version }} diff --git a/docker/.dockerignore b/docker/.dockerignore index 14284cb6..0fc13a9b 100644 --- a/docker/.dockerignore +++ b/docker/.dockerignore @@ -1,4 +1,3 @@ -*.sh *.md *.dockerfile *.zip diff --git a/docker/Dockerfile.extra_install b/docker/Dockerfile.extra_install new file mode 100644 index 00000000..01129297 --- /dev/null +++ b/docker/Dockerfile.extra_install @@ -0,0 +1,141 @@ +ENV TZ=Asia/Shanghai +ENV arch=x86_64 +SHELL ["/bin/bash", "-c"] +COPY docker/rcfiles /tmp/resources +RUN apt-get update && apt-get upgrade -y && 
apt-get install -y --reinstall ca-certificates && \ + apt-get install -y make apt-utils openssh-server locales wget git strace gdb sox libopenmpi-dev curl \ + iputils-ping net-tools iproute2 autoconf automake gperf libre2-dev libssl-dev \ + libtool libcurl4-openssl-dev libb64-dev libgoogle-perftools-dev patchelf \ + rapidjson-dev scons software-properties-common pkg-config unzip zlib1g-dev \ + libbz2-dev libreadline-dev libsqlite3-dev llvm libncurses5-dev libncursesw5-dev xz-utils tk-dev liblzma-dev \ + libarchive-dev libxml2-dev libnuma-dev cmake \ + libgeos-dev strace vim ffmpeg libsm6 tzdata language-pack-zh-hans \ + ttf-wqy-microhei ttf-wqy-zenhei xfonts-wqy libxext6 build-essential ninja-build \ + libjpeg-dev libpng-dev && \ + wget https://packagecloud.io/github/git-lfs/packages/debian/bullseye/git-lfs_3.2.0_amd64.deb/download -O ./git-lfs_3.2.0_amd64.deb && \ + dpkg -i ./git-lfs_3.2.0_amd64.deb && \ + rm -f ./git-lfs_3.2.0_amd64.deb && \ + locale-gen zh_CN && \ + locale-gen zh_CN.utf8 && \ + update-locale LANG=zh_CN.UTF-8 LC_ALL=zh_CN.UTF-8 LANGUAGE=zh_CN.UTF-8 && \ + ln -fs /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \ + dpkg-reconfigure --frontend noninteractive tzdata && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +ENV LANG=zh_CN.UTF-8 LANGUAGE=zh_CN.UTF-8 LC_ALL=zh_CN.UTF-8 +RUN wget -O /tmp/boost.tar.gz https://boostorg.jfrog.io/artifactory/main/release/1.80.0/source/boost_1_80_0.tar.gz && \ + cd /tmp && tar xzf boost.tar.gz && \ + mv /tmp/boost_1_80_0/boost /usr/include/boost && \ + rm -rf /tmp/boost_1_80_0 && rm -rf boost.tar.gz + +#install and config python copy from https://github.com/docker-library/python/blob/1b7a1106674a21e699b155cbd53bf39387284cca/3.10/bookworm/Dockerfile +ARG PYTHON_VERSION={python_version} +ENV PATH /usr/local/bin:$PATH +ENV GPG_KEY A035C8C19219BA821ECEA86B64E628F8D684696D +ENV PYTHON_VERSION {python_version} + +#install and config python copy from https://github.com/docker-library/python/blob/1b7a1106674a21e699b155cbd53bf39387284cca/3.10/bookworm/Dockerfile +ARG PYTHON_VERSION={python_version} +ENV PATH /usr/local/bin:$PATH +ENV GPG_KEY A035C8C19219BA821ECEA86B64E628F8D684696D +ENV PYTHON_VERSION {python_version} + +RUN set -eux; \ + \ + wget -O python.tar.xz "https://www.python.org/ftp/python/${PYTHON_VERSION%%[a-z]*}/Python-$PYTHON_VERSION.tar.xz"; \ + wget -O python.tar.xz.asc "https://www.python.org/ftp/python/${PYTHON_VERSION%%[a-z]*}/Python-$PYTHON_VERSION.tar.xz.asc"; \ + GNUPGHOME="$(mktemp -d)"; export GNUPGHOME; \ + gpg --batch --keyserver hkps://keys.openpgp.org --recv-keys "$GPG_KEY"; \ + gpg --batch --verify python.tar.xz.asc python.tar.xz; \ + gpgconf --kill all; \ + rm -rf "$GNUPGHOME" python.tar.xz.asc; \ + mkdir -p /usr/src/python; \ + tar --extract --directory /usr/src/python --strip-components=1 --file python.tar.xz; \ + rm python.tar.xz; \ + \ + cd /usr/src/python; \ + gnuArch="$(dpkg-architecture --query DEB_BUILD_GNU_TYPE)"; \ + ./configure \ + --build="$gnuArch" \ + --enable-loadable-sqlite-extensions \ + --enable-optimizations \ + --enable-option-checking=fatal \ + --enable-shared \ + --with-lto \ + --with-system-expat \ + --without-ensurepip \ + ; \ + nproc="$(nproc)"; \ + EXTRA_CFLAGS="$(dpkg-buildflags --get CFLAGS)"; \ + LDFLAGS="$(dpkg-buildflags --get LDFLAGS)"; \ + make -j "$nproc" \ + "EXTRA_CFLAGS=${EXTRA_CFLAGS:-}" \ + "LDFLAGS=${LDFLAGS:-}" \ + "PROFILE_TASK=${PROFILE_TASK:-}" \ + ; \ + rm python; \ + make -j "$nproc" \ + "EXTRA_CFLAGS=${EXTRA_CFLAGS:-}" \ + 
"LDFLAGS=${LDFLAGS:--Wl},-rpath='\$\$ORIGIN/../lib'" \ + "PROFILE_TASK=${PROFILE_TASK:-}" \ + python \ + ; \ + make install; \ + \ + bin="$(readlink -ve /usr/local/bin/python3)"; \ + dir="$(dirname "$bin")"; \ + mkdir -p "/usr/share/gdb/auto-load/$dir"; \ + cp -vL Tools/gdb/libpython.py "/usr/share/gdb/auto-load/$bin-gdb.py"; \ + \ + cd /; \ + rm -rf /usr/src/python; \ + \ + find /usr/local -depth \ + \( \ + \( -type d -a \( -name test -o -name tests -o -name idle_test \) \) \ + -o \( -type f -a \( -name '*.pyc' -o -name '*.pyo' -o -name 'libpython*.a' \) \) \ + \) -exec rm -rf '{}' + \ + ; \ + \ + ldconfig; \ + \ + python3 --version + +# make some useful symlinks that are expected to exist ("/usr/local/bin/python" and friends) +RUN set -eux; \ + for src in idle3 pydoc3 python3 python3-config; do \ + dst="$(echo "$src" | tr -d 3)"; \ + [ -s "/usr/local/bin/$src" ]; \ + [ ! -e "/usr/local/bin/$dst" ]; \ + ln -svT "$src" "/usr/local/bin/$dst"; \ + done + +# if this is called "PIP_VERSION", pip explodes with "ValueError: invalid truth value ''" +ENV PYTHON_PIP_VERSION 23.0.1 +# https://github.com/docker-library/python/issues/365 +ENV PYTHON_SETUPTOOLS_VERSION 65.5.1 +# https://github.com/pypa/get-pip +ENV PYTHON_GET_PIP_URL https://github.com/pypa/get-pip/raw/dbf0c85f76fb6e1ab42aa672ffca6f0a675d9ee4/public/get-pip.py +ENV PYTHON_GET_PIP_SHA256 dfe9fd5c28dc98b5ac17979a953ea550cec37ae1b47a5116007395bfacff2ab9 + +RUN set -eux; \ + \ + wget -O get-pip.py "$PYTHON_GET_PIP_URL"; \ + echo "$PYTHON_GET_PIP_SHA256 *get-pip.py" | sha256sum -c -; \ + \ + export PYTHONDONTWRITEBYTECODE=1; \ + \ + python get-pip.py \ + --disable-pip-version-check \ + --no-cache-dir \ + --no-compile \ + "pip==$PYTHON_PIP_VERSION" \ + "setuptools==$PYTHON_SETUPTOOLS_VERSION" \ + ; \ + rm -f get-pip.py; \ + \ + pip --version +# end of install python + +pip install tf-keras -i https://mirrors.aliyun.com/pypi/simple diff --git a/docker/Dockerfile.ubuntu b/docker/Dockerfile.ubuntu index ca6d5e50..f3c326ec 100644 --- a/docker/Dockerfile.ubuntu +++ b/docker/Dockerfile.ubuntu @@ -1,68 +1,28 @@ -ARG BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-cuda11.3.0-py37-torch1.11.0-tf1.15.5-base -FROM $BASE_IMAGE +FROM {base_image} + +ARG DEBIAN_FRONTEND=noninteractive +ENV TZ=Asia/Shanghai +ENV arch=x86_64 + +COPY docker/scripts/modelscope_env_init.sh /usr/local/bin/ms_env_init.sh RUN apt-get update && \ - apt-get install -y libsox-dev unzip libaio-dev zip iputils-ping telnet sudo && \ + apt-get install -y libsox-dev unzip libaio-dev zip iputils-ping telnet sudo git net-tools && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* -ARG CUDA_VERSION=cu121 -# install jupyter plugin -RUN mkdir -p /root/.local/share/jupyter/labextensions/ && \ - cp -r /tmp/resources/jupyter_plugins/* /root/.local/share/jupyter/labextensions/ -# install ollama -RUN curl -fsSL https://ollama.com/install.sh | sh +{extra_content} -COPY docker/scripts/modelscope_env_init.sh /usr/local/bin/ms_env_init.sh -# python3.8 pip install git+https://github.com/jin-s13/xtcocoapi.git@v1.13 -# pip install git+https://github.com/gatagat/lap.git@v0.4.0 -RUN pip install --no-cache-dir numpy 'cython<=0.29.36' funtextprocessing kwsbp==0.0.6 safetensors typeguard==2.13.3 scikit-learn librosa==0.9.2 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html +RUN pip config set global.index-url https://mirrors.aliyun.com/pypi/simple && \ + pip config set install.trusted-host mirrors.aliyun.com && \ + cp /tmp/resources/ubuntu2204.aliyun 
/etc/apt/sources.list -RUN pip install --no-cache-dir adaseq text2sql_lgesql==1.3.0 \ - git+https://github.com/jin-s13/xtcocoapi.git@v1.14 \ - git+https://github.com/gatagat/lap.git@v0.4.0 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html --force --no-deps +RUN echo "cache bust $(date +%Y%m%d%H%M%S)" -RUN pip install --no-cache-dir mpi4py paint_ldm \ - mmcls>=0.21.0 mmdet>=2.25.0 decord>=0.6.0 \ - ipykernel fasttext fairseq deepspeed apex -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html +COPY {meta_file} /tmp/install.sh -ARG USE_GPU - - -RUN if [ "$USE_GPU" = "True" ] ; then \ - CUDA_HOME=/usr/local/cuda TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0 7.5 8.0 8.6 8.9 9.0" pip install --no-cache-dir 'git+https://github.com/facebookresearch/detectron2.git'; \ - else \ - echo 'cpu unsupport detectron2'; \ - fi - -# install dependencies -COPY requirements /var/modelscope -RUN pip install --no-cache-dir -r /var/modelscope/framework.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \ - pip install --no-cache-dir -r /var/modelscope/audio.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \ - pip install --no-cache-dir -r /var/modelscope/cv.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \ - pip install --no-cache-dir -r /var/modelscope/multi-modal.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \ - pip install --no-cache-dir -r /var/modelscope/nlp.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \ - pip install --no-cache-dir -r /var/modelscope/science.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \ - pip install --no-cache-dir -r /var/modelscope/tests.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \ - pip install --no-cache-dir -r /var/modelscope/server.txt && \ - pip install --no-cache-dir https://modelscope.oss-cn-beijing.aliyuncs.com/packages/imageio_ffmpeg-0.4.9-py3-none-any.whl --force && \ - pip install --no-cache-dir 'scipy<1.13.0' && \ - pip cache purge -# 'scipy<1.13.0' for cannot import name 'kaiser' from 'scipy.signal' -COPY examples /modelscope/examples -# torchmetrics==0.11.4 for ofa -# tinycudann for cuda12.1.0 pytorch 2.1.2 -RUN if [ "$USE_GPU" = "True" ] ; then \ - pip install --no-cache-dir torchsde jupyterlab torchmetrics==0.11.4 tiktoken transformers_stream_generator bitsandbytes basicsr optimum && \ - pip install --no-cache-dir flash_attn==2.5.9.post1 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \ - pip install --no-cache-dir auto-gptq --extra-index-url https://huggingface.github.io/autogptq-index/whl/cu121/ && \ - pip install --no-cache-dir -U 'xformers<0.0.27' --index-url https://download.pytorch.org/whl/cu121 && \ - pip install --no-cache-dir --force tinycudann==1.7 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \ - pip uninstall -y torch-scatter && TORCH_CUDA_ARCH_LIST="6.0;6.1;6.2;7.0;7.5;8.0;8.6;8.9;9.0" pip install --no-cache-dir -U torch-scatter && \ - pip install --no-cache-dir -U triton 'vllm==0.5.2' https://modelscope.oss-cn-beijing.aliyuncs.com/packages/lmdeploy-0.5.0-cp310-cp310-linux_x86_64.whl; \ - else \ - echo 'cpu unsupport vllm auto-gptq'; \ - fi +RUN sh /tmp/install.sh {version_args} ENV SETUPTOOLS_USE_DISTUTILS=stdlib ENV VLLM_USE_MODELSCOPE=True ENV LMDEPLOY_USE_MODELSCOPE=True +ENV MODELSCOPE_CACHE=/mnt/workspace/.cache/modelscope diff --git a/docker/Dockerfile.ubuntu_base 
b/docker/Dockerfile.ubuntu_base index 360f216f..ebad61d9 100644 --- a/docker/Dockerfile.ubuntu_base +++ b/docker/Dockerfile.ubuntu_base @@ -1,11 +1,10 @@ -ARG BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/ubuntu:20.04-cuda11.3.0-cudnn8-devel +ARG BASE_IMAGE={base_image} FROM $BASE_IMAGE ARG DEBIAN_FRONTEND=noninteractive ENV TZ=Asia/Shanghai ENV arch=x86_64 SHELL ["/bin/bash", "-c"] COPY docker/rcfiles /tmp/resources -COPY docker/jupyter_plugins /tmp/resources/jupyter_plugins RUN apt-get update && apt-get upgrade -y && apt-get install -y --reinstall ca-certificates && \ apt-get install -y make apt-utils openssh-server locales wget git strace gdb sox libopenmpi-dev curl \ iputils-ping net-tools iproute2 autoconf automake gperf libre2-dev libssl-dev \ @@ -34,10 +33,10 @@ RUN wget -O /tmp/boost.tar.gz https://boostorg.jfrog.io/artifactory/main/release rm -rf /tmp/boost_1_80_0 && rm -rf boost.tar.gz #install and config python copy from https://github.com/docker-library/python/blob/1b7a1106674a21e699b155cbd53bf39387284cca/3.10/bookworm/Dockerfile -ARG PYTHON_VERSION=3.10.14 +ARG PYTHON_VERSION={python_version} ENV PATH /usr/local/bin:$PATH ENV GPG_KEY A035C8C19219BA821ECEA86B64E628F8D684696D -ENV PYTHON_VERSION 3.10.14 +ENV PYTHON_VERSION {python_version} RUN set -eux; \ \ @@ -139,15 +138,14 @@ RUN set -eux; \ pip --version # end of install python -ARG USE_GPU=True +ARG USE_GPU={use_gpu} # install pytorch -ARG TORCH_VERSION=2.3.0 -ARG CUDATOOLKIT_VERSION=cu121 +ARG TORCH_VERSION={torch_version} +ARG CUDATOOLKIT_VERSION={cudatoolkit_version} RUN if [ "$USE_GPU" = "True" ] ; then \ - pip install --no-cache-dir "torch==2.3.0" -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \ - pip install --no-cache-dir torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121; \ + pip install --no-cache-dir torch==$TORCH_VERSION torchvision torchaudio; \ else \ pip install --no-cache-dir torch==$TORCH_VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu; \ fi @@ -192,7 +190,7 @@ RUN if [ "$USE_GPU" = "True" ] ; then \ fi -ARG TENSORFLOW_VERSION=1.15.5 +ARG TENSORFLOW_VERSION={tf_version} RUN if [ "$USE_GPU" = "True" ] ; then \ pip install --no-cache-dir tensorflow==$TENSORFLOW_VERSION; \ else \ @@ -200,9 +198,9 @@ ARG TENSORFLOW_VERSION=1.15.5 fi RUN if [ "$USE_GPU" = "True" ] ; then \ - pip install --no-cache-dir "https://modelscope.oss-cn-beijing.aliyuncs.com/packages/mmcv/mmcv_full-1.7.0-cp310-cp310-linux_x86_64.whl"; \ + cd /tmp && git clone -b ms_build --single-branch https://github.com/tastelikefeet/mmcv.git && cd mmcv && TORCH_CUDA_ARCH_LIST="6.0;6.1;7.0;7.5;8.0;8.9;9.0;8.6+PTX" MMCV_WITH_OPS=1 MAX_JOBS=32 FORCE_CUDA=1 python setup.py bdist_wheel && cd / && rm -fr /tmp/mmcv && pip cache purge; \ else \ - pip install --no-cache-dir mmcv_full==1.7.0+cputorch230 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html; \ + cd /tmp && git clone -b ms_build --single-branch https://github.com/tastelikefeet/mmcv.git && cd mmcv && MMCV_WITH_OPS=1 MAX_JOBS=32 python setup.py bdist_wheel && cd / && rm -fr /tmp/mmcv && pip cache purge; \ fi ENTRYPOINT [] diff --git a/docker/build_image.py b/docker/build_image.py new file mode 100644 index 00000000..50e441c7 --- /dev/null +++ b/docker/build_image.py @@ -0,0 +1,284 @@ +import argparse +import os +from typing import Any + +docker_registry = os.environ['DOCKER_REGISTRY'] +assert docker_registry, 'You must pass a valid DOCKER_REGISTRY' + + +class Builder: + + def __init__(self, args: 
Any, dry_run: bool): + self.args = self.init_args(args) + self.dry_run = dry_run + self.args.cudatoolkit_version = self._generate_cudatoolkit_version( + args.cuda_version) + self.args.python_tag = self._generate_python_tag(args.python_version) + + def init_args(self, args: Any) -> Any: + if not args.base_image: + # A mirrored image of nvidia/cuda:12.4.0-devel-ubuntu22.04 + args.base_image = ( + 'modelscope-image-registry.cn-wulanchabu.cr.aliyuncs.com/' + 'modelscope/mirror:12.4.0-devel-ubuntu22.04') + if not args.torch_version: + args.torch_version = '2.3.0' + args.torchaudio_version = '2.3.0' + args.torchvision_version = '0.18.0' + if not args.tf_version: + args.tf_version = '2.16.1' + if not args.cuda_version: + args.cuda_version = '12.1.0' + if not args.vllm_version: + args.vllm_version = '0.5.1' + if not args.lmdeploy_version: + args.lmdeploy_version = '0.5.0' + if not args.autogptq_version: + args.autogptq_version = '0.7.1' + return args + + def _generate_cudatoolkit_version(self, cuda_version: str) -> str: + cuda_version = cuda_version[:cuda_version.rfind('.')] + return 'cu' + cuda_version.replace('.', '') + + def _generate_python_tag(self, python_version: str) -> str: + python_version = python_version[:python_version.rfind('.')] + return 'py' + python_version.replace('.', '') + + def generate_dockerfile(self) -> str: + raise NotImplementedError + + def _save_dockerfile(self, content: str) -> None: + if os.path.exists('./Dockerfile'): + os.remove('./Dockerfile') + with open('./Dockerfile', 'w') as f: + f.write(content) + + def build(self) -> int: + pass + + def push(self) -> int: + pass + + def __call__(self): + content = self.generate_dockerfile() + self._save_dockerfile(content) + if not self.dry_run: + ret = self.build() + if ret != 0: + raise RuntimeError(f'Docker build error with errno: {ret}') + ret = self.push() + if ret != 0: + raise RuntimeError(f'Docker push error with errno: {ret}') + + +class BaseCPUImageBuilder(Builder): + + def generate_dockerfile(self) -> str: + with open('docker/Dockerfile.ubuntu_base', 'r') as f: + content = f.read() + content = content.replace('{base_image}', self.args.base_image) + content = content.replace('{use_gpu}', 'False') + content = content.replace('{python_version}', self.args.python_version) + content = content.replace('{torch_version}', self.args.torch_version) + content = content.replace('{cudatoolkit_version}', + self.args.cudatoolkit_version) + content = content.replace('{tf_version}', self.args.tf_version) + return content + + def build(self): + image_tag = f'{docker_registry}:ubuntu{self.args.ubuntu_version}-torch{self.args.torch_version}-base' + return os.system( + f'DOCKER_BUILDKIT=0 docker build -t {image_tag} -f Dockerfile .') + + def push(self): + image_tag = f'{docker_registry}:ubuntu{self.args.ubuntu_version}-torch{self.args.torch_version}-base' + return os.system(f'docker push {image_tag}') + + +class BaseGPUImageBuilder(Builder): + + def generate_dockerfile(self) -> str: + with open('docker/Dockerfile.ubuntu_base', 'r') as f: + content = f.read() + content = content.replace('{base_image}', self.args.base_image) + content = content.replace('{use_gpu}', 'True') + content = content.replace('{python_version}', self.args.python_version) + content = content.replace('{torch_version}', self.args.torch_version) + content = content.replace('{cudatoolkit_version}', + self.args.cudatoolkit_version) + content = content.replace('{tf_version}', self.args.tf_version) + return content + + def build(self) -> int: + image_tag = ( + 
f'{docker_registry}:ubuntu{self.args.ubuntu_version}-cuda{self.args.cuda_version}-' + f'torch{self.args.torch_version}-tf{self.args.tf_version}-base') + return os.system( + f'DOCKER_BUILDKIT=0 docker build -t {image_tag} -f Dockerfile .') + + def push(self): + image_tag = ( + f'{docker_registry}:ubuntu{self.args.ubuntu_version}-cuda{self.args.cuda_version}-' + f'torch{self.args.torch_version}-tf{self.args.tf_version}-base') + return os.system(f'docker push {image_tag}') + + +class CPUImageBuilder(Builder): + + def generate_dockerfile(self) -> str: + meta_file = './docker/install_cpu.sh' + version_args = ( + f'{self.args.torch_version} {self.args.torchvision_version} ' + f'{self.args.torchaudio_version} {self.args.modelscope_branch} {self.args.swift_branch}' + ) + base_image = f'{docker_registry}:ubuntu{self.args.ubuntu_version}-torch{self.args.torch_version}-base' + extra_content = """\nRUN pip install adaseq\nRUN pip install pai-easycv""" + + with open('docker/Dockerfile.ubuntu', 'r') as f: + content = f.read() + content = content.replace('{base_image}', base_image) + content = content.replace('{extra_content}', extra_content) + content = content.replace('{meta_file}', meta_file) + content = content.replace('{version_args}', version_args) + return content + + def build(self) -> int: + image_tag = ( + f'{docker_registry}:ubuntu{self.args.ubuntu_version}-{self.args.python_tag}-' + f'torch{self.args.torch_version}-{self.args.modelscope_version}-test' + ) + return os.system(f'docker build -t {image_tag} -f Dockerfile .') + + def push(self): + image_tag = ( + f'{docker_registry}:ubuntu{self.args.ubuntu_version}-{self.args.python_tag}-' + f'torch{self.args.torch_version}-{self.args.modelscope_version}-test' + ) + return os.system(f'docker push {image_tag}') + + +class GPUImageBuilder(Builder): + + def generate_dockerfile(self) -> str: + meta_file = './docker/install.sh' + extra_content = """\nRUN pip install adaseq\nRUN pip install pai-easycv""" + version_args = ( + f'{self.args.torch_version} {self.args.torchvision_version} {self.args.torchaudio_version} ' + f'{self.args.vllm_version} {self.args.lmdeploy_version} {self.args.autogptq_version} ' + f'{self.args.modelscope_branch} {self.args.swift_branch}') + base_image = ( + f'{docker_registry}:ubuntu{self.args.ubuntu_version}-cuda{self.args.cuda_version}-' + f'torch{self.args.torch_version}-tf{self.args.tf_version}-base') + with open('docker/Dockerfile.ubuntu', 'r') as f: + content = f.read() + content = content.replace('{base_image}', base_image) + content = content.replace('{extra_content}', extra_content) + content = content.replace('{meta_file}', meta_file) + content = content.replace('{version_args}', version_args) + return content + + def build(self) -> int: + image_tag = ( + f'{docker_registry}:ubuntu{self.args.ubuntu_version}-cuda{self.args.cuda_version}-' + f'{self.args.python_tag}-torch{self.args.torch_version}-tf{self.args.tf_version}-' + f'{self.args.modelscope_version}-test') + return os.system(f'docker build -t {image_tag} -f Dockerfile .') + + def push(self): + image_tag = ( + f'{docker_registry}:ubuntu{self.args.ubuntu_version}-cuda{self.args.cuda_version}-' + f'{self.args.python_tag}-torch{self.args.torch_version}-tf{self.args.tf_version}-' + f'{self.args.modelscope_version}-test') + return os.system(f'docker push {image_tag}') + + +class LLMImageBuilder(Builder): + + def init_args(self, args) -> Any: + if not args.base_image: + # A mirrored image of nvidia/cuda:12.4.0-devel-ubuntu22.04 + args.base_image = ( + 
'modelscope-image-registry.cn-wulanchabu.cr.aliyuncs.com/modelscope/' + 'mirror:12.4.0-devel-ubuntu22.04') + if not args.torch_version: + args.torch_version = '2.4.0' + args.torchaudio_version = '2.4.0' + args.torchvision_version = '0.19.0' + if not args.cuda_version: + args.cuda_version = '12.4.0' + if not args.vllm_version: + args.vllm_version = '0.6.0' + if not args.lmdeploy_version: + args.lmdeploy_version = '0.6.1' + if not args.autogptq_version: + args.autogptq_version = '0.7.1' + return args + + def generate_dockerfile(self) -> str: + meta_file = './docker/install.sh' + with open('docker/Dockerfile.extra_install', 'r') as f: + extra_content = f.read() + extra_content = extra_content.replace('{python_version}', + self.args.python_version) + version_args = ( + f'{self.args.torch_version} {self.args.torchvision_version} {self.args.torchaudio_version} ' + f'{self.args.vllm_version} {self.args.lmdeploy_version} {self.args.autogptq_version} ' + f'{self.args.modelscope_branch} {self.args.swift_branch}') + with open('docker/Dockerfile.ubuntu', 'r') as f: + content = f.read() + content = content.replace('{base_image}', self.args.base_image) + content = content.replace('{extra_content}', extra_content) + content = content.replace('{meta_file}', meta_file) + content = content.replace('{version_args}', version_args) + return content + + def build(self) -> int: + image_tag = ( + f'{docker_registry}:ubuntu{self.args.ubuntu_version}-cuda{self.args.cuda_version}-' + f'{self.args.python_tag}-torch{self.args.torch_version}-{self.args.modelscope_version}-LLM-test' + ) + return os.system(f'docker build -t {image_tag} -f Dockerfile .') + + def push(self): + image_tag = ( + f'{docker_registry}:ubuntu{self.args.ubuntu_version}-cuda{self.args.cuda_version}-' + f'{self.args.python_tag}-torch{self.args.torch_version}-{self.args.modelscope_version}-LLM-test' + ) + return os.system(f'docker push {image_tag}') + + +parser = argparse.ArgumentParser() +parser.add_argument('--base_image', type=str, default=None) +parser.add_argument('--image_type', type=str) +parser.add_argument('--python_version', type=str, default='3.10.14') +parser.add_argument('--ubuntu_version', type=str, default='22.04') +parser.add_argument('--torch_version', type=str, default=None) +parser.add_argument('--torchvision_version', type=str, default=None) +parser.add_argument('--cuda_version', type=str, default=None) +parser.add_argument('--torchaudio_version', type=str, default=None) +parser.add_argument('--tf_version', type=str, default=None) +parser.add_argument('--vllm_version', type=str, default=None) +parser.add_argument('--lmdeploy_version', type=str, default=None) +parser.add_argument('--autogptq_version', type=str, default=None) +parser.add_argument('--modelscope_branch', type=str, default='master') +parser.add_argument('--modelscope_version', type=str, default='9.99.0') +parser.add_argument('--swift_branch', type=str, default='main') +parser.add_argument('--dry_run', type=int, default=0) + +args = parser.parse_args() + +if args.image_type.lower() == 'base_cpu': + builder_cls = BaseCPUImageBuilder +elif args.image_type.lower() == 'base_gpu': + builder_cls = BaseGPUImageBuilder +elif args.image_type.lower() == 'cpu': + builder_cls = CPUImageBuilder +elif args.image_type.lower() == 'gpu': + builder_cls = GPUImageBuilder +elif args.image_type.lower() == 'llm': + builder_cls = LLMImageBuilder +else: + raise ValueError(f'Unsupported image_type: {args.image_type}') + +builder_cls(args, args.dry_run)() diff --git a/docker/install.sh 
b/docker/install.sh new file mode 100644 index 00000000..cfd05668 --- /dev/null +++ b/docker/install.sh @@ -0,0 +1,50 @@ +#!/bin/bash + +torch_version=${1:-2.4.0} +torchvision_version=${2:-0.19.0} +torchaudio_version=${3:-2.4.0} +vllm_version=${4:-0.6.0} +lmdeploy_version=${5:-0.6.1} +autogptq_version=${6:-0.7.1} +modelscope_branch=${7:-master} +swift_branch=${8:-main} + +pip install --no-cache-dir funtextprocessing typeguard==2.13.3 scikit-learn -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html + +# curl -fsSL https://ollama.com/install.sh | sh + +pip install --no-cache-dir -U funasr + +pip install --no-cache-dir -U qwen_vl_utils pyav librosa autoawq timm transformers accelerate peft optimum trl safetensors + +pip install --no-cache-dir torchsde jupyterlab torchmetrics==0.11.4 tiktoken transformers_stream_generator bitsandbytes basicsr + +pip install --no-cache-dir text2sql_lgesql==1.3.0 git+https://github.com/jin-s13/xtcocoapi.git@v1.14 git+https://github.com/gatagat/lap.git@v0.4.0 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html --force --no-deps + +pip install --no-cache-dir mpi4py paint_ldm 'mmcls>=0.21.0' 'mmdet>=2.25.0' 'decord>=0.6.0' ipykernel fasttext fairseq deepspeed apex -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html + +CUDA_HOME=/usr/local/cuda TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0 7.5 8.0 8.6 8.9 9.0" pip install --no-cache-dir 'git+https://github.com/facebookresearch/detectron2.git'; + +# pip install https://github.com/Dao-AILab/flash-attention/releases/download/v2.6.3/flash_attn-2.6.3+cu123torch2.4cxx11abiTRUE-cp310-cp310-linux_x86_64.whl +# find on: https://github.com/Dao-AILab/flash-attention/releases +# cd /tmp && git clone https://github.com/Dao-AILab/flash-attention.git && cd flash-attention && python setup.py install && cd / && rm -fr /tmp/flash-attention && pip cache purge; + +pip install --no-cache-dir auto-gptq==$autogptq_version + +pip install --no-cache-dir --force tinycudann==1.7 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html + +# pip uninstall -y torch-scatter && TORCH_CUDA_ARCH_LIST="6.0;6.1;6.2;7.0;7.5;8.0;8.6;8.9;9.0" pip install --no-cache-dir -U torch-scatter + +pip install --no-cache-dir -U triton + +pip install vllm==$vllm_version -U + +pip install --no-cache-dir -U lmdeploy==$lmdeploy_version --no-deps + +pip install --no-cache-dir -U torch==$torch_version torchvision==$torchvision_version torchaudio==$torchaudio_version + +pip uninstall ms-swift modelscope -y + +cd /tmp && GIT_LFS_SKIP_SMUDGE=1 git clone -b $modelscope_branch --single-branch https://github.com/modelscope/modelscope.git && cd modelscope && pip install .[all] && cd / && rm -fr /tmp/modelscope && pip cache purge; + +cd /tmp && GIT_LFS_SKIP_SMUDGE=1 git clone -b $swift_branch --single-branch https://github.com/modelscope/ms-swift.git && cd ms-swift && pip install .[all] && cd / && rm -fr /tmp/ms-swift && pip cache purge; diff --git a/docker/install_cpu.sh b/docker/install_cpu.sh new file mode 100644 index 00000000..b9cf48eb --- /dev/null +++ b/docker/install_cpu.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +torch_version=${1:-2.4.0} +torchvision_version=${2:-0.19.0} +torchaudio_version=${3:-2.4.0} +modelscope_branch=${4:-master} +swift_branch=${5:-main} + +pip install --no-cache-dir funtextprocessing typeguard==2.13.3 scikit-learn -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html + +curl -fsSL https://ollama.com/install.sh | sh + +pip install --no-cache-dir -U funasr + +pip install --no-cache-dir -U
qwen_vl_utils pyav librosa timm transformers accelerate peft trl safetensors + +pip install --no-cache-dir text2sql_lgesql==1.3.0 git+https://github.com/jin-s13/xtcocoapi.git@v1.14 git+https://github.com/gatagat/lap.git@v0.4.0 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html --force --no-deps + +pip install --no-cache-dir mpi4py paint_ldm 'mmcls>=0.21.0' 'mmdet>=2.25.0' 'decord>=0.6.0' ipykernel fasttext fairseq -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html + +pip uninstall -y torch torchvision torchaudio + +pip install --no-cache-dir -U torch==$torch_version torchvision==$torchvision_version torchaudio==$torchaudio_version --index-url https://download.pytorch.org/whl/cpu + +pip uninstall ms-swift modelscope -y + +cd /tmp && GIT_LFS_SKIP_SMUDGE=1 git clone -b $modelscope_branch --single-branch https://github.com/modelscope/modelscope.git && cd modelscope && pip install .[all] && cd / && rm -fr /tmp/modelscope && pip cache purge; + +cd /tmp && GIT_LFS_SKIP_SMUDGE=1 git clone -b $swift_branch --single-branch https://github.com/modelscope/ms-swift.git && cd ms-swift && pip install .[all] && cd / && rm -fr /tmp/ms-swift && pip cache purge; diff --git a/docker/jupyter_plugins/jupyterlab_active_log/package.json b/docker/jupyter_plugins/jupyterlab_active_log/package.json deleted file mode 100644 index d2e0d0db..00000000 --- a/docker/jupyter_plugins/jupyterlab_active_log/package.json +++ /dev/null @@ -1,99 +0,0 @@ -{ - "name": "jupyterlab_active_log", - "version": "0.1.0", - "description": "A JupyterLab extension.", - "keywords": [ - "jupyter", - "jupyterlab", - "jupyterlab-extension" - ], - "homepage": "https://github.com/github_username/jupyterlab_active_log", - "bugs": { - "url": "https://github.com/github_username/jupyterlab_active_log/issues" - }, - "license": "BSD-3-Clause", - "files": [ - "lib/**/*.{d.ts,eot,gif,html,jpg,js,js.map,json,png,svg,woff2,ttf}", - "style/**/*.{css,js,eot,gif,html,jpg,json,png,svg,woff2,ttf}" - ], - "main": "lib/index.js", - "types": "lib/index.d.ts", - "style": "style/index.css", - "repository": { - "type": "git", - "url": "https://github.com/github_username/jupyterlab_active_log.git" - }, - "scripts": { - "build": "jlpm build:lib && jlpm build:labextension:dev", - "build:prod": "jlpm clean && jlpm build:lib && jlpm build:labextension", - "build:labextension": "jupyter labextension build .", - "build:labextension:dev": "jupyter labextension build --development True .", - "build:lib": "tsc", - "clean": "jlpm clean:lib", - "clean:lib": "rimraf lib tsconfig.tsbuildinfo", - "clean:lintcache": "rimraf .eslintcache .stylelintcache", - "clean:labextension": "rimraf jupyterlab_active_log/labextension", - "clean:all": "jlpm clean:lib && jlpm clean:labextension && jlpm clean:lintcache", - "eslint": "jlpm eslint:check --fix", - "eslint:check": "eslint . --cache --ext .ts,.tsx", - "install:extension": "jlpm build", - "lint": "jlpm stylelint && jlpm prettier && jlpm eslint", - "lint:check": "jlpm stylelint:check && jlpm prettier:check && jlpm eslint:check", - "prettier": "jlpm prettier:base --write --list-different", - "prettier:base": "prettier \"**/*{.ts,.tsx,.js,.jsx,.css,.json,.md}\"", - "prettier:check": "jlpm prettier:base --check", - "stylelint": "jlpm stylelint:check --fix", - "stylelint:check": "stylelint --cache \"style/**/*.css\"", - "watch": "run-p watch:src watch:labextension", - "watch:src": "tsc -w", - "watch:labextension": "jupyter labextension watch ."
- }, - "dependencies": { - "@jupyterlab/application": "^3.1.0" - }, - "devDependencies": { - "@jupyterlab/builder": "^3.1.0", - "@typescript-eslint/eslint-plugin": "^4.8.1", - "@typescript-eslint/parser": "^4.8.1", - "eslint": "^7.14.0", - "eslint-config-prettier": "^6.15.0", - "eslint-plugin-prettier": "^3.1.4", - "npm-run-all": "^4.1.5", - "prettier": "^2.1.1", - "rimraf": "^3.0.2", - "stylelint": "^14.3.0", - "stylelint-config-prettier": "^9.0.3", - "stylelint-config-recommended": "^6.0.0", - "stylelint-config-standard": "~24.0.0", - "stylelint-prettier": "^2.0.0", - "typescript": "~4.1.3" - }, - "sideEffects": [ - "style/*.css", - "style/index.js" - ], - "styleModule": "style/index.js", - "publishConfig": { - "access": "public" - }, - "jupyterlab": { - "extension": true, - "outputDir": "jupyterlab_active_log/labextension", - "_build": { - "load": "static/remoteEntry.eb3177c3791d7658cc12.js", - "extension": "./extension", - "style": "./style" - } - }, - "jupyter-releaser": { - "hooks": { - "before-build-npm": [ - "python -m pip install jupyterlab~=3.1", - "jlpm" - ], - "before-build-python": [ - "jlpm clean:all" - ] - } - } -} diff --git a/docker/jupyter_plugins/jupyterlab_active_log/static/568.a92ae44b87625ab09aed.js b/docker/jupyter_plugins/jupyterlab_active_log/static/568.a92ae44b87625ab09aed.js deleted file mode 100644 index b70adee6..00000000 --- a/docker/jupyter_plugins/jupyterlab_active_log/static/568.a92ae44b87625ab09aed.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunkjupyterlab_active_log=self.webpackChunkjupyterlab_active_log||[]).push([[568],{568:(t,e,a)=>{a.r(e),a.d(e,{default:()=>i});const i={id:"jupyterlab_active_log:plugin",autoStart:!0,activate:t=>{console.log("JupyterLab extension jupyterlab_active_log is activated!"),window.consts=Object.assign(Object.assign({},window.consts),{recordUrl:"https://modelscope.cn/api/v1/notebooks/activelog",timerDuration:1e4,timerParams:function(){const t=location.pathname.split("/");let e;return t.length>=2&&(e=t[1]),{site:"dsw",id:e,ext:{pathname:location.pathname}}}});const e=document.body,a=e.insertBefore(document.createElement("script"),e.firstChild);a.setAttribute("id","timer-sdk"),a.setAttribute("src","https://g.alicdn.com/alifanyi/translate-js-sdk/timer.js ")}}}}]); diff --git a/docker/jupyter_plugins/jupyterlab_active_log/static/747.63b4c3d22bfe458b352b.js b/docker/jupyter_plugins/jupyterlab_active_log/static/747.63b4c3d22bfe458b352b.js deleted file mode 100644 index 2129fc3d..00000000 --- a/docker/jupyter_plugins/jupyterlab_active_log/static/747.63b4c3d22bfe458b352b.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunkjupyterlab_active_log=self.webpackChunkjupyterlab_active_log||[]).push([[747],{150:(e,n,t)=>{t.d(n,{Z:()=>a});var r=t(645),o=t.n(r)()((function(e){return e[1]}));o.push([e.id,"/*\n See the JupyterLab Developer Guide for useful CSS Patterns:\n\n https://jupyterlab.readthedocs.io/en/stable/developer/css.html\n*/\n",""]);const a=o},645:e=>{e.exports=function(e){var n=[];return n.toString=function(){return this.map((function(n){var t=e(n);return n[2]?"@media ".concat(n[2]," {").concat(t,"}"):t})).join("")},n.i=function(e,t,r){"string"==typeof e&&(e=[[null,e,""]]);var o={};if(r)for(var a=0;a{var r,o=function(){var e={};return function(n){if(void 0===e[n]){var t=document.querySelector(n);if(window.HTMLIFrameElement&&t instanceof window.HTMLIFrameElement)try{t=t.contentDocument.head}catch(e){t=null}e[n]=t}return e[n]}}(),a=[];function i(e){for(var n=-1,t=0;t{t.r(n);var 
r=t(379),o=t.n(r),a=t(150);o()(a.Z,{insert:"head",singleton:!1}),a.Z.locals}}]); diff --git a/docker/jupyter_plugins/jupyterlab_active_log/static/remoteEntry.eb3177c3791d7658cc12.js b/docker/jupyter_plugins/jupyterlab_active_log/static/remoteEntry.eb3177c3791d7658cc12.js deleted file mode 100644 index ec49e973..00000000 --- a/docker/jupyter_plugins/jupyterlab_active_log/static/remoteEntry.eb3177c3791d7658cc12.js +++ /dev/null @@ -1 +0,0 @@ -var _JUPYTERLAB;(()=>{"use strict";var e,r,t={293:(e,r,t)=>{var o={"./index":()=>t.e(568).then((()=>()=>t(568))),"./extension":()=>t.e(568).then((()=>()=>t(568))),"./style":()=>t.e(747).then((()=>()=>t(747)))},a=(e,r)=>(t.R=r,r=t.o(o,e)?o[e]():Promise.resolve().then((()=>{throw new Error('Module "'+e+'" does not exist in container.')})),t.R=void 0,r),n=(e,r)=>{if(t.S){var o="default",a=t.S[o];if(a&&a!==e)throw new Error("Container initialization failed as it has already been initialized with a different share scope");return t.S[o]=e,t.I(o,r)}};t.d(r,{get:()=>a,init:()=>n})}},o={};function a(e){var r=o[e];if(void 0!==r)return r.exports;var n=o[e]={id:e,exports:{}};return t[e](n,n.exports,a),n.exports}a.m=t,a.c=o,a.n=e=>{var r=e&&e.__esModule?()=>e.default:()=>e;return a.d(r,{a:r}),r},a.d=(e,r)=>{for(var t in r)a.o(r,t)&&!a.o(e,t)&&Object.defineProperty(e,t,{enumerable:!0,get:r[t]})},a.f={},a.e=e=>Promise.all(Object.keys(a.f).reduce(((r,t)=>(a.f[t](e,r),r)),[])),a.u=e=>e+"."+{568:"a92ae44b87625ab09aed",747:"63b4c3d22bfe458b352b"}[e]+".js?v="+{568:"a92ae44b87625ab09aed",747:"63b4c3d22bfe458b352b"}[e],a.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),a.o=(e,r)=>Object.prototype.hasOwnProperty.call(e,r),e={},r="jupyterlab_active_log:",a.l=(t,o,n,i)=>{if(e[t])e[t].push(o);else{var l,u;if(void 0!==n)for(var c=document.getElementsByTagName("script"),d=0;d{l.onerror=l.onload=null,clearTimeout(f);var a=e[t];if(delete e[t],l.parentNode&&l.parentNode.removeChild(l),a&&a.forEach((e=>e(o))),r)return r(o)},f=setTimeout(p.bind(null,void 0,{type:"timeout",target:l}),12e4);l.onerror=p.bind(null,l.onerror),l.onload=p.bind(null,l.onload),u&&document.head.appendChild(l)}},a.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},(()=>{a.S={};var e={},r={};a.I=(t,o)=>{o||(o=[]);var n=r[t];if(n||(n=r[t]={}),!(o.indexOf(n)>=0)){if(o.push(n),e[t])return e[t];a.o(a.S,t)||(a.S[t]={});var i=a.S[t],l="jupyterlab_active_log",u=[];return"default"===t&&((e,r,t,o)=>{var n=i[e]=i[e]||{},u=n[r];(!u||!u.loaded&&(1!=!u.eager?o:l>u.from))&&(n[r]={get:()=>a.e(568).then((()=>()=>a(568))),from:l,eager:!1})})("jupyterlab_active_log","0.1.0"),e[t]=u.length?Promise.all(u).then((()=>e[t]=1)):1}}})(),(()=>{var e;a.g.importScripts&&(e=a.g.location+"");var r=a.g.document;if(!e&&r&&(r.currentScript&&(e=r.currentScript.src),!e)){var t=r.getElementsByTagName("script");t.length&&(e=t[t.length-1].src)}if(!e)throw new Error("Automatic publicPath is not supported in this browser");e=e.replace(/#.*$/,"").replace(/\?.*$/,"").replace(/\/[^\/]+$/,"/"),a.p=e})(),(()=>{var e={346:0};a.f.j=(r,t)=>{var o=a.o(e,r)?e[r]:void 0;if(0!==o)if(o)t.push(o[2]);else{var n=new Promise(((t,a)=>o=e[r]=[t,a]));t.push(o[2]=n);var i=a.p+a.u(r),l=new Error;a.l(i,(t=>{if(a.o(e,r)&&(0!==(o=e[r])&&(e[r]=void 0),o)){var n=t&&("load"===t.type?"missing":t.type),i=t&&t.target&&t.target.src;l.message="Loading chunk 
"+r+" failed.\n("+n+": "+i+")",l.name="ChunkLoadError",l.type=n,l.request=i,o[1](l)}}),"chunk-"+r,r)}};var r=(r,t)=>{var o,n,[i,l,u]=t,c=0;if(i.some((r=>0!==e[r]))){for(o in l)a.o(l,o)&&(a.m[o]=l[o]);u&&u(a)}for(r&&r(t);c 18.06 with -# experimental enabled and DOCKER_BUILDKIT=1 -# -# If you do not use buildkit you are not going to have a good time -# -# For reference: -# https://docs.docker.com/develop/develop-images/build_enhancements/ - -# ARG BASE_IMAGE=reg.docker.alibaba-inc.com/pai-dlc/pytorch-training:1.10PAI-gpu-py36-cu113-ubuntu18.04 -# FROM ${BASE_IMAGE} as dev-base - -# FROM reg.docker.alibaba-inc.com/pai-dlc/pytorch-training:1.10PAI-gpu-py36-cu113-ubuntu18.04 as dev-base -FROM pytorch/pytorch:1.10.0-cuda11.3-cudnn8-devel -# FROM pytorch/pytorch:1.10.0-cuda11.3-cudnn8-runtime -# config pip source -RUN mkdir /root/.pip -COPY docker/rcfiles/pip.conf.tsinghua /root/.pip/pip.conf -COPY docker/rcfiles/sources.list.aliyun /etc/apt/sources.list - -# Install essential Ubuntu packages -RUN apt-get update &&\ - apt-get install -y software-properties-common \ - build-essential \ - git \ - wget \ - vim \ - curl \ - zip \ - zlib1g-dev \ - unzip \ - pkg-config \ - libsndfile1 - -# install modelscope and its python env -WORKDIR /opt/modelscope -COPY . . -RUN pip install -r requirements.txt -# RUN --mount=type=cache,target=/opt/ccache \ -# python setup.py install - -# opencv-python-headless conflict with opencv-python installed -RUN python setup.py install \ - && pip uninstall -y opencv-python-headless - -# prepare modelscope libs -COPY docker/scripts/install_libs.sh /tmp/ -RUN bash /tmp/install_libs.sh && \ - rm -rf /tmp/install_libs.sh - -ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/modelscope/lib64 - -WORKDIR /workspace diff --git a/docker/rcfiles/sources.list.aliyun b/docker/rcfiles/sources.list.aliyun deleted file mode 100644 index 1ebf4ae5..00000000 --- a/docker/rcfiles/sources.list.aliyun +++ /dev/null @@ -1,14 +0,0 @@ -deb https://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse -# deb-src https://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse - -deb https://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse -# deb-src https://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse - -deb https://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse -# deb-src https://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse - -# deb https://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse -# deb-src https://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse - -deb https://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse -# deb-src https://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse diff --git a/docker/rcfiles/ubuntu20.04_sources.tuna b/docker/rcfiles/ubuntu20.04_sources.tuna deleted file mode 100644 index a247bbfa..00000000 --- a/docker/rcfiles/ubuntu20.04_sources.tuna +++ /dev/null @@ -1,13 +0,0 @@ -# 默认注释了源码镜像以提高 apt update 速度,如有需要可自行取消注释 -deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal main restricted universe multiverse -# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal main restricted universe multiverse -deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-updates main restricted universe multiverse -# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-updates main restricted universe multiverse -deb 
https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-backports main restricted universe multiverse -# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-backports main restricted universe multiverse -deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-security main restricted universe multiverse -# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-security main restricted universe multiverse - -# 预发布软件源,不建议启用 -# deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-proposed main restricted universe multiverse -# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-proposed main restricted universe multiverse diff --git a/docker/rcfiles/user.vimrc b/docker/rcfiles/user.vimrc deleted file mode 100644 index 590aca43..00000000 --- a/docker/rcfiles/user.vimrc +++ /dev/null @@ -1,10 +0,0 @@ -set nocompatible -set encoding=utf-8 -set hlsearch -set smartindent -set ruler -set number -set ts=2 -set sw=2 -set expandtab -autocmd FileType make setlocal noexpandtab diff --git a/docker/scripts/install_flash_attension.sh b/docker/scripts/install_flash_attension.sh deleted file mode 100644 index 6413cca9..00000000 --- a/docker/scripts/install_flash_attension.sh +++ /dev/null @@ -1,4 +0,0 @@ - git clone -b v2.3.3 https://github.com/Dao-AILab/flash-attention && \ - cd flash-attention && MAX_JOBS=46 python setup.py install && \ - cd .. && \ - rm -rf flash-attention diff --git a/docker/scripts/torch111_torch3d_nvdiffrast.sh b/docker/scripts/torch111_torch3d_nvdiffrast.sh deleted file mode 100644 index ca86b0cc..00000000 --- a/docker/scripts/torch111_torch3d_nvdiffrast.sh +++ /dev/null @@ -1,14 +0,0 @@ -export CMAKE_BUILD_PARALLEL_LEVEL=36 && export MAX_JOBS=4 && export CMAKE_CUDA_ARCHITECTURES="50;52;60;61;70;75;80;86" \ - && pip install --no-cache-dir fvcore iopath \ - && curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz \ - && tar xzf 1.10.0.tar.gz \ - && export CUB_HOME=$PWD/cub-1.10.0 \ - && pip install "git+https://github.com/facebookresearch/pytorch3d.git@stable" \ - && rm -fr 1.10.0.tar.gz cub-1.10.0 \ - && apt-get update \ - && apt-get install -y --no-install-recommends pkg-config libglvnd0 libgl1 libglx0 libegl1 libgles2 libglvnd-dev libgl1-mesa-dev libegl1-mesa-dev libgles2-mesa-dev -y \ - && git clone https://github.com/NVlabs/nvdiffrast.git \ - && cd nvdiffrast \ - && pip install --no-cache-dir . \ - && cd .. \ - && rm -rf nvdiffrast
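
Usage sketch for the new build entry point: the diff above replaces the deleted .dev_scripts shell scripts with docker/build_image.py, driven either by the workflow_dispatch workflow or by hand. A minimal local invocation, assuming only the arguments defined in build_image.py and the workflow; the DOCKER_REGISTRY value is a placeholder, and --dry_run 1 only renders the Dockerfile without building or pushing:

    # registry address is illustrative; build_image.py requires DOCKER_REGISTRY to be set
    export DOCKER_REGISTRY=registry.example.com/modelscope/modelscope
    # image_type is one of: base_cpu, base_gpu, cpu, gpu, llm
    python docker/build_image.py --image_type gpu \
        --modelscope_branch master --modelscope_version 9.99.0 \
        --swift_branch main --dry_run 1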