Merge branch 'release/1.11' of gitlab.alibaba-inc.com:Ali-MaaS/MaaS-lib into release/1.11

This commit is contained in:
ly119399
2024-01-05 14:07:47 +08:00
2 changed files with 3 additions and 3 deletions

View File

@@ -190,7 +190,7 @@ printf "$docker_file_content" > Dockerfile
while true
do
-DOCKER_BUILDKIT=0 docker build -t $IMAGE_TO_BUILD \
+docker build --progress=plain -t $IMAGE_TO_BUILD \
--build-arg USE_GPU \
--build-arg BASE_IMAGE \
--build-arg PYTHON_VERSION \

View File

@@ -1,7 +1,7 @@
ARG BASE_IMAGE=reg.docker.alibaba-inc.com/modelscope/modelscope:ubuntu20.04-cuda11.3.0-py37-torch1.11.0-tf1.15.5-base
FROM $BASE_IMAGE
RUN apt-get update && \
-apt-get install -y libsox-dev unzip zip iputils-ping telnet && \
+apt-get install -y libsox-dev unzip zip iputils-ping telnet sudo && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
@@ -38,7 +38,7 @@ RUN if [ "$USE_GPU" = "True" ] ; then \
pip install --no-cache-dir torchsde jupyterlab torchmetrics==0.11.4 tiktoken transformers_stream_generator bitsandbytes basicsr optimum && \
pip install --no-cache-dir auto-gptq --extra-index-url https://huggingface.github.io/autogptq-index/whl/cu121/ && \
pip install --no-cache-dir -U xformers --index-url https://download.pytorch.org/whl/cu121 && \
-pip install --no-cache-dir flash_attn vllm; \
+pip install --no-cache-dir -U flash_attn vllm; \
else \
echo 'cpu unsupport vllm auto-gptq'; \
fi