From a5c481fedd8c26e235025359fa32a7c786fc3ed3 Mon Sep 17 00:00:00 2001 From: Lilac09 <74996885+Zhengjin-Wang@users.noreply.github.com> Date: Mon, 18 Dec 2023 09:00:22 +0800 Subject: [PATCH] add chat.py dependency in Dockerfile (#9699) --- docker/llm/README.md | 2 +- docker/llm/inference/cpu/docker/Dockerfile | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docker/llm/README.md b/docker/llm/README.md index 80e2f72f..e6c3aa53 100644 --- a/docker/llm/README.md +++ b/docker/llm/README.md @@ -73,7 +73,7 @@ You can download models and bind the model directory from host machine to contai After entering the container through `docker exec`, you can run chat.py by: ```bash -cd /llm +cd /llm/portable-zip python chat.py --model-path YOUR_MODEL_PATH ``` If your model is chatglm-6b and mounted on /llm/models, you can excute: diff --git a/docker/llm/inference/cpu/docker/Dockerfile b/docker/llm/inference/cpu/docker/Dockerfile index 2234b481..154e2129 100644 --- a/docker/llm/inference/cpu/docker/Dockerfile +++ b/docker/llm/inference/cpu/docker/Dockerfile @@ -30,12 +30,12 @@ RUN env DEBIAN_FRONTEND=noninteractive apt-get update && \ pip install --upgrade jupyterlab && \ git clone https://github.com/intel-analytics/bigdl-llm-tutorial && \ chmod +x /llm/start-notebook.sh && \ -# Download chat.py script - pip install --upgrade colorama && \ - wget -P /llm https://raw.githubusercontent.com/intel-analytics/BigDL/main/python/llm/portable-zip/chat.py && \ # Download all-in-one benchmark git clone https://github.com/intel-analytics/BigDL && \ cp -r ./BigDL/python/llm/dev/benchmark/ ./benchmark && \ +# Copy chat.py script + pip install --upgrade colorama && \ + cp -r ./BigDL/python/llm/portable-zip/ ./portable-zip && \ # Install all-in-one dependencies apt-get install -y numactl && \ pip install --upgrade omegaconf && \