add bigdl-llm-tutorial into llm-inference-cpu image (#9139)

* add bigdl-llm-tutorial into llm-inference-cpu image
* modify Dockerfile
* modify Dockerfile
parent 65dd73b62e
commit e02fbb40cc

3 changed files with 45 additions and 5 deletions
Dockerfile

@@ -4,6 +4,10 @@ ARG http_proxy
 ARG https_proxy
 ARG PIP_NO_CACHE_DIR=false
 
+ENV PYTHONUNBUFFERED=1
+
+ADD ./start-notebook.sh /llm/start-notebook.sh
+
 # Install PYTHON 3.9
 RUN env DEBIAN_FRONTEND=noninteractive apt-get update && \
     apt install software-properties-common libunwind8-dev vim less -y && \
@@ -21,9 +25,13 @@ RUN env DEBIAN_FRONTEND=noninteractive apt-get update && \
     pip3 install --no-cache-dir --upgrade torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu && \
     pip install --pre --upgrade bigdl-llm[all] && \
     pip install --pre --upgrade bigdl-nano && \
+# Download bigdl-llm-tutorial
+    cd /llm && \
+    pip install --upgrade jupyterlab && \
+    git clone https://github.com/intel-analytics/bigdl-llm-tutorial && \
+    chmod +x /llm/start-notebook.sh && \
 # Download chat.py script
     pip install --upgrade colorama && \
-    wget -P /root https://raw.githubusercontent.com/intel-analytics/BigDL/main/python/llm/portable-zip/chat.py && \
-    export PYTHONUNBUFFERED=1
+    wget -P /llm https://raw.githubusercontent.com/intel-analytics/BigDL/main/python/llm/portable-zip/chat.py
 
 ENTRYPOINT ["/bin/bash"]
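With these changes the image clones the tutorial into /llm/bigdl-llm-tutorial and ships /llm/start-notebook.sh, so the tutorial notebooks can be served straight from a running container. A minimal sketch of doing that from the host, assuming the llm-inference-cpu image has already been built or pulled (the image and container names below are placeholders, not values from this commit):

```bash
# Placeholder names; substitute your own image tag and container name.
# -p publishes the JupyterLab port (start-notebook.sh defaults to 12345).
sudo docker run -itd --name llm-cpu-test -p 12345:12345 YOUR_IMAGE_NAME

# Launch JupyterLab against the bundled tutorial via the script added in this commit.
sudo docker exec -it llm-cpu-test bash /llm/start-notebook.sh
```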
README

@@ -35,7 +35,7 @@ To run inference using `BigDL-LLM` using cpu, you could refer to this [documenta
 
 ### Use chat.py
 
-chat.py can be used to initiate a conversation with a specified model. The file is under directory '/root'.
+chat.py can be used to initiate a conversation with a specified model. The file is under directory '/llm'.
 
 You can download models and bind the model directory from host machine to container when start a container.
 
@@ -58,11 +58,11 @@ sudo docker run -itd \
 
 After entering the container through `docker exec`, you can run chat.py by:
 ```bash
-cd /root
+cd /llm
 python chat.py --model-path YOUR_MODEL_PATH
 ```
 In the example above, it can be:
 ```bash
-cd /root
+cd /llm
 python chat.py --model-path /llm/models/MODEL_NAME
 ```
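The README asks users to bind a model directory from the host when starting the container. A hedged sketch of such a bind mount, reusing the /llm/models path from the README's own example (the host path, container name, and image name are placeholders, not values from this commit):

```bash
# Placeholders: replace the host path and image name with your own.
# The container path /llm/models matches the /llm/models/MODEL_NAME example above.
sudo docker run -itd \
    --name llm-cpu-test \
    -v /path/to/your/models:/llm/models \
    YOUR_IMAGE_NAME
```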
							
								
								
									
docker/llm/inference/cpu/docker/start-notebook.sh (new file, 32 additions)
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+#
+# Copyright 2016 The BigDL Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#set -x
+port=${port:-12345}
+token=${token:-""}
+
+while [ $# -gt 0 ]; do
+
+   if [[ $1 == *"--"* ]]; then
+        param="${1/--/}"
+        declare $param="$2"
+   fi
+
+  shift
+done
+
+jupyter-lab --notebook-dir=/llm/bigdl-llm-tutorial --ip=0.0.0.0 --port=$port --no-browser --NotebookApp.token=$token --allow-root
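The while/declare loop turns any `--name value` pair on the command line into a shell variable of that name, so the `port` and `token` defaults set above can be overridden at launch. For example (values are illustrative):

```bash
# Serve the tutorial on port 8888 with a login token instead of the defaults.
bash /llm/start-notebook.sh --port 8888 --token my_secret_token
```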