Fix SDLe CT222 Vulnerabilities (#11237)

* fix ct222 vuln

* update

* fix

* update ENTRYPOINT

* revert ENTRYPOINT

* Fix CT222 Vulns

* fix

* revert changes

* fix

* revert

* add sudo permission to ipex-llm user

* do not use ipex-llm user
This commit is contained in:
Shaojun Liu 2024-06-13 15:31:22 +08:00 committed by GitHub
parent bfab294f08
commit 9760ffc256
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
6 changed files with 63 additions and 68 deletions

View file

@@ -5,22 +5,23 @@ ENV TZ=Asia/Shanghai
ARG PIP_NO_CACHE_DIR=false
# retrieve oneapi repo public key
RUN curl -fsSL https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB | gpg --dearmor | tee /usr/share/keyrings/intel-oneapi-archive-keyring.gpg && \
echo "deb [signed-by=/usr/share/keyrings/intel-oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main " > /etc/apt/sources.list.d/oneAPI.list && \
# retrieve intel gpu driver repo public key
wget -qO - https://repositories.intel.com/graphics/intel-graphics.key | gpg --dearmor --output /usr/share/keyrings/intel-graphics.gpg && \
echo 'deb [arch=amd64,i386 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/graphics/ubuntu jammy arc' | tee /etc/apt/sources.list.d/intel.gpu.jammy.list && \
RUN wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | gpg --dearmor | tee /usr/share/keyrings/intel-oneapi-archive-keyring.gpg > /dev/null && \
echo "deb [signed-by=/usr/share/keyrings/intel-oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main " | tee /etc/apt/sources.list.d/oneAPI.list && \
chmod 644 /usr/share/keyrings/intel-oneapi-archive-keyring.gpg && \
rm /etc/apt/sources.list.d/intel-graphics.list && \
wget -O- https://repositories.intel.com/graphics/intel-graphics.key | gpg --dearmor | tee /usr/share/keyrings/intel-graphics.gpg > /dev/null && \
echo "deb [arch=amd64,i386 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/graphics/ubuntu jammy arc" | tee /etc/apt/sources.list.d/intel.gpu.jammy.list && \
chmod 644 /usr/share/keyrings/intel-graphics.gpg && \
# update dependencies
apt-get update && \
# install basic dependencies
apt-get install -y curl wget git gnupg gpg-agent libunwind8-dev vim less && \
apt-get install -y --no-install-recommends curl wget git gnupg gpg-agent libunwind8-dev vim less && \
# install Intel GPU driver
apt-get install -y intel-opencl-icd intel-level-zero-gpu level-zero level-zero-dev --allow-downgrades && \
apt-get install -y --no-install-recommends intel-opencl-icd intel-level-zero-gpu level-zero level-zero-dev --allow-downgrades && \
# install python 3.11
ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone && \
env DEBIAN_FRONTEND=noninteractive apt-get update && \
apt-get install -y python3.11 python3-pip python3.11-dev python3-wheel python3.11-distutils && \
apt-get install -y --no-install-recommends python3.11 python3-pip python3.11-dev python3-wheel python3.11-distutils && \
# avoid axolotl lib conflict
apt-get remove -y python3-blinker && apt autoremove -y && \
# link to python 3.11
@@ -30,7 +31,7 @@ RUN curl -fsSL https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-P
# remove apt cache
rm -rf /var/lib/apt/lists/* && \
# upgrade pip
curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py && \
wget https://bootstrap.pypa.io/get-pip.py -O get-pip.py && \
python3 get-pip.py && \
# install XPU ipex-llm
pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ && \

View file

@@ -17,17 +17,17 @@ RUN wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRO
echo "deb [arch=amd64,i386 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/graphics/ubuntu jammy arc" | tee /etc/apt/sources.list.d/intel.gpu.jammy.list && \
chmod 644 /usr/share/keyrings/intel-graphics.gpg && \
apt-get update && \
apt-get install -y curl wget git gnupg gpg-agent sudo && \
apt-get install -y --no-install-recommends curl wget git gnupg gpg-agent sudo && \
# Install PYTHON 3.11 and IPEX-LLM[xpu]
ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone && \
env DEBIAN_FRONTEND=noninteractive apt-get update && \
apt install libunwind8-dev vim less -y && \
apt-get install -y python3.11 git curl wget && \
apt-get install -y --no-install-recommends python3.11 git curl wget && \
rm /usr/bin/python3 && \
ln -s /usr/bin/python3.11 /usr/bin/python3 && \
ln -s /usr/bin/python3 /usr/bin/python && \
apt-get install -y python3-pip python3.11-dev python3-wheel python3.11-distutils && \
curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py && \
apt-get install -y --no-install-recommends python3-pip python3.11-dev python3-wheel python3.11-distutils && \
wget https://bootstrap.pypa.io/get-pip.py -O get-pip.py && \
python3 get-pip.py && \
rm get-pip.py && \
pip install --upgrade requests argparse urllib3 && \
@@ -37,27 +37,24 @@ RUN wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRO
pip install transformers_stream_generator einops tiktoken && \
# Install opencl-related repos
apt-get update && \
apt-get install -y intel-opencl-icd intel-level-zero-gpu=1.3.26241.33-647~22.04 level-zero level-zero-dev --allow-downgrades && \
apt-get install -y --no-install-recommends intel-opencl-icd intel-level-zero-gpu=1.3.26241.33-647~22.04 level-zero level-zero-dev --allow-downgrades && \
# install nodejs and npm and get webui
apt purge nodejs -y && \
apt purge libnode-dev -y && \
apt autoremove -y && \
apt clean -y && \
curl -sL https://deb.nodesource.com/setup_18.x | sudo -E bash - && \
wget -qO- https://deb.nodesource.com/setup_18.x | sudo -E bash - && \
apt install -y nodejs && \
mkdir -p /llm/scripts && cd /llm && \
git clone https://github.com/open-webui/open-webui.git && \
cd /llm/open-webui/ && \
git checkout e29a999dc910afad91995221cb4bb7c274f87cd6 && \
cp -RPp .env.example .env && \
git clone https://github.com/open-webui/open-webui.git /llm/open-webui && \
git -C /llm/open-webui checkout e29a999dc910afad91995221cb4bb7c274f87cd6 && \
cp -RPp /llm/open-webui/.env.example /llm/open-webui/.env && \
# Build frontend
npm i && \
npm run build && \
npm --prefix /llm/open-webui i && \
npm --prefix /llm/open-webui run build && \
# Install Dependencies
cd ./backend && \
# remove blinker to avoid error
find /usr/lib/python3/dist-packages/ -name 'blinker*' -exec rm -rf {} + && \
pip install -r requirements.txt -U && \
pip install -r /llm/open-webui/backend/requirements.txt -U && \
rm -rf /root/.cache/Cypress && \
pip uninstall -y gunicorn python-jose PyMySQL

View file

@@ -12,14 +12,14 @@ COPY ./start-notebook.sh /llm/start-notebook.sh
# Update the software sources
RUN env DEBIAN_FRONTEND=noninteractive apt-get update && \
# Install essential packages
apt install libunwind8-dev vim less -y && \
apt-get install -y --no-install-recommends libunwind8-dev vim less && \
# Install git, curl, and wget
apt-get install -y git curl wget && \
apt-get install -y --no-install-recommends git curl wget && \
# Install Python 3.11
# Install Python 3.11
apt-get install -y python3.11 && \
apt-get install -y --no-install-recommends python3.11 && \
# Install Python 3.11 development and utility packages
apt-get install -y python3-pip python3.11-dev python3-wheel python3.11-distutils && \
apt-get install -y --no-install-recommends python3-pip python3.11-dev python3-wheel python3.11-distutils && \
# Remove the original /usr/bin/python3 symbolic link
rm /usr/bin/python3 && \
# Create a symbolic link pointing to Python 3.11 at /usr/bin/python3
@@ -32,18 +32,17 @@ RUN env DEBIAN_FRONTEND=noninteractive apt-get update && \
rm get-pip.py && \
pip install --upgrade requests argparse urllib3 && \
# Download ipex-llm-tutorial
cd /llm && \
pip install --upgrade jupyterlab && \
git clone https://github.com/intel-analytics/ipex-llm-tutorial && \
git clone https://github.com/intel-analytics/ipex-llm-tutorial /llm/ipex-llm-tutorial && \
chmod +x /llm/start-notebook.sh && \
# Download all-in-one benchmark
git clone https://github.com/intel-analytics/IPEX-LLM && \
cp -r ./IPEX-LLM/python/llm/dev/benchmark/ ./benchmark && \
cp -r ./IPEX-LLM/python/llm/dev/benchmark/ /llm/benchmark && \
# Copy chat.py script
pip install --upgrade colorama && \
cp -r ./IPEX-LLM/python/llm/portable-zip/ ./portable-zip && \
cp -r ./IPEX-LLM/python/llm/portable-zip/ /llm/portable-zip && \
# Install all-in-one dependencies
apt-get install -y numactl && \
apt-get install -y --no-install-recommends numactl && \
pip install --upgrade omegaconf && \
pip install --upgrade pandas && \
# Install vllm dependencies
@@ -52,12 +51,11 @@ RUN env DEBIAN_FRONTEND=noninteractive apt-get update && \
# Add Qwen support
pip install --upgrade transformers_stream_generator einops && \
# Copy vLLM-Serving
cp -r ./IPEX-LLM/python/llm/example/CPU/vLLM-Serving/ ./vLLM-Serving && \
cp -r ./IPEX-LLM/python/llm/example/CPU/vLLM-Serving/ /llm/vLLM-Serving && \
rm -rf ./IPEX-LLM && \
# Fix vllm service
pip install pydantic==1.10.11 && \
# Install ipex-llm
cd /llm && \
pip install --pre --upgrade ipex-llm[all] && \
# Fix CVE-2024-22195
pip install Jinja2==3.1.3 && \
@@ -65,5 +63,5 @@ RUN env DEBIAN_FRONTEND=noninteractive apt-get update && \
pip install intel-extension-for-pytorch==2.2.0 && \
pip install oneccl_bind_pt==2.2.0 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/cpu/us/ && \
pip install transformers==4.36.2
ENTRYPOINT ["/bin/bash"]

View file

@@ -22,17 +22,17 @@ RUN wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRO
echo "deb [arch=amd64,i386 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/graphics/ubuntu jammy arc" | tee /etc/apt/sources.list.d/intel.gpu.jammy.list && \
chmod 644 /usr/share/keyrings/intel-graphics.gpg && \
apt-get update && \
apt-get install -y curl wget git gnupg gpg-agent && \
apt-get install -y --no-install-recommends curl wget git gnupg gpg-agent && \
# Install PYTHON 3.11 and IPEX-LLM[xpu]
ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone && \
env DEBIAN_FRONTEND=noninteractive apt-get update && \
apt install libunwind8-dev vim less -y && \
apt-get install -y python3.11 git curl wget && \
apt-get install -y --no-install-recommends python3.11 git curl wget && \
rm /usr/bin/python3 && \
ln -s /usr/bin/python3.11 /usr/bin/python3 && \
ln -s /usr/bin/python3 /usr/bin/python && \
apt-get install -y python3-pip python3.11-dev python3-wheel python3.11-distutils && \
curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py && \
apt-get install -y --no-install-recommends python3-pip python3.11-dev python3-wheel python3.11-distutils && \
wget https://bootstrap.pypa.io/get-pip.py -O get-pip.py && \
# Install FastChat from source requires PEP 660 support
python3 get-pip.py && \
rm get-pip.py && \
@@ -43,7 +43,7 @@ RUN wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRO
pip install transformers_stream_generator einops tiktoken && \
# Install opencl-related repos
apt-get update && \
apt-get install -y intel-opencl-icd intel-level-zero-gpu level-zero && \
apt-get install -y --no-install-recommends intel-opencl-icd intel-level-zero-gpu level-zero && \
# Install related libary of chat.py
pip install --upgrade colorama && \
# Download all-in-one benchmark and examples
@@ -66,8 +66,9 @@ RUN wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRO
pip install git+https://github.com/intel/intel-extension-for-deepspeed.git@0eb734b && \
pip install mpi4py && \
apt-get update && \
apt-get install -y google-perftools && \
apt-get install -y --no-install-recommends google-perftools && \
ln -s /usr/local/lib/python3.11/dist-packages/ipex_llm/libs/libtcmalloc.so /lib/libtcmalloc.so && \
rm -rf ./ipex-llm
WORKDIR /llm/

View file

@@ -8,17 +8,18 @@ ARG TINI_VERSION=v0.18.0
ARG PIP_NO_CACHE_DIR=false
COPY ./model_adapter.py.patch /llm/model_adapter.py.patch
ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini /sbin/tini
# Install Serving Dependencies
RUN cd /llm && \
RUN wget -qO /sbin/tini https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini && \
chmod +x /sbin/tini && \
cd /llm && \
apt-get update && \
apt-get install -y wrk && \
apt-get install -y --no-install-recommends wrk && \
pip install --pre --upgrade ipex-llm[serving] && \
# Fix Trivy CVE Issues
pip install Jinja2==3.1.3 transformers==4.36.2 gradio==4.19.2 cryptography==42.0.4 && \
# Fix Qwen model adpater in fastchat
# Fix Qwen model adapter in fastchat
patch /usr/local/lib/python3.11/dist-packages/fastchat/model/model_adapter.py < /llm/model_adapter.py.patch && \
chmod +x /sbin/tini && \
cp /sbin/tini /usr/bin/tini && \
# Install vllm
git clone https://github.com/vllm-project/vllm.git && \
@@ -28,10 +29,11 @@ RUN cd /llm && \
pip install -v -r requirements-cpu.txt --extra-index-url https://download.pytorch.org/whl/cpu && \
VLLM_TARGET_DEVICE=cpu python3 setup.py install
ADD ./vllm_offline_inference.py /llm/
ADD ./payload-1024.lua /llm/
ADD ./start-vllm-service.sh /llm/
ADD ./benchmark_vllm_throughput.py /llm/
ADD ./start-fastchat-service.sh /llm/
COPY ./vllm_offline_inference.py /llm/
COPY ./payload-1024.lua /llm/
COPY ./start-vllm-service.sh /llm/
COPY ./benchmark_vllm_throughput.py /llm/
COPY ./start-fastchat-service.sh /llm/
WORKDIR /llm/

View file

@@ -6,31 +6,27 @@ ARG https_proxy
# Disable pip's cache behavior
ARG PIP_NO_CACHE_DIR=false
# Install Serving Dependencies
RUN cd /llm &&\
# Install ipex-llm[serving] only will update ipex_llm source code without updating
# bigdl-core-xe, which will lead to problems
apt-get update && \
apt-get install -y libfabric-dev wrk libaio-dev && \
# Install ipex-llm[serving] only will update ipex_llm source code without updating
# bigdl-core-xe, which will lead to problems
RUN apt-get update && \
apt-get install -y --no-install-recommends libfabric-dev wrk libaio-dev && \
pip install --pre --upgrade ipex-llm[xpu,serving] && \
pip install transformers==4.37.0 gradio==4.19.2 && \
# Install vLLM-v2 dependencies
cd /llm && \
git clone -b sycl_xpu https://github.com/analytics-zoo/vllm.git && \
cd vllm && \
pip install -r requirements-xpu.txt && \
git clone -b sycl_xpu https://github.com/analytics-zoo/vllm.git /llm/vllm && \
pip install -r /llm/vllm/requirements-xpu.txt && \
pip install --no-deps xformers && \
VLLM_BUILD_XPU_OPS=1 pip install --no-build-isolation -v -e . && \
VLLM_BUILD_XPU_OPS=1 pip install --no-build-isolation -v -e /llm/vllm && \
pip install outlines==0.0.34 --no-deps && \
pip install interegular cloudpickle diskcache joblib lark nest-asyncio numba scipy && \
# For Qwen series models support
pip install transformers_stream_generator einops tiktoken
ADD ./vllm_offline_inference.py /llm/
ADD ./payload-1024.lua /llm/
ADD ./start-vllm-service.sh /llm/
ADD ./benchmark_vllm_throughput.py /llm/
ADD ./start-fastchat-service.sh /llm/
COPY ./vllm_offline_inference.py /llm/
COPY ./payload-1024.lua /llm/
COPY ./start-vllm-service.sh /llm/
COPY ./benchmark_vllm_throughput.py /llm/
COPY ./start-fastchat-service.sh /llm/
WORKDIR /llm/