Update setup.py, add new actions, and add compatible mode (#25)

* update setup.py

* add new actions

* add compatible mode
Wang, Jian4 · 2024-03-22 15:44:59 +08:00 · committed by GitHub
parent 9df70d95eb
commit a1048ca7f6
11 changed files with 188 additions and 27 deletions


@@ -0,0 +1,46 @@
name: "llm-cli Flow Verification (Linux)"
description: "Verify the llm-cli flow on linux"
runs:
using: "composite"
steps:
- name: Test llama llm-cli
shell: bash
run: |
llm-cli -t $THREAD_NUM -n 256 -x llama -m $LLAMA_INT4_CKPT_PATH -p 'Once upon a time,'
timeout 30s llm-cli -t $THREAD_NUM -n 256 -x llama -m $LLAMA_INT4_CKPT_PATH -i -p \
'A chat between a curious user and a helpful and polite AI assistant. User:Can you tell me a story? AI:' >test.out 2>&1 || true
if ! grep -q 'A chat between a curious user and a helpful and polite AI assistant.' test.out ; then
exit 1
fi
rm test.out
- name: Test gptneox llm-cli
shell: bash
run: |
llm-cli -t $THREAD_NUM -n 256 -x gptneox -m $GPTNEOX_INT4_CKPT_PATH -p 'Once upon a time,'
timeout 30s llm-cli -t $THREAD_NUM -n 256 -x gptneox -m $GPTNEOX_INT4_CKPT_PATH -i -p \
'A chat between a curious user and a helpful and polite AI assistant. User:Can you tell me a story? AI:' >test.out 2>&1 || true
if ! grep -q 'A chat between a curious user and a helpful and polite AI assistant.' test.out ; then
exit 1
fi
rm test.out
- name: Test bloom llm-cli
shell: bash
run: |
llm-cli -t $THREAD_NUM -n 256 -x bloom -m $BLOOM_INT4_CKPT_PATH -p 'Once upon a time,'
- name: Test starcoder llm-cli
shell: bash
run: |
llm-cli -t $THREAD_NUM -n 256 -x starcoder -m $STARCODER_INT4_CKPT_PATH -p 'def check_odd('
# - name: Test chatglm llm-cli
# shell: bash
# run: |
# llm-cli -t $THREAD_NUM -n 256 -x chatglm -m $CHATGLM_INT4_CKPT_PATH -p '你好'
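A note on the interactive checks above: "timeout 30s ... || true" bounds the interactive session and discards timeout's non-zero exit status, so pass/fail is decided solely by the grep over the captured output. The bare idiom, as a sketch with placeholder command and marker:

  # sketch of the verification idiom used above; command and marker are placeholders
  timeout 30s some-interactive-command > test.out 2>&1 || true
  if ! grep -q 'expected output marker' test.out; then
    exit 1   # marker missing: the flow is considered broken
  fi
  rm test.out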


@@ -0,0 +1,25 @@
name: "llm-cli Flow Verification (Windows)"
description: "Verify the llm-cli flow on Windows"
runs:
using: "composite"
steps:
- name: Test llama llm-cli
shell: powershell
run: |
llm-cli.ps1 -t $env:THREAD_NUM -n 256 -x llama -m $env:LLAMA_INT4_CKPT_PATH -p 'Once upon a time,'
- name: Test gptneox llm-cli
shell: powershell
run: |
llm-cli.ps1 -t $env:THREAD_NUM -n 256 -x gptneox -m $env:GPTNEOX_INT4_CKPT_PATH -p 'Once upon a time,'
- name: Test bloom llm-cli
shell: powershell
run: |
llm-cli.ps1 -t $env:THREAD_NUM -n 256 -x bloom -m $env:BLOOM_INT4_CKPT_PATH -p 'Once upon a time,'
# - name: Test starcoder llm-cli
# shell: powershell
# run: |
# llm-cli.ps1 -t $env:THREAD_NUM -x starcoder -m $env:STARCODER_INT4_CKPT_PATH -p 'def check_odd('


@@ -0,0 +1,43 @@
name: "BigDL-LLM convert tests"
description: "BigDL-LLM convert test, including downloading original models"
runs:
using: "composite"
steps:
- name: Download original models (LLaMA)
shell: bash
run: |
if [ ! -d $LLAMA_ORIGIN_PATH ]; then
echo "Directory $LLAMA_ORIGIN_PATH not found. Downloading from FTP server..."
wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/llama-7b-hf -P $ORIGIN_DIR
fi
- name: Download original models (GPT-NeoX)
shell: bash
run: |
if [ ! -d $GPTNEOX_ORIGIN_PATH ]; then
echo "Directory $GPTNEOX_ORIGIN_PATH not found. Downloading from FTP server..."
wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/gptneox-7b-redpajama-bf16 -P $ORIGIN_DIR
fi
- name: Download original models (BLOOM)
shell: bash
run: |
if [ ! -d $BLOOM_ORIGIN_PATH ]; then
echo "Directory $BLOOM_ORIGIN_PATH not found. Downloading from FTP server..."
wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/bloomz-7b1 -P $ORIGIN_DIR
fi
- name: Download original models (StarCoder)
shell: bash
run: |
if [ ! -d $STARCODER_ORIGIN_PATH ]; then
echo "Directory $STARCODER_ORIGIN_PATH not found. Downloading from FTP server..."
wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/gpt_bigcode-santacoder -P $ORIGIN_DIR
fi
- name: Convert test
shell: bash
run: |
echo "Running the convert models tests..."
python -m pytest -s python/llm/test/convert/test_convert_model.py
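Composite actions like this one are invoked by path from a workflow after checkout. A minimal caller sketch; the action's directory path and the env values are assumptions, since file paths are not shown in this view:

  # hypothetical workflow steps invoking the composite action above
  - uses: actions/checkout@v3
  - name: Run convert tests
    uses: ./.github/actions/llm/convert-test   # assumed location of this action.yml
    env:
      ORIGIN_DIR: ./llm/origin-models          # assumed; parent dir for the wget downloads
      LLM_FTP_URL: ${{ secrets.LLM_FTP_URL }}  # assumed secret name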


@@ -0,0 +1,14 @@
name: 'BigDL-LLM example tests'
description: 'BigDL-LLM example tests'
runs:
  using: "composite"
  steps:
    - name: Test LLAMA2
      shell: bash
      env:
        INT4_CKPT_DIR: ./llm/ggml-actions/stable
        LLM_DIR: ./llm
      run: |
        bash python/llm/dev/test/run-example-tests.sh

apps/ipynb2py.sh (new file, 21 lines)

@@ -0,0 +1,21 @@
#!/bin/bash
## Usage ################################
# ./ipynb2py.sh <file-name without extension>
# Example:
#   ./ipynb2py.sh notebooks/neural_networks/rnn
#########################################
if [ $# -ne "1" ]; then
  echo "Usage: ./ipynb2py.sh <file-name without extension>"
else
  cp $1.ipynb $1.tmp.ipynb
  sed -i 's/%%/#/' $1.tmp.ipynb
  sed -i 's/%pylab/#/' $1.tmp.ipynb
  jupyter nbconvert $1.tmp.ipynb --to python
  mv $1.tmp.py $1.py
  sed -i '1i# -*- coding: utf-8 -*-' $1.py
  # insert the shebang above the coding line added just before
  sed -i '1i#!/usr/bin/python' $1.py
  rm $1.tmp.ipynb
fi
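For reference, a typical invocation using the example path from the header comment (the output file name follows from the mv step above):

  bash apps/ipynb2py.sh notebooks/neural_networks/rnn
  # -> writes notebooks/neural_networks/rnn.py next to the notebook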


@@ -50,10 +50,10 @@ def check_torch_version():
     except:
         print("PyTorch is not installed.")
 
-def check_bigdl_version():
+def check_ipex_llm_version():
     import os
-    if os.system("pip show bigdl-llm")!=0:
-        print("BigDL is not installed")
+    if os.system("pip show ipex-llm")!=0:
+        print("ipex-llm is not installed")
 
 def check_ipex_version():
@@ -71,7 +71,7 @@ def main():
     print("-----------------------------------------------------------------")
     check_torch_version()
     print("-----------------------------------------------------------------")
-    check_bigdl_version()
+    check_ipex_llm_version()
     print("-----------------------------------------------------------------")
     check_ipex_version()


@@ -51,11 +51,11 @@ check_torch()
     fi
 }
 
-check_bigdl()
+check_ipex_llm()
 {
     echo "-----------------------------------------------------------------"
-    echo -n 'BigDL '
-    pip show bigdl-llm | grep Version:
+    echo -n 'ipex-llm '
+    pip show ipex-llm | grep Version:
 }
 
 check_cpu_info()
@@ -135,10 +135,10 @@ main()
         exit -1
     fi
 
-    # check site packages version, such as transformers, pytorch, bigdl
+    # check site packages version, such as transformers, pytorch, ipex_llm
     check_transformers
     check_torch
-    check_bigdl
+    check_ipex_llm
     check_ipex
 
     # verify hardware (how many gpus are available, gpu status, cpu info, memory info, etc.)


@@ -1,9 +1,9 @@
 #!/bin/bash
 
 ## Usage #############################
-# source bigdl-llm-init
+# source ipex-llm-init
 # Example:
-# source bigdl-llm-init
+# source ipex-llm-init
 ######################################
 
 function enable_iomp {
@@ -60,10 +60,10 @@ function display-var {
 }
 
 function display-help {
-    echo "Usage: source bigdl-llm-init [-o] [--option]"
+    echo "Usage: source ipex-llm-init [-o] [--option]"
     echo ""
-    echo "bigdl-llm-init is a tool to automatically configure and run the subcommand under"
-    echo "environment variables for accelerating BigDL-LLM."
+    echo "ipex-llm-init is a tool to automatically configure and run the subcommand under"
+    echo "environment variables for accelerating IPEX-LLM."
     echo ""
     echo "Optional options:"
     echo "  -h, --help    Display this help message and exit."
@@ -157,25 +157,25 @@ done
 shift $((OPTIND -1))
 
-# Find bigdl-llm-init dir
+# Find ipex-llm-init dir
 if [ ! -z $BASH_SOURCE ]; then
     # using bash
     if [ "$BASH_SOURCE" = "$0" ]; then
-        echo "Error: Incorrect usage: bigdl-llm-init must be sourced."
+        echo "Error: Incorrect usage: ipex-llm-init must be sourced."
         exit 1
     fi
     BIN_DIR="$(dirname $BASH_SOURCE)"
 else
     # using zsh
     if [ "$zsh_eval_context" = "toplevel" ]; then
-        echo "Error: Incorrect usage: bigdl-llm-init must be sourced."
+        echo "Error: Incorrect usage: ipex-llm-init must be sourced."
         exit 1
     fi
     BIN_DIR="$(dirname ${(%):-%N})"
 fi
 
 LIB_DIR=$(dirname ${BIN_DIR})/lib
-LLM_DIR=$(dirname $(python3 -c "import bigdl; print(bigdl.__file__)"))/llm
+LLM_DIR=$(dirname $(python3 -c "import ipex_llm; print(ipex_llm.__file__)"))
 
 if [ "${ENABLE_IOMP}" -eq 1 ]; then
     file="${LIB_DIR}/libiomp5.so"


@@ -39,16 +39,16 @@ import copy
 from setuptools import setup
 
 long_description = '''
-    BigDL LLM
+    IPEX LLM
 '''
 exclude_patterns = ["*__pycache__*", "*ipynb_checkpoints*"]
-BIGDL_PYTHON_HOME = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-VERSION = open(os.path.join(BIGDL_PYTHON_HOME,
-               'version.txt'), 'r').read().strip()
+IPEX_LLM_PYTHON_HOME = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+VERSION = open(os.path.join(IPEX_LLM_PYTHON_HOME,
+               './llm/version.txt'), 'r').read().strip()
 llm_home = os.path.join(os.path.dirname(os.path.abspath(__file__)), "src")
 github_artifact_dir = os.path.join(llm_home, '../llm-binary')
-libs_dir = os.path.join(llm_home, "bigdl", "llm", "libs")
+libs_dir = os.path.join(llm_home, "ipex_llm", "libs")
 CONVERT_DEP = ['numpy == 1.26.4',  # latest 2.0.0b1 will cause an error
                'torch',
                'transformers == 4.31.0', 'sentencepiece', 'tokenizers == 0.13.3',
@@ -145,7 +145,7 @@ ext_libs = [
 def get_llm_packages():
     llm_packages = []
-    for dirpath, _, _ in os.walk(os.path.join(llm_home, "bigdl")):
+    for dirpath, _, _ in os.walk(os.path.join(llm_home, "ipex_llm")):
         print(dirpath)
         package = dirpath.split(llm_home + os.sep)[1].replace(os.sep, '.')
         if any(fnmatch.fnmatchcase(package, pat=pattern)
@@ -299,7 +299,7 @@ def setup_package():
     metadata = dict(
-        name='bigdl-llm',
+        name='ipex_llm',
         version=VERSION,
         description='Large Language Model Develop Toolkit',
         long_description=long_description,
@@ -330,8 +330,8 @@ def setup_package():
             'Programming Language :: Python :: 3.9',
             'Programming Language :: Python :: Implementation :: CPython'],
         scripts={
-            'Linux': ['src/bigdl/llm/cli/llm-cli', 'src/bigdl/llm/cli/llm-chat', 'scripts/bigdl-llm-init'],
-            'Windows': ['src/bigdl/llm/cli/llm-cli.ps1', 'src/bigdl/llm/cli/llm-chat.ps1'],
+            'Linux': ['src/ipex_llm/cli/llm-cli', 'src/ipex_llm/cli/llm-chat', 'scripts/ipex-llm-init'],
+            'Windows': ['src/ipex_llm/cli/llm-cli.ps1', 'src/ipex_llm/cli/llm-chat.ps1'],
         }[platform_name],
         platforms=['windows']
     )
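One consequence of renaming name='bigdl-llm' to name='ipex_llm': pip normalizes '-' and '_' in project names, so once a wheel is built from this setup.py, either spelling of the new name resolves to the same project:

  pip install ipex-llm   # equivalent to 'pip install ipex_llm' under pip's name normalization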


@@ -23,6 +23,8 @@ from .convert_model import llm_convert
 from .optimize import optimize_model
 import os
 from .llm_patching import llm_patch, llm_unpatch
+import sys
+import types
 
 # Default is true; set to false to disable auto-importing Intel Extension for PyTorch.
 BIGDL_IMPORT_IPEX = os.getenv("BIGDL_IMPORT_IPEX", 'True').lower() in ('true', '1', 't')
@@ -30,3 +32,12 @@ if BIGDL_IMPORT_IPEX:
     # Import Intel Extension for PyTorch as ipex if the XPU version is installed
     from .utils.ipex_importer import ipex_importer
     ipex_importer.import_ipex()
+
+# Default is true; set to false to disable auto-patching bigdl-llm to ipex_llm.
+BIGDL_COMPATIBLE_MODE = os.getenv("BIGDL_COMPATIBLE_MODE", 'True').lower() in ('true', '1', 't')
+if BIGDL_COMPATIBLE_MODE:
+    # Let applications written against the previous bigdl-llm run unchanged:
+    # map 'bigdl' to a dummy module to avoid ModuleNotFoundError for 'bigdl'
+    sys.modules['bigdl'] = types.ModuleType('_ipex_llm_dummy')
+    # Map 'bigdl.llm' to 'ipex_llm'
+    sys.modules['bigdl.llm'] = sys.modules['ipex_llm']
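The effect of compatible mode is that imports written against the old package name keep resolving. A minimal sketch of what it enables (optimize_model is re-exported by ipex_llm in this same file; the aliasing itself is plain sys.modules manipulation):

  import ipex_llm  # importing the package applies the compatibility patch above

  # legacy bigdl-llm style import, now served by ipex_llm:
  from bigdl.llm import optimize_model
  assert optimize_model is ipex_llm.optimize_model  # same object under both names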

python/llm/version.txt (new file, 1 line)

@@ -0,0 +1 @@
2.1.0.dev0