LLM: add cpp option in setup.py (#10403)

* add llama_cpp option

* meet code review
Ruonan Wang 2024-03-13 20:12:59 +08:00 committed by GitHub
parent 0dbce53464
commit 2be8bbd236


@@ -49,7 +49,8 @@ VERSION = open(os.path.join(BIGDL_PYTHON_HOME,
 llm_home = os.path.join(os.path.dirname(os.path.abspath(__file__)), "src")
 github_artifact_dir = os.path.join(llm_home, '../llm-binary')
 libs_dir = os.path.join(llm_home, "bigdl", "llm", "libs")
-CONVERT_DEP = ['numpy >= 1.22', 'torch',
+CONVERT_DEP = ['numpy == 1.26.4',  # latest 2.0.0b1 will cause error
+               'torch',
                'transformers == 4.31.0', 'sentencepiece', 'tokenizers == 0.13.3',
                # TODO: Support accelerate 0.22.0
                'accelerate == 0.21.0', 'tabulate']
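
For reference, CONVERT_DEP is the dependency list that setup() wires into the package's requirements. The sketch below shows how such a pinned list is typically handed to setuptools so that pip resolves the exact versions at install time; the package name, version, and layout here are illustrative placeholders, not taken from this diff.

# Minimal sketch of how a pinned list like CONVERT_DEP is consumed by
# setuptools; everything except CONVERT_DEP itself is a placeholder.
from setuptools import setup, find_packages

CONVERT_DEP = ['numpy == 1.26.4',  # pin: the 2.0.0b1 pre-release causes an error
               'torch',
               'transformers == 4.31.0', 'sentencepiece', 'tokenizers == 0.13.3',
               'accelerate == 0.21.0', 'tabulate']

setup(
    name="example-llm-pkg",          # hypothetical distribution name
    version="0.1.0",                 # hypothetical version
    package_dir={"": "src"},
    packages=find_packages("src"),
    install_requires=CONVERT_DEP,    # pip enforces these exact pins at install time
)
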
@@ -321,7 +322,8 @@ def setup_package():
             "xpu": xpu_requires,  # default to ipex 2.1 for linux and windows
             "xpu-2-0": xpu_20_requires,
             "xpu-2-1": xpu_21_requires,
-            "serving": serving_requires},
+            "serving": serving_requires,
+            "cpp": ["bigdl-core-cpp==" + VERSION + ";platform_system=='Linux'"]},
         classifiers=[
             'License :: OSI Approved :: Apache Software License',
             'Programming Language :: Python :: 3',
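
The new "cpp" extra attaches a PEP 508 environment marker, so bigdl-core-cpp is only pulled in on Linux. The following sketch shows how such a marker string is evaluated, using the packaging library purely for illustration; it is not part of this change, and VERSION is a placeholder.

# Sketch: how pip-style tools evaluate the environment marker attached to
# the "cpp" extra above.
from packaging.markers import Marker

VERSION = "0.0.0"  # placeholder; the real value comes from setup.py
requirement = "bigdl-core-cpp==" + VERSION + ";platform_system=='Linux'"

name_and_version, marker_text = requirement.split(";", 1)
marker = Marker(marker_text)

# True on Linux (bigdl-core-cpp gets installed), False elsewhere (it is skipped).
print(name_and_version, "->", marker.evaluate())

With this extra in place, pip install bigdl-llm[cpp] (assuming the distribution is published as bigdl-llm) installs the matching bigdl-core-cpp wheel on Linux and skips that dependency on other platforms.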