diff --git a/python/llm/example/langchain/README.md b/python/llm/example/langchain/README.md index 6f7451e5..22340524 100644 --- a/python/llm/example/langchain/README.md +++ b/python/llm/example/langchain/README.md @@ -25,7 +25,7 @@ Follow the instructions in [Convert model](https://github.com/intel-analytics/Bi ### 1. Streaming Chat ```bash -python ./streamchat.py -m CONVERTED_MODEL_PATH -x MODEL_FAMILY -q QUESTION -t THREAD_NUM +python native_int4/streamchat.py -m CONVERTED_MODEL_PATH -x MODEL_FAMILY -q QUESTION -t THREAD_NUM ``` arguments info: - `-m CONVERTED_MODEL_PATH`: **required**, path to the converted model @@ -35,7 +35,7 @@ arguments info: ### 2. Question Answering over Docs ```bash -python ./docqa.py -m CONVERTED_MODEL_PATH -x MODEL_FAMILY -i DOC_PATH -q QUESTION -c CONTEXT_SIZE -t THREAD_NUM +python native_int4/docqa.py -m CONVERTED_MODEL_PATH -x MODEL_FAMILY -i DOC_PATH -q QUESTION -c CONTEXT_SIZE -t THREAD_NUM ``` arguments info: - `-m CONVERTED_MODEL_PATH`: **required**, path to the converted model in above step @@ -58,7 +58,7 @@ pip install soundfile ``` ```bash -python ./voiceassistant.py -x MODEL_FAMILY -m CONVERTED_MODEL_PATH -t THREAD_NUM +python native_int4/voiceassistant.py -x MODEL_FAMILY -m CONVERTED_MODEL_PATH -t THREAD_NUM ``` arguments info: @@ -70,3 +70,15 @@ When you see output says > listening now... Please say something through your microphone (e.g. What is AI). The programe will automatically detect when you have completed your speech and recogize them. + +### 4. Math + +This is an example using `LLMMathChain`. This example has been validated using [phoenix-7b](https://huggingface.co/FreedomIntelligence/phoenix-inst-chat-7b). + +```bash +python transformers_int4/math.py -m MODEL_PATH -q QUESTION +``` +arguments info: +- `-m MODEL_PATH`: **required**, path to the transformers model +- `-q QUESTION`: question to ask. Default is `What is 13 raised to the .3432 power?`. 
+ diff --git a/python/llm/example/langchain/native_int4/docqa.py b/python/llm/example/langchain/native_int4/docqa.py index 42d808bf..bda20e66 100644 --- a/python/llm/example/langchain/native_int4/docqa.py +++ b/python/llm/example/langchain/native_int4/docqa.py @@ -19,6 +19,8 @@ # Otherwise there would be module not found error in non-pip's setting as Python would # only search the first bigdl package and end up finding only one sub-package. +# Code is adapted from https://python.langchain.com/docs/modules/chains/additional/question_answering.html + import argparse from langchain.vectorstores import Chroma @@ -71,7 +73,7 @@ def main(args): if __name__ == '__main__': - parser = argparse.ArgumentParser(description='BigDL-LLM Langchain Question Answering over Docs Example') + parser = argparse.ArgumentParser(description='BigdlNativeLLM Langchain QA over Docs Example') parser.add_argument('-x','--model-family', type=str, required=True, choices=["llama", "bloom", "gptneox"], help='the model family') diff --git a/python/llm/example/langchain/native_int4/streamchat.py b/python/llm/example/langchain/native_int4/streamchat.py index e9d52838..4a680cce 100644 --- a/python/llm/example/langchain/native_int4/streamchat.py +++ b/python/llm/example/langchain/native_int4/streamchat.py @@ -55,7 +55,7 @@ def main(args): if __name__ == '__main__': - parser = argparse.ArgumentParser(description='BigDL-LLM Langchain Streaming Chat Example') + parser = argparse.ArgumentParser(description='BigdlNativeLLM Langchain Streaming Chat Example') parser.add_argument('-x','--model-family', type=str, required=True, choices=["llama", "bloom", "gptneox"], help='the model family') diff --git a/python/llm/example/langchain/native_int4/voiceassistant.py b/python/llm/example/langchain/native_int4/voiceassistant.py index 2324e58e..0d275549 100644 --- a/python/llm/example/langchain/native_int4/voiceassistant.py +++ b/python/llm/example/langchain/native_int4/voiceassistant.py @@ -109,7 +109,7 @@ def 
main(args): if __name__ == '__main__': - parser = argparse.ArgumentParser(description='BigDL-LLM Langchain Voice Assistant Example') + parser = argparse.ArgumentParser(description='BigdlNativeLLM Langchain Voice Assistant Example') parser.add_argument('-x','--model-family', type=str, required=True, help='the model family') parser.add_argument('-m','--model-path', type=str, required=True, diff --git a/python/llm/example/langchain/transformers_int4/chat.py b/python/llm/example/langchain/transformers_int4/chat.py index 3c704672..e6be1ca1 100644 --- a/python/llm/example/langchain/transformers_int4/chat.py +++ b/python/llm/example/langchain/transformers_int4/chat.py @@ -53,7 +53,7 @@ def main(args): if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Llama-CPP-Python style API Simple Example') + parser = argparse.ArgumentParser(description='TransformersLLM Langchain Chat Example') parser.add_argument('-m','--model-path', type=str, required=True, help='the path to transformers model') parser.add_argument('-q', '--question', type=str, default='What is AI?', diff --git a/python/llm/example/langchain/transformers_int4/docqa.py b/python/llm/example/langchain/transformers_int4/docqa.py index e42fa866..93aa0675 100644 --- a/python/llm/example/langchain/transformers_int4/docqa.py +++ b/python/llm/example/langchain/transformers_int4/docqa.py @@ -19,6 +19,8 @@ # Otherwise there would be module not found error in non-pip's setting as Python would # only search the first bigdl package and end up finding only one sub-package. 
+# Code is adapted from https://python.langchain.com/docs/modules/chains/additional/question_answering.html + import argparse from langchain.vectorstores import Chroma @@ -66,7 +68,7 @@ def main(args): if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Transformer-int4 style API Simple Example') + parser = argparse.ArgumentParser(description='TransformersLLM Langchain QA over Docs Example') parser.add_argument('-m','--model-path', type=str, required=True, help='the path to transformers model') parser.add_argument('-i', '--input-path', type=str, diff --git a/python/llm/example/langchain/transformers_int4/math.py b/python/llm/example/langchain/transformers_int4/math.py new file mode 100644 index 00000000..456ac567 --- /dev/null +++ b/python/llm/example/langchain/transformers_int4/math.py @@ -0,0 +1,56 @@ + +# +# Copyright 2016 The BigDL Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This would makes sure Python is aware there is more than one sub-package within bigdl, +# physically located elsewhere. +# Otherwise there would be module not found error in non-pip's setting as Python would +# only search the first bigdl package and end up finding only one sub-package. 
+ +# Code is adapted from https://python.langchain.com/docs/modules/chains/additional/llm_math + +import argparse + +from langchain.chains import LLMMathChain +from bigdl.llm.langchain.llms import TransformersLLM, TransformersPipelineLLM + + +def main(args): + + question = args.question + model_path = args.model_path + + llm = TransformersLLM.from_model_id( + model_id=model_path, + model_kwargs={"temperature": 0, "max_length": 1024, "trust_remote_code": True}, + ) + + llm_math = LLMMathChain.from_llm(llm, verbose=True) + + output = llm_math.run(question) + print("====output=====") + print(output) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='TransformersLLM Langchain Math Example') + parser.add_argument('-m','--model-path', type=str, required=True, + help='the path to transformers model') + parser.add_argument('-q', '--question', type=str, default='What is 13 raised to the .3432 power?', + help='question you want to ask.') + args = parser.parse_args() + + main(args)