add more langchain examples (#8542)

* update langchain descriptions

* add mathchain example

* update readme

* update readme
Shengsheng Huang 2023-07-19 17:42:18 +08:00 committed by GitHub
parent 3bd1420b71
commit 616b7cb0a2
7 changed files with 80 additions and 8 deletions

View file

@@ -25,7 +25,7 @@ Follow the instructions in [Convert model](https://github.com/intel-analytics/Bi
### 1. Streaming Chat
```bash
-python ./streamchat.py -m CONVERTED_MODEL_PATH -x MODEL_FAMILY -q QUESTION -t THREAD_NUM
+python native_int4/streamchat.py -m CONVERTED_MODEL_PATH -x MODEL_FAMILY -q QUESTION -t THREAD_NUM
```
arguments info:
- `-m CONVERTED_MODEL_PATH`: **required**, path to the converted model
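Under the hood, streamchat.py builds one of BigDL-LLM's native langchain wrappers and streams tokens through a callback. A minimal sketch of the idea, assuming `BigdlNativeLLM` lives in `bigdl.llm.langchain.llms` and accepts `model_path`, `model_family`, and `n_threads` keyword arguments (check the script for the verified constructor):

```python
# Sketch only: the constructor arguments below are assumptions, not the
# verified BigdlNativeLLM signature -- see streamchat.py for the real call.
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from bigdl.llm.langchain.llms import BigdlNativeLLM

llm = BigdlNativeLLM(
    model_path="CONVERTED_MODEL_PATH",             # converted int4 model
    model_family="llama",                          # llama / bloom / gptneox
    n_threads=2,                                   # assumed thread argument
    callbacks=[StreamingStdOutCallbackHandler()],  # print tokens as they arrive
)
llm("What is AI?")
```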
@@ -35,7 +35,7 @@ arguments info:
### 2. Question Answering over Docs
```bash
-python ./docqa.py -m CONVERTED_MODEL_PATH -x MODEL_FAMILY -i DOC_PATH -q QUESTION -c CONTEXT_SIZE -t THREAD_NUM
+python native_int4/docqa.py -m CONVERTED_MODEL_PATH -x MODEL_FAMILY -i DOC_PATH -q QUESTION -c CONTEXT_SIZE -t THREAD_NUM
```
arguments info:
- `-m CONVERTED_MODEL_PATH`: **required**, path to the converted model in above step
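For reference, the retrieve-then-read pattern behind docqa.py can be sketched with stock langchain components; here `HuggingFaceEmbeddings` and the `BigdlNativeLLM` arguments are stand-ins, not the script's exact code:

```python
# Hypothetical sketch of the docqa.py flow; the embedding class and LLM
# constructor arguments are assumptions.
from langchain.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from langchain.chains.question_answering import load_qa_chain
from bigdl.llm.langchain.llms import BigdlNativeLLM

docs = TextLoader("DOC_PATH").load()                        # read the document
texts = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0).split_documents(docs)
db = Chroma.from_documents(texts, HuggingFaceEmbeddings())  # index the chunks

llm = BigdlNativeLLM(model_path="CONVERTED_MODEL_PATH", model_family="llama")
chain = load_qa_chain(llm, chain_type="stuff")              # stuff retrieved chunks into one prompt

question = "QUESTION"
print(chain.run(input_documents=db.similarity_search(question), question=question))
```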
@@ -58,7 +58,7 @@ pip install soundfile
```
```bash
-python ./voiceassistant.py -x MODEL_FAMILY -m CONVERTED_MODEL_PATH -t THREAD_NUM
+python native_int4/voiceassistant.py -x MODEL_FAMILY -m CONVERTED_MODEL_PATH -t THREAD_NUM
```
arguments info:
@@ -70,3 +70,15 @@ When you see output says
> listening now...
Please say something through your microphone (e.g. "What is AI?"). The program automatically detects when you have finished speaking and recognizes it.
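The assistant's loop is record, transcribe, then answer. A rough sketch, assuming the `speech_recognition` and `openai-whisper` packages and the same hypothetical `BigdlNativeLLM` arguments as above; voiceassistant.py's actual recognizer and ASR setup may differ:

```python
# Rough sketch only; not the script's exact code.
import speech_recognition as sr
import whisper
from bigdl.llm.langchain.llms import BigdlNativeLLM

asr = whisper.load_model("base")
llm = BigdlNativeLLM(model_path="CONVERTED_MODEL_PATH", model_family="llama")

with sr.Microphone() as source:
    print("listening now...")
    audio = sr.Recognizer().listen(source)  # returns once silence is detected
with open("speech.wav", "wb") as f:
    f.write(audio.get_wav_data())           # hand the capture to whisper
question = asr.transcribe("speech.wav")["text"]
print(llm(question))
```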
+### 4. Math
+This example uses `LLMMathChain` to answer math questions. It has been validated with [phoenix-7b](https://huggingface.co/FreedomIntelligence/phoenix-inst-chat-7b).
+```bash
+python transformers_int4/math.py -m MODEL_PATH -q QUESTION
+```
+arguments info:
+- `-m MODEL_PATH`: **required**, path to the transformers model
+- `-q QUESTION`: question to ask. Default is `What is 13 raised to the .3432 power?`.
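`LLMMathChain` does not trust the model's arithmetic: it prompts the model to translate the question into a one-line expression, then evaluates that expression numerically with `numexpr`. Roughly what happens for the default question:

```python
# The evaluation step LLMMathChain performs once the model has emitted an expression.
import math
import numexpr

expression = "13 ** .3432"  # what a well-behaved model returns for the default question
print(numexpr.evaluate(expression, local_dict={"pi": math.pi, "e": math.e}).item())
# ~2.4116
```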

View file

@@ -19,6 +19,8 @@
# Otherwise there would be a module-not-found error in a non-pip setting as Python would
# only search the first bigdl package and end up finding only one sub-package.
+# Code is adapted from https://python.langchain.com/docs/modules/chains/additional/question_answering.html
import argparse
from langchain.vectorstores import Chroma
@@ -71,7 +73,7 @@ def main(args):
if __name__ == '__main__':
-    parser = argparse.ArgumentParser(description='BigDL-LLM Langchain Question Answering over Docs Example')
+    parser = argparse.ArgumentParser(description='BigdlNativeLLM Langchain QA over Docs Example')
    parser.add_argument('-x','--model-family', type=str, required=True,
                        choices=["llama", "bloom", "gptneox"],
                        help='the model family')

View file

@@ -55,7 +55,7 @@ def main(args):
if __name__ == '__main__':
-    parser = argparse.ArgumentParser(description='BigDL-LLM Langchain Streaming Chat Example')
+    parser = argparse.ArgumentParser(description='BigdlNativeLLM Langchain Streaming Chat Example')
    parser.add_argument('-x','--model-family', type=str, required=True,
                        choices=["llama", "bloom", "gptneox"],
                        help='the model family')

View file

@@ -109,7 +109,7 @@ def main(args):
if __name__ == '__main__':
-    parser = argparse.ArgumentParser(description='BigDL-LLM Langchain Voice Assistant Example')
+    parser = argparse.ArgumentParser(description='BigdlNativeLLM Langchain Voice Assistant Example')
    parser.add_argument('-x','--model-family', type=str, required=True,
                        help='the model family')
    parser.add_argument('-m','--model-path', type=str, required=True,

View file

@@ -53,7 +53,7 @@ def main(args):
if __name__ == '__main__':
-    parser = argparse.ArgumentParser(description='Llama-CPP-Python style API Simple Example')
+    parser = argparse.ArgumentParser(description='TransformersLLM Langchain Chat Example')
    parser.add_argument('-m','--model-path', type=str, required=True,
                        help='the path to transformers model')
    parser.add_argument('-q', '--question', type=str, default='What is AI?',
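chat.py runs the transformers-backed wrapper through a plain `LLMChain`. A minimal sketch, reusing the `TransformersLLM.from_model_id` call shown in math.py below; the prompt template here is illustrative, not the script's exact prompt:

```python
# Minimal chat sketch under the assumptions named above.
from langchain import LLMChain, PromptTemplate
from bigdl.llm.langchain.llms import TransformersLLM

prompt = PromptTemplate(template="Q: {question}\nA:", input_variables=["question"])
llm = TransformersLLM.from_model_id(
    model_id="MODEL_PATH",
    model_kwargs={"temperature": 0, "max_length": 1024, "trust_remote_code": True},
)
chain = LLMChain(prompt=prompt, llm=llm)
print(chain.run("What is AI?"))
```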

View file

@@ -19,6 +19,8 @@
# Otherwise there would be a module-not-found error in a non-pip setting as Python would
# only search the first bigdl package and end up finding only one sub-package.
+# Code is adapted from https://python.langchain.com/docs/modules/chains/additional/question_answering.html
import argparse
from langchain.vectorstores import Chroma
@@ -66,7 +68,7 @@ def main(args):
if __name__ == '__main__':
-    parser = argparse.ArgumentParser(description='Transformer-int4 style API Simple Example')
+    parser = argparse.ArgumentParser(description='TransformersLLM Langchain QA over Docs Example')
    parser.add_argument('-m','--model-path', type=str, required=True,
                        help='the path to transformers model')
    parser.add_argument('-i', '--input-path', type=str,
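The same retrieve-then-read flow as the native docqa.py applies here; with a retriever it can also be phrased via `RetrievalQA`. A sketch with an assumed embedding class, not the script's exact code:

```python
# Equivalent RetrievalQA formulation; HuggingFaceEmbeddings is a stand-in
# for whatever embedding class the script actually uses.
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from bigdl.llm.langchain.llms import TransformersLLM

db = Chroma.from_texts(["some document text"], HuggingFaceEmbeddings())
llm = TransformersLLM.from_model_id(
    model_id="MODEL_PATH",
    model_kwargs={"temperature": 0, "max_length": 1024, "trust_remote_code": True},
)
qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=db.as_retriever())
print(qa.run("QUESTION"))
```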

View file

@@ -0,0 +1,56 @@
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This makes sure Python is aware there is more than one sub-package within bigdl,
# physically located elsewhere.
# Otherwise there would be a module-not-found error in a non-pip setting as Python would
# only search the first bigdl package and end up finding only one sub-package.
# Code is adapted from https://python.langchain.com/docs/modules/chains/additional/llm_math
import argparse

from langchain.chains import LLMMathChain

from bigdl.llm.langchain.llms import TransformersLLM, TransformersPipelineLLM


def main(args):
    question = args.question
    model_path = args.model_path

    # load the model through BigDL-LLM's langchain integration
    llm = TransformersLLM.from_model_id(
        model_id=model_path,
        model_kwargs={"temperature": 0, "max_length": 1024, "trust_remote_code": True},
    )

    # verbose=True prints the intermediate prompt and the expression the model emits
    llm_math = LLMMathChain.from_llm(llm, verbose=True)

    output = llm_math.run(question)
    print("====output=====")
    print(output)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='TransformersLLM Langchain Math Example')
    parser.add_argument('-m','--model-path', type=str, required=True,
                        help='the path to transformers model')
    parser.add_argument('-q', '--question', type=str, default='What is 13 raised to the .3432 power?',
                        help='question you want to ask.')
    args = parser.parse_args()

    main(args)
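With the default question, a successful run should show the verbose chain trace ending in an expression such as `13 ** .3432`, which `numexpr` evaluates to roughly 2.41; whether the model reliably emits a clean expression depends on the checkpoint, which is why the README names phoenix-7b as the validated model.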