# ipex-llm/docs/readthedocs/source/_toc.yml
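# Table of contents for the IPEX-LLM ReadTheDocs site (sphinx-external-toc format).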
root: index
subtrees:
- entries:
  - file: doc/LLM/index
    title: "IPEX-LLM Document"
    subtrees:
    - entries:
      - file: doc/LLM/Overview/llm
        title: "LLM in 5 minutes"
      - file: doc/LLM/Overview/install
        title: "Installation"
        subtrees:
        - entries:
          - file: doc/LLM/Overview/install_cpu
            title: "CPU"
          - file: doc/LLM/Overview/install_gpu
            title: "GPU"
      - file: doc/LLM/Quickstart/index
        title: "Quickstart"
        subtrees:
        - entries:
          - file: doc/LLM/Quickstart/bigdl_llm_migration
          - file: doc/LLM/Quickstart/install_linux_gpu
          - file: doc/LLM/Quickstart/install_windows_gpu
          - file: doc/LLM/Quickstart/docker_windows_gpu
          - file: doc/LLM/Quickstart/chatchat_quickstart
          - file: doc/LLM/Quickstart/webui_quickstart
          - file: doc/LLM/Quickstart/open_webui_with_ollama_quickstart
          - file: doc/LLM/Quickstart/privateGPT_quickstart
          - file: doc/LLM/Quickstart/continue_quickstart
          - file: doc/LLM/Quickstart/benchmark_quickstart
          - file: doc/LLM/Quickstart/llama_cpp_quickstart
          - file: doc/LLM/Quickstart/ollama_quickstart
          - file: doc/LLM/Quickstart/llama3_llamacpp_ollama_quickstart
          - file: doc/LLM/Quickstart/fastchat_quickstart
          - file: doc/LLM/Quickstart/axolotl_quickstart
          - file: doc/LLM/Quickstart/deepspeed_autotp_fastapi_quickstart
      - file: doc/LLM/Overview/KeyFeatures/index
        title: "Key Features"
        subtrees:
        - entries:
          - file: doc/LLM/Overview/KeyFeatures/optimize_model
          - file: doc/LLM/Overview/KeyFeatures/transformers_style_api
            subtrees:
            - entries:
              - file: doc/LLM/Overview/KeyFeatures/hugging_face_format
              - file: doc/LLM/Overview/KeyFeatures/native_format
          - file: doc/LLM/Overview/KeyFeatures/langchain_api
          # - file: doc/LLM/Overview/KeyFeatures/cli
          - file: doc/LLM/Overview/KeyFeatures/gpu_supports
            subtrees:
            - entries:
              - file: doc/LLM/Overview/KeyFeatures/inference_on_gpu
              - file: doc/LLM/Overview/KeyFeatures/finetune
              - file: doc/LLM/Overview/KeyFeatures/multi_gpus_selection
      - file: doc/LLM/Overview/examples
        title: "Examples"
        subtrees:
        - entries:
          - file: doc/LLM/Overview/examples_cpu
            title: "CPU"
          - file: doc/LLM/Overview/examples_gpu
            title: "GPU"
      # - file: doc/LLM/Overview/known_issues
      #   title: "Tips and Known Issues"
      - file: doc/PythonAPI/LLM/index
        title: "API Reference"
      - file: doc/LLM/Overview/FAQ/faq
        title: "FAQ"