ipex-llm/docs/readthedocs/source/_toc.yml
Wang, Jian4 86cec80b51
LLM: Add llm inference_cpp_xpu_docker (#10933)
* test_cpp_docker

* update

* update

* update

* update

* add sudo

* update nodejs version

* no need npm

* remove blinker

* new cpp docker

* restore

* add line

* add manually_build

* update and add mtl

* update for workdir llm

* add benchmark part

* update readme

* update 1024-128

* update readme

* update

* fix

* update

* update

* update readme too

* update readme

* no change

* update dir_name

* update readme
2024-05-15 11:10:22 +08:00

77 lines
3.4 KiB
YAML

# Table of contents for the IPEX-LLM ReadTheDocs site, consumed by the
# sphinx-external-toc extension.
#
# Schema: "root" is the landing page; each "file" is a documentation source
# path relative to the docs source root, without its extension; an optional
# "title" overrides the page's own title in the sidebar; "subtrees"/"entries"
# nest child pages under their parent entry.
#
# NOTE(review): leading indentation was lost in extraction; the nesting below
# is reconstructed from the key order (title/subtrees following each file
# entry) — verify against the repository copy.
root: index
subtrees:
- entries:
  - file: doc/LLM/index
    title: "IPEX-LLM Document"
    subtrees:
    - entries:
      # Getting started
      - file: doc/LLM/Overview/llm
        title: "LLM in 5 minutes"
      - file: doc/LLM/Overview/install
        title: "Installation"
        subtrees:
        - entries:
          - file: doc/LLM/Overview/install_cpu
            title: "CPU"
          - file: doc/LLM/Overview/install_gpu
            title: "GPU"
      # Docker-based setup guides
      - file: doc/LLM/DockerGuides/index
        title: "Docker Guides"
        subtrees:
        - entries:
          - file: doc/LLM/DockerGuides/docker_windows_gpu
          - file: doc/LLM/DockerGuides/docker_pytorch_inference_gpu
      # Per-integration quickstarts (titles come from the pages themselves)
      - file: doc/LLM/Quickstart/index
        title: "Quickstart"
        subtrees:
        - entries:
          - file: doc/LLM/Quickstart/bigdl_llm_migration
          - file: doc/LLM/Quickstart/install_linux_gpu
          - file: doc/LLM/Quickstart/install_windows_gpu
          - file: doc/LLM/Quickstart/chatchat_quickstart
          - file: doc/LLM/Quickstart/webui_quickstart
          - file: doc/LLM/Quickstart/open_webui_with_ollama_quickstart
          - file: doc/LLM/Quickstart/privateGPT_quickstart
          - file: doc/LLM/Quickstart/continue_quickstart
          - file: doc/LLM/Quickstart/dify_quickstart
          - file: doc/LLM/Quickstart/benchmark_quickstart
          - file: doc/LLM/Quickstart/llama_cpp_quickstart
          - file: doc/LLM/Quickstart/ollama_quickstart
          - file: doc/LLM/Quickstart/llama3_llamacpp_ollama_quickstart
          - file: doc/LLM/Quickstart/fastchat_quickstart
          - file: doc/LLM/Quickstart/axolotl_quickstart
          - file: doc/LLM/Quickstart/deepspeed_autotp_fastapi_quickstart
          - file: doc/LLM/Quickstart/docker_cpp_xpu_quickstart
      # Feature documentation
      - file: doc/LLM/Overview/KeyFeatures/index
        title: "Key Features"
        subtrees:
        - entries:
          - file: doc/LLM/Overview/KeyFeatures/optimize_model
          - file: doc/LLM/Overview/KeyFeatures/transformers_style_api
            subtrees:
            - entries:
              - file: doc/LLM/Overview/KeyFeatures/hugging_face_format
              - file: doc/LLM/Overview/KeyFeatures/native_format
          - file: doc/LLM/Overview/KeyFeatures/langchain_api
          # - file: doc/LLM/Overview/KeyFeatures/cli
          - file: doc/LLM/Overview/KeyFeatures/gpu_supports
            subtrees:
            - entries:
              - file: doc/LLM/Overview/KeyFeatures/inference_on_gpu
              - file: doc/LLM/Overview/KeyFeatures/finetune
              - file: doc/LLM/Overview/KeyFeatures/multi_gpus_selection
      # Example galleries
      - file: doc/LLM/Overview/examples
        title: "Examples"
        subtrees:
        - entries:
          - file: doc/LLM/Overview/examples_cpu
            title: "CPU"
          - file: doc/LLM/Overview/examples_gpu
            title: "GPU"
      # - file: doc/LLM/Overview/known_issues
      #   title: "Tips and Known Issues"
      - file: doc/PythonAPI/LLM/index
        title: "API Reference"
      - file: doc/LLM/Overview/FAQ/faq
        title: "FAQ"