ipex-llm/docs/readthedocs/source/_toc.yml
Shaojun Liu 8fdc8fb197
Quickstart: Run/Develop PyTorch in VSCode with Docker on Intel GPU (#11070)
* add quickstart: Run/Develop PyTorch in VSCode with Docker on Intel GPU

* add gif

* update index.rst

* update link

* update GIFs
2024-05-22 09:29:42 +08:00

79 lines
3.6 KiB
YAML

# Table of contents for the IPEX-LLM documentation site.
# Consumed by sphinx-external-toc: `root` is the landing page; each
# `subtrees -> entries` level nests pages under its parent `file`.
---
root: index
subtrees:
  - entries:
      - file: doc/LLM/index
        title: "IPEX-LLM Document"
        subtrees:
          - entries:
              # Getting started
              - file: doc/LLM/Overview/llm
                title: "LLM in 5 minutes"
              - file: doc/LLM/Overview/install
                title: "Installation"
                subtrees:
                  - entries:
                      - file: doc/LLM/Overview/install_cpu
                        title: "CPU"
                      - file: doc/LLM/Overview/install_gpu
                        title: "GPU"
              # Docker-based setup and inference guides
              - file: doc/LLM/DockerGuides/index
                title: "Docker Guides"
                subtrees:
                  - entries:
                      - file: doc/LLM/DockerGuides/docker_windows_gpu
                      - file: doc/LLM/DockerGuides/docker_pytorch_inference_gpu
                      - file: doc/LLM/DockerGuides/docker_run_pytorch_inference_in_vscode
                      - file: doc/LLM/DockerGuides/docker_cpp_xpu_quickstart
              # Task-oriented quickstarts (install, serving, tooling integrations)
              - file: doc/LLM/Quickstart/index
                title: "Quickstart"
                subtrees:
                  - entries:
                      - file: doc/LLM/Quickstart/bigdl_llm_migration
                      - file: doc/LLM/Quickstart/install_linux_gpu
                      - file: doc/LLM/Quickstart/install_windows_gpu
                      - file: doc/LLM/Quickstart/chatchat_quickstart
                      - file: doc/LLM/Quickstart/webui_quickstart
                      - file: doc/LLM/Quickstart/open_webui_with_ollama_quickstart
                      - file: doc/LLM/Quickstart/privateGPT_quickstart
                      - file: doc/LLM/Quickstart/continue_quickstart
                      - file: doc/LLM/Quickstart/dify_quickstart
                      - file: doc/LLM/Quickstart/benchmark_quickstart
                      - file: doc/LLM/Quickstart/llama_cpp_quickstart
                      - file: doc/LLM/Quickstart/ollama_quickstart
                      - file: doc/LLM/Quickstart/llama3_llamacpp_ollama_quickstart
                      - file: doc/LLM/Quickstart/fastchat_quickstart
                      - file: doc/LLM/Quickstart/vLLM_quickstart
                      - file: doc/LLM/Quickstart/axolotl_quickstart
                      - file: doc/LLM/Quickstart/deepspeed_autotp_fastapi_quickstart
              # Feature reference
              - file: doc/LLM/Overview/KeyFeatures/index
                title: "Key Features"
                subtrees:
                  - entries:
                      - file: doc/LLM/Overview/KeyFeatures/optimize_model
                      - file: doc/LLM/Overview/KeyFeatures/transformers_style_api
                        subtrees:
                          - entries:
                              - file: doc/LLM/Overview/KeyFeatures/hugging_face_format
                              - file: doc/LLM/Overview/KeyFeatures/native_format
                      - file: doc/LLM/Overview/KeyFeatures/langchain_api
                      # - file: doc/LLM/Overview/KeyFeatures/cli
                      - file: doc/LLM/Overview/KeyFeatures/gpu_supports
                        subtrees:
                          - entries:
                              - file: doc/LLM/Overview/KeyFeatures/inference_on_gpu
                              - file: doc/LLM/Overview/KeyFeatures/finetune
                              - file: doc/LLM/Overview/KeyFeatures/multi_gpus_selection
              # Worked examples
              - file: doc/LLM/Overview/examples
                title: "Examples"
                subtrees:
                  - entries:
                      - file: doc/LLM/Overview/examples_cpu
                        title: "CPU"
                      - file: doc/LLM/Overview/examples_gpu
                        title: "GPU"
              # - file: doc/LLM/Overview/known_issues
              #   title: "Tips and Known Issues"
              - file: doc/PythonAPI/LLM/index
                title: "API Reference"
              - file: doc/LLM/Overview/FAQ/faq
                title: "FAQ"