# Table of contents for the IPEX-LLM documentation (sphinx-external-toc format)
root: index
subtrees:
  - entries:
    - file: doc/LLM/index
      title: "IPEX-LLM Document"
      subtrees:
        - entries:
          - file: doc/LLM/Overview/llm
            title: "LLM in 5 minutes"
          - file: doc/LLM/Overview/install
            title: "Installation"
            subtrees:
              - entries:
                - file: doc/LLM/Overview/install_cpu
                  title: "CPU"
                - file: doc/LLM/Overview/install_gpu
                  title: "GPU"
          - file: doc/LLM/Quickstart/index
            title: "Quickstart"
            subtrees:
              - entries:
                - file: doc/LLM/Quickstart/bigdl_llm_migration
                - file: doc/LLM/Quickstart/install_linux_gpu
                - file: doc/LLM/Quickstart/install_windows_gpu
                - file: doc/LLM/Quickstart/docker_windows_gpu
                - file: doc/LLM/Quickstart/chatchat_quickstart
                - file: doc/LLM/Quickstart/webui_quickstart
                - file: doc/LLM/Quickstart/open_webui_with_ollama_quickstart
                - file: doc/LLM/Quickstart/continue_quickstart
                - file: doc/LLM/Quickstart/benchmark_quickstart
                - file: doc/LLM/Quickstart/llama_cpp_quickstart
                - file: doc/LLM/Quickstart/ollama_quickstart
                - file: doc/LLM/Quickstart/llama3_llamacpp_ollama_quickstart
                - file: doc/LLM/Quickstart/fastchat_quickstart
          - file: doc/LLM/Overview/KeyFeatures/index
            title: "Key Features"
            subtrees:
              - entries:
                - file: doc/LLM/Overview/KeyFeatures/optimize_model
                - file: doc/LLM/Overview/KeyFeatures/transformers_style_api
                  subtrees:
                    - entries:
                      - file: doc/LLM/Overview/KeyFeatures/hugging_face_format
                      - file: doc/LLM/Overview/KeyFeatures/native_format
                - file: doc/LLM/Overview/KeyFeatures/langchain_api
                # - file: doc/LLM/Overview/KeyFeatures/cli
                - file: doc/LLM/Overview/KeyFeatures/gpu_supports
                  subtrees:
                    - entries:
                      - file: doc/LLM/Overview/KeyFeatures/inference_on_gpu
                      - file: doc/LLM/Overview/KeyFeatures/finetune
                      - file: doc/LLM/Overview/KeyFeatures/multi_gpus_selection
          - file: doc/LLM/Overview/examples
            title: "Examples"
            subtrees:
              - entries:
                - file: doc/LLM/Overview/examples_cpu
                  title: "CPU"
                - file: doc/LLM/Overview/examples_gpu
                  title: "GPU"
          # - file: doc/LLM/Overview/known_issues
          #   title: "Tips and Known Issues"
          - file: doc/PythonAPI/LLM/index
            title: "API Reference"
          - file: doc/LLM/Overview/FAQ/faq
            title: "FAQ"