From c31136df0b141d45d1c732ee3c511e5575092e9f Mon Sep 17 00:00:00 2001
From: Yuwen Hu <54161268+Oscilloscope98@users.noreply.github.com>
Date: Fri, 3 Feb 2023 09:37:10 +0800
Subject: [PATCH] [Nano] Improve How-to Guides Navigations (#7396)

* Remove deprecated option enable_auto_doc_ref for recommonmark
* Add first level navigation structure for Nano how-to guides
* Update navigation for How-to Training part
* Update navigation for How-to Inference part
* Update navigation for How-to Preprocessing/Install part and other small fixes
* Fix wrong link path caused by position changes of how-to install related guides
* Small fix
---
 docs/readthedocs/source/_toc.yml              | 118 ++++++++++++------
 docs/readthedocs/source/conf.py               |   3 +-
 .../Nano/Howto/Inference/OpenVINO/index.rst   |   6 +
 .../Nano/Howto/Inference/PyTorch/index.rst    |  18 +++
 .../Nano/Howto/Inference/TensorFlow/index.rst |   8 ++
 .../source/doc/Nano/Howto/Inference/index.rst |  33 +++++
 .../source/doc/Nano/Howto/Install/index.rst   |   7 ++
 .../Howto/{ => Install}/install_in_colab.md   |   0
 .../Nano/Howto/{ => Install}/windows_guide.md |   0
 .../Howto/Preprocessing/PyTorch/index.rst     |   4 +
 .../doc/Nano/Howto/Preprocessing/index.rst    |  15 +++
 .../doc/Nano/Howto/Training/General/index.rst |   4 +
 .../doc/Nano/Howto/Training/PyTorch/index.rst |  14 +++
 .../Howto/Training/PyTorchLightning/index.rst |   7 ++
 .../Nano/Howto/Training/TensorFlow/index.rst  |   9 ++
 .../source/doc/Nano/Howto/Training/index.rst  |  42 +++++++
 .../source/doc/Nano/Howto/index.rst           |   4 +-
 .../source/doc/Nano/Overview/install.md       |   2 +-
 18 files changed, 252 insertions(+), 42 deletions(-)
 create mode 100644 docs/readthedocs/source/doc/Nano/Howto/Inference/OpenVINO/index.rst
 create mode 100644 docs/readthedocs/source/doc/Nano/Howto/Inference/PyTorch/index.rst
 create mode 100644 docs/readthedocs/source/doc/Nano/Howto/Inference/TensorFlow/index.rst
 create mode 100644 docs/readthedocs/source/doc/Nano/Howto/Inference/index.rst
 create mode 100644 docs/readthedocs/source/doc/Nano/Howto/Install/index.rst
 rename docs/readthedocs/source/doc/Nano/Howto/{ => Install}/install_in_colab.md (100%)
 rename docs/readthedocs/source/doc/Nano/Howto/{ => Install}/windows_guide.md (100%)
 create mode 100644 docs/readthedocs/source/doc/Nano/Howto/Preprocessing/PyTorch/index.rst
 create mode 100644 docs/readthedocs/source/doc/Nano/Howto/Preprocessing/index.rst
 create mode 100644 docs/readthedocs/source/doc/Nano/Howto/Training/General/index.rst
 create mode 100644 docs/readthedocs/source/doc/Nano/Howto/Training/PyTorch/index.rst
 create mode 100644 docs/readthedocs/source/doc/Nano/Howto/Training/PyTorchLightning/index.rst
 create mode 100644 docs/readthedocs/source/doc/Nano/Howto/Training/TensorFlow/index.rst
 create mode 100644 docs/readthedocs/source/doc/Nano/Howto/Training/index.rst

diff --git a/docs/readthedocs/source/_toc.yml b/docs/readthedocs/source/_toc.yml
index c8233ef8..e4f398d8 100644
--- a/docs/readthedocs/source/_toc.yml
+++ b/docs/readthedocs/source/_toc.yml
@@ -101,43 +101,87 @@ subtrees:
         title: "How-to Guides"
         subtrees:
           - entries:
-            - file: doc/Nano/Howto/Preprocessing/PyTorch/accelerate_pytorch_cv_data_pipeline
-            - file: doc/Nano/Howto/Training/PyTorchLightning/accelerate_pytorch_lightning_training_ipex
-            - file: doc/Nano/Howto/Training/PyTorchLightning/accelerate_pytorch_lightning_training_multi_instance
-            - file: doc/Nano/Howto/Training/PyTorchLightning/pytorch_lightning_training_channels_last
-            - file: doc/Nano/Howto/Training/PyTorchLightning/pytorch_lightning_training_bf16
-            - file: doc/Nano/Howto/Training/PyTorch/convert_pytorch_training_torchnano
-            - file: doc/Nano/Howto/Training/PyTorch/use_nano_decorator_pytorch_training
-            - file: doc/Nano/Howto/Training/PyTorch/accelerate_pytorch_training_ipex
-            - file: doc/Nano/Howto/Training/PyTorch/accelerate_pytorch_training_multi_instance
-            - file: doc/Nano/Howto/Training/PyTorch/pytorch_training_channels_last
-            - file: doc/Nano/Howto/Training/PyTorch/accelerate_pytorch_training_bf16
-            - file: doc/Nano/Howto/Training/TensorFlow/accelerate_tensorflow_training_multi_instance
-            - file: doc/Nano/Howto/Training/TensorFlow/tensorflow_training_embedding_sparseadam
-            - file: doc/Nano/Howto/Training/TensorFlow/tensorflow_training_bf16
-            - file: doc/Nano/Howto/Training/General/choose_num_processes_training
-            - file: doc/Nano/Howto/Inference/OpenVINO/openvino_inference
-            - file: doc/Nano/Howto/Inference/OpenVINO/openvino_inference_async
-            - file: doc/Nano/Howto/Inference/OpenVINO/accelerate_inference_openvino_gpu
-            - file: doc/Nano/Howto/Inference/PyTorch/accelerate_pytorch_inference_onnx
-            - file: doc/Nano/Howto/Inference/PyTorch/accelerate_pytorch_inference_openvino
-            - file: doc/Nano/Howto/Inference/PyTorch/accelerate_pytorch_inference_jit_ipex
-            - file: doc/Nano/Howto/Inference/PyTorch/multi_instance_pytorch_inference
-            - file: doc/Nano/Howto/Inference/PyTorch/quantize_pytorch_inference_inc
-            - file: doc/Nano/Howto/Inference/PyTorch/quantize_pytorch_inference_pot
-            - file: doc/Nano/Howto/Inference/PyTorch/pytorch_context_manager
-            - file: doc/Nano/Howto/Inference/PyTorch/pytorch_save_and_load_ipex
-            - file: doc/Nano/Howto/Inference/PyTorch/pytorch_save_and_load_jit
-            - file: doc/Nano/Howto/Inference/PyTorch/pytorch_save_and_load_onnx
-            - file: doc/Nano/Howto/Inference/PyTorch/pytorch_save_and_load_openvino
-            - file: doc/Nano/Howto/Inference/PyTorch/inference_optimizer_optimize
-            - file: doc/Nano/Howto/Inference/TensorFlow/accelerate_tensorflow_inference_onnx
-            - file: doc/Nano/Howto/Inference/TensorFlow/accelerate_tensorflow_inference_openvino
-            - file: doc/Nano/Howto/Inference/TensorFlow/tensorflow_inference_bf16
-            - file: doc/Nano/Howto/Inference/TensorFlow/tensorflow_save_and_load_onnx
-            - file: doc/Nano/Howto/Inference/TensorFlow/tensorflow_save_and_load_openvino
-            - file: doc/Nano/Howto/install_in_colab
-            - file: doc/Nano/Howto/windows_guide
+            - file: doc/Nano/Howto/Preprocessing/index
+              subtrees:
+                - entries:
+                  - file: doc/Nano/Howto/Preprocessing/PyTorch/index
+                    title: "PyTorch"
+                    subtrees:
+                      - entries:
+                        - file: doc/Nano/Howto/Preprocessing/PyTorch/accelerate_pytorch_cv_data_pipeline
+            - file: doc/Nano/Howto/Training/index
+              subtrees:
+                - entries:
+                  - file: doc/Nano/Howto/Training/PyTorchLightning/index
+                    title: "PyTorch Lightning"
+                    subtrees:
+                      - entries:
+                        - file: doc/Nano/Howto/Training/PyTorchLightning/accelerate_pytorch_lightning_training_ipex
+                        - file: doc/Nano/Howto/Training/PyTorchLightning/accelerate_pytorch_lightning_training_multi_instance
+                        - file: doc/Nano/Howto/Training/PyTorchLightning/pytorch_lightning_training_channels_last
+                        - file: doc/Nano/Howto/Training/PyTorchLightning/pytorch_lightning_training_bf16
+                  - file: doc/Nano/Howto/Training/PyTorch/index
+                    title: "PyTorch"
+                    subtrees:
+                      - entries:
+                        - file: doc/Nano/Howto/Training/PyTorch/convert_pytorch_training_torchnano
+                        - file: doc/Nano/Howto/Training/PyTorch/use_nano_decorator_pytorch_training
+                        - file: doc/Nano/Howto/Training/PyTorch/accelerate_pytorch_training_ipex
+                        - file: doc/Nano/Howto/Training/PyTorch/accelerate_pytorch_training_multi_instance
+                        - file: doc/Nano/Howto/Training/PyTorch/pytorch_training_channels_last
+                        - file: doc/Nano/Howto/Training/PyTorch/accelerate_pytorch_training_bf16
+                  - file: doc/Nano/Howto/Training/TensorFlow/index
+                    title: "TensorFlow"
+                    subtrees:
+                      - entries:
+                        - file: doc/Nano/Howto/Training/TensorFlow/accelerate_tensorflow_training_multi_instance
+                        - file: doc/Nano/Howto/Training/TensorFlow/tensorflow_training_embedding_sparseadam
+                        - file: doc/Nano/Howto/Training/TensorFlow/tensorflow_training_bf16
+                  - file: doc/Nano/Howto/Training/General/index
+                    title: "General"
+                    subtrees:
+                      - entries:
+                        - file: doc/Nano/Howto/Training/General/choose_num_processes_training
+            - file: doc/Nano/Howto/Inference/index
+              subtrees:
+                - entries:
+                  - file: doc/Nano/Howto/Inference/OpenVINO/index
+                    title: "OpenVINO"
+                    subtrees:
+                      - entries:
+                        - file: doc/Nano/Howto/Inference/OpenVINO/openvino_inference
+                        - file: doc/Nano/Howto/Inference/OpenVINO/openvino_inference_async
+                        - file: doc/Nano/Howto/Inference/OpenVINO/accelerate_inference_openvino_gpu
+                  - file: doc/Nano/Howto/Inference/PyTorch/index
+                    title: "PyTorch"
+                    subtrees:
+                      - entries:
+                        - file: doc/Nano/Howto/Inference/PyTorch/accelerate_pytorch_inference_onnx
+                        - file: doc/Nano/Howto/Inference/PyTorch/accelerate_pytorch_inference_openvino
+                        - file: doc/Nano/Howto/Inference/PyTorch/accelerate_pytorch_inference_jit_ipex
+                        - file: doc/Nano/Howto/Inference/PyTorch/multi_instance_pytorch_inference
+                        - file: doc/Nano/Howto/Inference/PyTorch/quantize_pytorch_inference_inc
+                        - file: doc/Nano/Howto/Inference/PyTorch/quantize_pytorch_inference_pot
+                        - file: doc/Nano/Howto/Inference/PyTorch/pytorch_context_manager
+                        - file: doc/Nano/Howto/Inference/PyTorch/pytorch_save_and_load_ipex
+                        - file: doc/Nano/Howto/Inference/PyTorch/pytorch_save_and_load_jit
+                        - file: doc/Nano/Howto/Inference/PyTorch/pytorch_save_and_load_onnx
+                        - file: doc/Nano/Howto/Inference/PyTorch/pytorch_save_and_load_openvino
+                        - file: doc/Nano/Howto/Inference/PyTorch/inference_optimizer_optimize
+                  - file: doc/Nano/Howto/Inference/TensorFlow/index
+                    title: "TensorFlow"
+                    subtrees:
+                      - entries:
+                        - file: doc/Nano/Howto/Inference/TensorFlow/accelerate_tensorflow_inference_onnx
+                        - file: doc/Nano/Howto/Inference/TensorFlow/accelerate_tensorflow_inference_openvino
+                        - file: doc/Nano/Howto/Inference/TensorFlow/tensorflow_inference_bf16
+                        - file: doc/Nano/Howto/Inference/TensorFlow/tensorflow_save_and_load_onnx
+                        - file: doc/Nano/Howto/Inference/TensorFlow/tensorflow_save_and_load_openvino
+            - file: doc/Nano/Howto/Install/index
+              subtrees:
+                - entries:
+                  - file: doc/Nano/Howto/Install/install_in_colab
+                  - file: doc/Nano/Howto/Install/windows_guide
       - file: doc/Nano/Overview/known_issues
         title: "Tips and Known Issues"
       - file: doc/Nano/Overview/troubshooting
diff --git a/docs/readthedocs/source/conf.py b/docs/readthedocs/source/conf.py
index 9e69718d..43fcd088 100644
--- a/docs/readthedocs/source/conf.py
+++ b/docs/readthedocs/source/conf.py
@@ -269,8 +269,7 @@ def setup(app):
         'auto_toc_tree_section': 'Contents',
         'enable_math': False,
         'enable_inline_math': False,
-        'enable_eval_rst': True,
-        'enable_auto_doc_ref': True,
+        'enable_eval_rst': True
     }, True)
     app.add_transform(AutoStructify)

diff --git a/docs/readthedocs/source/doc/Nano/Howto/Inference/OpenVINO/index.rst b/docs/readthedocs/source/doc/Nano/Howto/Inference/OpenVINO/index.rst
new file mode 100644
index 00000000..1111bd72
--- /dev/null
+++ b/docs/readthedocs/source/doc/Nano/Howto/Inference/OpenVINO/index.rst
@@ -0,0 +1,6 @@
+Inference Optimization: For OpenVINO Users
+=============================================
+
+* `How to run inference on OpenVINO model <openvino_inference.html>`_
+* `How to run asynchronous inference on OpenVINO model <openvino_inference_async.html>`_
+* `How to accelerate a PyTorch / TensorFlow inference pipeline on Intel GPUs through OpenVINO <accelerate_inference_openvino_gpu.html>`_
diff --git a/docs/readthedocs/source/doc/Nano/Howto/Inference/PyTorch/index.rst b/docs/readthedocs/source/doc/Nano/Howto/Inference/PyTorch/index.rst
new file mode 100644
index 00000000..6241866c
--- /dev/null
+++ b/docs/readthedocs/source/doc/Nano/Howto/Inference/PyTorch/index.rst
@@ -0,0 +1,18 @@
+Inference Optimization: For PyTorch Users
+=============================================
+
+* `How to accelerate a PyTorch inference pipeline through ONNXRuntime <accelerate_pytorch_inference_onnx.html>`_
+* `How to accelerate a PyTorch inference pipeline through OpenVINO <accelerate_pytorch_inference_openvino.html>`_
+* `How to accelerate a PyTorch inference pipeline through JIT/IPEX <accelerate_pytorch_inference_jit_ipex.html>`_
+* `How to accelerate a PyTorch inference pipeline through multiple instances <multi_instance_pytorch_inference.html>`_
+* `How to quantize your PyTorch model for inference using Intel Neural Compressor <quantize_pytorch_inference_inc.html>`_
+* `How to quantize your PyTorch model for inference using OpenVINO Post-training Optimization Tools <quantize_pytorch_inference_pot.html>`_
+* |pytorch_inference_context_manager_link|_
+* `How to save and load optimized IPEX model <pytorch_save_and_load_ipex.html>`_
+* `How to save and load optimized JIT model <pytorch_save_and_load_jit.html>`_
+* `How to save and load optimized ONNXRuntime model <pytorch_save_and_load_onnx.html>`_
+* `How to save and load optimized OpenVINO model <pytorch_save_and_load_openvino.html>`_
+* `How to find the accelerated method with minimal latency using InferenceOptimizer <inference_optimizer_optimize.html>`_
+
+.. |pytorch_inference_context_manager_link| replace:: How to use context manager through ``get_context``
+.. _pytorch_inference_context_manager_link: pytorch_context_manager.html
\ No newline at end of file
diff --git a/docs/readthedocs/source/doc/Nano/Howto/Inference/TensorFlow/index.rst b/docs/readthedocs/source/doc/Nano/Howto/Inference/TensorFlow/index.rst
new file mode 100644
index 00000000..7a4cf3d5
--- /dev/null
+++ b/docs/readthedocs/source/doc/Nano/Howto/Inference/TensorFlow/index.rst
@@ -0,0 +1,8 @@
+Inference Optimization: For TensorFlow Users
+=============================================
+
+* `How to accelerate a TensorFlow inference pipeline through ONNXRuntime <accelerate_tensorflow_inference_onnx.html>`_
+* `How to accelerate a TensorFlow inference pipeline through OpenVINO <accelerate_tensorflow_inference_openvino.html>`_
+* `How to conduct BFloat16 Mixed Precision inference in a TensorFlow Keras application <tensorflow_inference_bf16.html>`_
+* `How to save and load optimized ONNXRuntime model in TensorFlow <tensorflow_save_and_load_onnx.html>`_
+* `How to save and load optimized OpenVINO model in TensorFlow <tensorflow_save_and_load_openvino.html>`_
diff --git a/docs/readthedocs/source/doc/Nano/Howto/Inference/index.rst b/docs/readthedocs/source/doc/Nano/Howto/Inference/index.rst
new file mode 100644
index 00000000..cd5865fb
--- /dev/null
+++ b/docs/readthedocs/source/doc/Nano/Howto/Inference/index.rst
@@ -0,0 +1,33 @@
+Inference Optimization
+=========================
+
+Here you can find detailed guides on how to apply BigDL-Nano to optimize your inference workloads. Select your desired use case below for further navigation:
+
+.. grid:: 1 2 2 2
+
+    .. grid-item::
+
+        .. button-link:: OpenVINO/index.html
+            :color: primary
+            :expand:
+            :outline:
+
+            I use the **OpenVINO** toolkit.
+
+    .. grid-item::
+
+        .. button-link:: PyTorch/index.html
+            :color: primary
+            :expand:
+            :outline:
+
+            I am a **PyTorch** user.
+
+    .. grid-item::
+
+        .. button-link:: TensorFlow/index.html
+            :color: primary
+            :expand:
+            :outline:
+
+            I am a **TensorFlow** user.
\ No newline at end of file
diff --git a/docs/readthedocs/source/doc/Nano/Howto/Install/index.rst b/docs/readthedocs/source/doc/Nano/Howto/Install/index.rst
new file mode 100644
index 00000000..d326cd91
--- /dev/null
+++ b/docs/readthedocs/source/doc/Nano/Howto/Install/index.rst
@@ -0,0 +1,7 @@
+Install
+=========================
+
+Here you can find detailed guides on how to install BigDL-Nano for different use cases:
+
+* `How to install BigDL-Nano in Google Colab <install_in_colab.html>`_
+* `How to install BigDL-Nano on Windows <windows_guide.html>`_
\ No newline at end of file
diff --git a/docs/readthedocs/source/doc/Nano/Howto/install_in_colab.md b/docs/readthedocs/source/doc/Nano/Howto/Install/install_in_colab.md
similarity index 100%
rename from docs/readthedocs/source/doc/Nano/Howto/install_in_colab.md
rename to docs/readthedocs/source/doc/Nano/Howto/Install/install_in_colab.md
diff --git a/docs/readthedocs/source/doc/Nano/Howto/windows_guide.md b/docs/readthedocs/source/doc/Nano/Howto/Install/windows_guide.md
similarity index 100%
rename from docs/readthedocs/source/doc/Nano/Howto/windows_guide.md
rename to docs/readthedocs/source/doc/Nano/Howto/Install/windows_guide.md
diff --git a/docs/readthedocs/source/doc/Nano/Howto/Preprocessing/PyTorch/index.rst b/docs/readthedocs/source/doc/Nano/Howto/Preprocessing/PyTorch/index.rst
new file mode 100644
index 00000000..a0a6e53d
--- /dev/null
+++ b/docs/readthedocs/source/doc/Nano/Howto/Preprocessing/PyTorch/index.rst
@@ -0,0 +1,4 @@
+Preprocessing Optimization: For PyTorch Users
+==============================================
+
+* `How to accelerate a computer vision data processing pipeline <accelerate_pytorch_cv_data_pipeline.html>`_
\ No newline at end of file
diff --git a/docs/readthedocs/source/doc/Nano/Howto/Preprocessing/index.rst b/docs/readthedocs/source/doc/Nano/Howto/Preprocessing/index.rst
new file mode 100644
index 00000000..caf9ff03
--- /dev/null
+++ b/docs/readthedocs/source/doc/Nano/Howto/Preprocessing/index.rst
@@ -0,0 +1,15 @@
+Preprocessing Optimization
+===========================
+
+Here you can find detailed guides on how to apply BigDL-Nano to accelerate your data preprocessing pipeline. Select your desired use case below for further navigation:
+
+.. grid:: 1 2 2 2
+
+    .. grid-item::
+
+        .. button-link:: PyTorch/index.html
+            :color: primary
+            :expand:
+            :outline:
+
+            I am a **PyTorch** user.
\ No newline at end of file
diff --git a/docs/readthedocs/source/doc/Nano/Howto/Training/General/index.rst b/docs/readthedocs/source/doc/Nano/Howto/Training/General/index.rst
new file mode 100644
index 00000000..39cf6c5a
--- /dev/null
+++ b/docs/readthedocs/source/doc/Nano/Howto/Training/General/index.rst
@@ -0,0 +1,4 @@
+Training Optimization: General Tips
+====================================
+
+* `How to choose the number of processes for multi-instance training <choose_num_processes_training.html>`_
\ No newline at end of file
diff --git a/docs/readthedocs/source/doc/Nano/Howto/Training/PyTorch/index.rst b/docs/readthedocs/source/doc/Nano/Howto/Training/PyTorch/index.rst
new file mode 100644
index 00000000..da2b7b5d
--- /dev/null
+++ b/docs/readthedocs/source/doc/Nano/Howto/Training/PyTorch/index.rst
@@ -0,0 +1,14 @@
+Training Optimization: For PyTorch Users
+=========================================
+
+* |convert_pytorch_training_torchnano|_
+* |use_nano_decorator_pytorch_training|_
+* `How to accelerate a PyTorch application on training workloads through Intel® Extension for PyTorch* <accelerate_pytorch_training_ipex.html>`_
+* `How to accelerate a PyTorch application on training workloads through multiple instances <accelerate_pytorch_training_multi_instance.html>`_
+* `How to use the channels last memory format in your PyTorch application for training <pytorch_training_channels_last.html>`_
+* `How to conduct BFloat16 Mixed Precision training in your PyTorch application <accelerate_pytorch_training_bf16.html>`_
+
+.. |use_nano_decorator_pytorch_training| replace:: How to accelerate your PyTorch training loop with ``@nano`` decorator
+.. _use_nano_decorator_pytorch_training: use_nano_decorator_pytorch_training.html
+.. |convert_pytorch_training_torchnano| replace:: How to convert your PyTorch training loop to use ``TorchNano`` for acceleration
+.. _convert_pytorch_training_torchnano: convert_pytorch_training_torchnano.html
diff --git a/docs/readthedocs/source/doc/Nano/Howto/Training/PyTorchLightning/index.rst b/docs/readthedocs/source/doc/Nano/Howto/Training/PyTorchLightning/index.rst
new file mode 100644
index 00000000..23824489
--- /dev/null
+++ b/docs/readthedocs/source/doc/Nano/Howto/Training/PyTorchLightning/index.rst
@@ -0,0 +1,7 @@
+Training Optimization: For PyTorch Lightning Users
+===================================================
+
+* `How to accelerate a PyTorch Lightning application on training workloads through Intel® Extension for PyTorch* <accelerate_pytorch_lightning_training_ipex.html>`_
+* `How to accelerate a PyTorch Lightning application on training workloads through multiple instances <accelerate_pytorch_lightning_training_multi_instance.html>`_
+* `How to use the channels last memory format in your PyTorch Lightning application for training <pytorch_lightning_training_channels_last.html>`_
+* `How to conduct BFloat16 Mixed Precision training in your PyTorch Lightning application <pytorch_lightning_training_bf16.html>`_
diff --git a/docs/readthedocs/source/doc/Nano/Howto/Training/TensorFlow/index.rst b/docs/readthedocs/source/doc/Nano/Howto/Training/TensorFlow/index.rst
new file mode 100644
index 00000000..8756d3a5
--- /dev/null
+++ b/docs/readthedocs/source/doc/Nano/Howto/Training/TensorFlow/index.rst
@@ -0,0 +1,9 @@
+Training Optimization: For TensorFlow Users
+============================================
+
+* `How to accelerate a TensorFlow Keras application on training workloads through multiple instances <accelerate_tensorflow_training_multi_instance.html>`_
+* |tensorflow_training_embedding_sparseadam_link|_
+* `How to conduct BFloat16 Mixed Precision training in your TensorFlow application <tensorflow_training_bf16.html>`_
+
+.. |tensorflow_training_embedding_sparseadam_link| replace:: How to optimize your model with a sparse ``Embedding`` layer and ``SparseAdam`` optimizer
+.. _tensorflow_training_embedding_sparseadam_link: tensorflow_training_embedding_sparseadam.html
diff --git a/docs/readthedocs/source/doc/Nano/Howto/Training/index.rst b/docs/readthedocs/source/doc/Nano/Howto/Training/index.rst
new file mode 100644
index 00000000..b0720e3d
--- /dev/null
+++ b/docs/readthedocs/source/doc/Nano/Howto/Training/index.rst
@@ -0,0 +1,42 @@
+Training Optimization
+=========================
+
+Here you can find detailed guides on how to apply BigDL-Nano to optimize your training workloads. Select your desired use case below for further navigation:
+
+.. grid:: 1 2 2 2
+
+    .. grid-item::
+
+        .. button-link:: PyTorchLightning/index.html
+            :color: primary
+            :expand:
+            :outline:
+
+            I am a **PyTorch Lightning** user.
+
+    .. grid-item::
+
+        .. button-link:: PyTorch/index.html
+            :color: primary
+            :expand:
+            :outline:
+
+            I am a **PyTorch** user.
+
+    .. grid-item::
+
+        .. button-link:: TensorFlow/index.html
+            :color: primary
+            :expand:
+            :outline:
+
+            I am a **TensorFlow** user.
+
+    .. grid-item::
+
+        .. button-link:: General/index.html
+            :color: primary
+            :expand:
+            :outline:
+
+            I want to know general optimization tips.
\ No newline at end of file
diff --git a/docs/readthedocs/source/doc/Nano/Howto/index.rst b/docs/readthedocs/source/doc/Nano/Howto/index.rst
index 9f37cb07..4e760eda 100644
--- a/docs/readthedocs/source/doc/Nano/Howto/index.rst
+++ b/docs/readthedocs/source/doc/Nano/Howto/index.rst
@@ -89,5 +89,5 @@ TensorFlow

 Install
 -------------------------
-* `How to install BigDL-Nano in Google Colab <install_in_colab.html>`_
-* `How to install BigDL-Nano on Windows <windows_guide.html>`_
\ No newline at end of file
+* `How to install BigDL-Nano in Google Colab <Install/install_in_colab.html>`_
+* `How to install BigDL-Nano on Windows <Install/windows_guide.html>`_
\ No newline at end of file
diff --git a/docs/readthedocs/source/doc/Nano/Overview/install.md b/docs/readthedocs/source/doc/Nano/Overview/install.md
index 35d02000..25c97055 100644
--- a/docs/readthedocs/source/doc/Nano/Overview/install.md
+++ b/docs/readthedocs/source/doc/Nano/Overview/install.md
@@ -91,7 +91,7 @@ For Linux, Ubuntu (22.04/20.04/18.04) is recommended.

 For Windows OS, users could only run `bigdl-nano-init` every time they open a new cmd terminal.

-We recommend using Windows Subsystem for Linux 2 (WSL2) to run BigDL-Nano. Please refer to [Nano Windows install guide](../Howto/windows_guide.md) for instructions.
+We recommend using Windows Subsystem for Linux 2 (WSL2) to run BigDL-Nano. Please refer to [Nano Windows install guide](../Howto/Install/windows_guide.md) for instructions.

 ### Install on MacOS
 #### MacOS with Intel Chip
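
Note on the conf.py change: the patch drops recommonmark's deprecated `enable_auto_doc_ref` option and keeps the rest of the AutoStructify configuration. The listing below is a minimal, hypothetical conf.py excerpt (not part of the patch) sketching what the remaining setup looks like in full; it assumes recommonmark is installed and mirrors only the option names visible in the diff above.

    # Sketch of the AutoStructify registration kept by this patch (assumption:
    # recommonmark is available in the docs build environment).
    from recommonmark.transform import AutoStructify

    def setup(app):
        app.add_config_value(
            'recommonmark_config',
            {
                'auto_toc_tree_section': 'Contents',
                'enable_math': False,
                'enable_inline_math': False,
                'enable_eval_rst': True,  # deprecated 'enable_auto_doc_ref' removed
            },
            True,  # rebuild the environment when this value changes
        )
        app.add_transform(AutoStructify)

As the commit message notes, `enable_auto_doc_ref` was deprecated upstream in recommonmark, so removing it silences the deprecation warning without changing how the Markdown how-to pages are rendered.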