diff --git a/docs/readthedocs/source/_toc.yml b/docs/readthedocs/source/_toc.yml
index 0e01696e..4e7e9553 100644
--- a/docs/readthedocs/source/_toc.yml
+++ b/docs/readthedocs/source/_toc.yml
@@ -114,8 +114,13 @@ subtrees:
             - file: doc/Nano/Howto/Training/General/choose_num_processes_training
             - file: doc/Nano/Howto/Inference/PyTorch/accelerate_pytorch_inference_onnx
             - file: doc/Nano/Howto/Inference/PyTorch/accelerate_pytorch_inference_openvino
+            - file: doc/Nano/Howto/Inference/PyTorch/accelerate_pytorch_inference_jit_ipex
             - file: doc/Nano/Howto/Inference/PyTorch/quantize_pytorch_inference_inc
             - file: doc/Nano/Howto/Inference/PyTorch/quantize_pytorch_inference_pot
+            - file: doc/Nano/Howto/Inference/PyTorch/pytorch_save_and_load_ipex
+            - file: doc/Nano/Howto/Inference/PyTorch/pytorch_save_and_load_jit
+            - file: doc/Nano/Howto/Inference/PyTorch/pytorch_save_and_load_onnx
+            - file: doc/Nano/Howto/Inference/PyTorch/pytorch_save_and_load_openvino
             - file: doc/Nano/Howto/Inference/PyTorch/inference_optimizer_optimize
             - file: doc/Nano/Howto/install_in_colab
             - file: doc/Nano/Howto/windows_guide
diff --git a/docs/readthedocs/source/doc/Nano/Howto/Inference/PyTorch/accelerate_pytorch_inference_jit_ipex.nblink b/docs/readthedocs/source/doc/Nano/Howto/Inference/PyTorch/accelerate_pytorch_inference_jit_ipex.nblink
new file mode 100644
index 00000000..a242e2c4
--- /dev/null
+++ b/docs/readthedocs/source/doc/Nano/Howto/Inference/PyTorch/accelerate_pytorch_inference_jit_ipex.nblink
@@ -0,0 +1,3 @@
+{
+    "path": "../../../../../../../../python/nano/tutorial/notebook/inference/pytorch/accelerate_pytorch_inference_jit_ipex.ipynb"
+}
\ No newline at end of file
diff --git a/docs/readthedocs/source/doc/Nano/Howto/Inference/PyTorch/pytorch_save_and_load_ipex.nblink b/docs/readthedocs/source/doc/Nano/Howto/Inference/PyTorch/pytorch_save_and_load_ipex.nblink
new file mode 100644
index 00000000..7f88781a
--- /dev/null
+++ b/docs/readthedocs/source/doc/Nano/Howto/Inference/PyTorch/pytorch_save_and_load_ipex.nblink
@@ -0,0 +1,3 @@
+{
+    "path": "../../../../../../../../python/nano/tutorial/notebook/inference/pytorch/pytorch_save_and_load_ipex.ipynb"
+}
\ No newline at end of file
diff --git a/docs/readthedocs/source/doc/Nano/Howto/Inference/PyTorch/pytorch_save_and_load_jit.nblink b/docs/readthedocs/source/doc/Nano/Howto/Inference/PyTorch/pytorch_save_and_load_jit.nblink
new file mode 100644
index 00000000..0e47915a
--- /dev/null
+++ b/docs/readthedocs/source/doc/Nano/Howto/Inference/PyTorch/pytorch_save_and_load_jit.nblink
@@ -0,0 +1,3 @@
+{
+    "path": "../../../../../../../../python/nano/tutorial/notebook/inference/pytorch/pytorch_save_and_load_jit.ipynb"
+}
\ No newline at end of file
diff --git a/docs/readthedocs/source/doc/Nano/Howto/Inference/PyTorch/pytorch_save_and_load_onnx.nblink b/docs/readthedocs/source/doc/Nano/Howto/Inference/PyTorch/pytorch_save_and_load_onnx.nblink
new file mode 100644
index 00000000..1777bb80
--- /dev/null
+++ b/docs/readthedocs/source/doc/Nano/Howto/Inference/PyTorch/pytorch_save_and_load_onnx.nblink
@@ -0,0 +1,3 @@
+{
+    "path": "../../../../../../../../python/nano/tutorial/notebook/inference/pytorch/pytorch_save_and_load_onnx.ipynb"
+}
\ No newline at end of file
diff --git a/docs/readthedocs/source/doc/Nano/Howto/Inference/PyTorch/pytorch_save_and_load_openvino.nblink b/docs/readthedocs/source/doc/Nano/Howto/Inference/PyTorch/pytorch_save_and_load_openvino.nblink
new file mode 100644
index 00000000..9b8b481d
--- /dev/null
+++ b/docs/readthedocs/source/doc/Nano/Howto/Inference/PyTorch/pytorch_save_and_load_openvino.nblink
@@ -0,0 +1,3 @@
+{
+    "path": "../../../../../../../../python/nano/tutorial/notebook/inference/pytorch/pytorch_save_and_load_openvino.ipynb"
+}
\ No newline at end of file
diff --git a/docs/readthedocs/source/doc/Nano/Howto/index.rst b/docs/readthedocs/source/doc/Nano/Howto/index.rst
index 7d542dac..e7f74736 100644
--- a/docs/readthedocs/source/doc/Nano/Howto/index.rst
+++ b/docs/readthedocs/source/doc/Nano/Howto/index.rst
@@ -49,8 +49,13 @@ PyTorch
 * `How to accelerate a PyTorch inference pipeline through ONNXRuntime <Inference/PyTorch/accelerate_pytorch_inference_onnx.html>`_
 * `How to accelerate a PyTorch inference pipeline through OpenVINO <Inference/PyTorch/accelerate_pytorch_inference_openvino.html>`_
+* `How to accelerate a PyTorch inference pipeline through JIT/IPEX <Inference/PyTorch/accelerate_pytorch_inference_jit_ipex.html>`_
 * `How to quantize your PyTorch model for inference using Intel Neural Compressor <Inference/PyTorch/quantize_pytorch_inference_inc.html>`_
 * `How to quantize your PyTorch model for inference using OpenVINO Post-training Optimization Tools <Inference/PyTorch/quantize_pytorch_inference_pot.html>`_
+* `How to save and load optimized IPEX model <Inference/PyTorch/pytorch_save_and_load_ipex.html>`_
+* `How to save and load optimized JIT model <Inference/PyTorch/pytorch_save_and_load_jit.html>`_
+* `How to save and load optimized ONNXRuntime model <Inference/PyTorch/pytorch_save_and_load_onnx.html>`_
+* `How to save and load optimized OpenVINO model <Inference/PyTorch/pytorch_save_and_load_openvino.html>`_
 * `How to find accelerated method with minimal latency using InferenceOptimizer <Inference/PyTorch/inference_optimizer_optimize.html>`_
 
 Install
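The how-to pages wired up above all document one common BigDL-Nano workflow: accelerate a PyTorch model with `InferenceOptimizer.trace()` (here `accelerator="jit"` plus IPEX; `"onnxruntime"` and `"openvino"` correspond to the other pages), then persist and restore it with `InferenceOptimizer.save()` / `InferenceOptimizer.load()`. Below is a minimal sketch of that flow, not a substitute for the linked notebooks: the ResNet-18 model, dummy input shape, and save path are illustrative only, and exact keyword arguments should be checked against the how-tos themselves.

```python
import torch
from torchvision.models import resnet18

from bigdl.nano.pytorch import InferenceOptimizer

# Illustrative model and input sample; any eval-mode nn.Module works.
model = resnet18(num_classes=10)
model.eval()
x = torch.rand(1, 3, 224, 224)

# Accelerate with TorchScript (JIT) combined with IPEX, as covered by
# the accelerate_pytorch_inference_jit_ipex notebook; swapping
# accelerator to "onnxruntime" or "openvino" matches the other pages.
jit_ipex_model = InferenceOptimizer.trace(model,
                                          accelerator="jit",
                                          use_ipex=True,
                                          input_sample=x)

# Persist the optimized model to a directory (path is illustrative) ...
InferenceOptimizer.save(jit_ipex_model, "./optimized_model")

# ... and restore it later. JIT/ONNX/OpenVINO artifacts reload on their
# own; per the save-and-load how-tos, an IPEX-only optimization is saved
# as a state dict, so loading it needs the original model passed in,
# e.g. InferenceOptimizer.load("./optimized_model", model=model).
loaded_model = InferenceOptimizer.load("./optimized_model")

with torch.no_grad():
    prediction = loaded_model(x)
```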