diff --git a/docs/readthedocs/source/_static/css/nano_installation_guide.css b/docs/readthedocs/source/_static/css/nano_installation_guide.css
new file mode 100644
index 00000000..64f35f82
--- /dev/null
+++ b/docs/readthedocs/source/_static/css/nano_installation_guide.css
@@ -0,0 +1,52 @@
+.displayed {
+  display: block;
+  text-align: center;
+  margin-left: auto;
+  margin-right: auto;
+}
+
+#table-1 {
+  border-bottom-width: 0px;
+  margin: auto;
+}
+
+#table-1 tr, #table-1 td {
+  background-color: var(--pst-color-on-surface);
+  height: 30px;
+  border-width: 2px;
+  border-style: solid;
+  border-color: white;
+  padding: 5px;
+}
+
+#table-1 tr td:first-child {
+  font-weight: 600;
+}
+
+#table-1 td {
+  font-size: 16px;
+  font-family: Verdana;
+  color: var(--pst-color-text-base);
+  text-align: center;
+  /* height: 56px;
+  line-height: 56px; */
+  width: 160px;
+}
+
+#table-1 button {
+  font-size: 16px;
+  border-color: transparent;
+  width: 100%;
+  height: 100%;
+  background-color: transparent;
+  cursor: pointer;
+}
+
+#table-1 #cmd {
+  height: 2em;
+  text-align: left;
+}
+
+#table-1 .taller_tr {
+  height: 71px;
+}
\ No newline at end of file
diff --git a/docs/readthedocs/source/_static/js/nano_installation_guide.js b/docs/readthedocs/source/_static/js/nano_installation_guide.js
new file mode 100644
index 00000000..b10a272e
--- /dev/null
+++ b/docs/readthedocs/source/_static/js/nano_installation_guide.js
@@ -0,0 +1,177 @@
+var inferences=["inferenceyes", "inferenceno"];
+var frameworks=["pytorch", "tensorflow"];
+var versions=["pytorch_110", "pytorch_111", "pytorch_112", "pytorch_113", "tf2_270"];
+var releases=["stable", "nightly"];
+
+var inference="inferenceyes";
+var framework="pytorch";
+var version="pytorch_112";
+var release="nightly";
+
+function refresh_cmd(){
+    reset_color(frameworks);
+    reset_color(inferences);
+    reset_color(releases);
+
+    set_color(framework);
+    set_color(inference);
+    set_color(release);
+
+    var cmd="NA";
+
+    $("#version").empty();
+    if(framework=="pytorch"){
+        $("#version").append("<td colspan='1'>Pytorch Version</td>\
+            <td colspan='1'><button id='pytorch_110'>1.10</button></td>\
+            <td colspan='1'><button id='pytorch_111'>1.11</button></td>\
+            <td colspan='1'><button id='pytorch_112'>1.12</button></td>\
+            <td colspan='1'><button id='pytorch_113'>1.13</button></td>");
+    }
+    else if(framework=="tensorflow"){
+        $("#version").append("<td colspan='1'>Tensorflow Version</td>\
+            <td colspan='4'><button id='tf2_270'>2.7.0</button></td>");
+    }
+    reset_color(versions);
+    set_color(version);
+
+    if(release!="nightly"){
+        disable(versions);
+        disable(inferences);
+    }
+    else{
+        enable(versions);
+        enable(inferences);
+    }
+
+    if (framework=="pytorch"){
+        document.getElementById("cmd").style.whiteSpace = "normal";
+    }
+    else{
+        document.getElementById('cmd').style.whiteSpace = "nowrap";
+    }
+
+    if(framework=="pytorch"){
+        if(release=="stable"){
+            cmd="pip install bigdl-nano[pytorch]==2.1.0";
+        }else if(release=="nightly"){
+            if(inference=="inferenceyes"){
+                if(version=="pytorch_110"){
+                    cmd="pip install --pre --upgrade bigdl-nano[pytorch_110,inference]";
+                }else if(version=="pytorch_111"){
+                    cmd="pip install --pre --upgrade bigdl-nano[pytorch_111,inference]";
+                }else if(version=="pytorch_112"){
+                    cmd="pip install --pre --upgrade bigdl-nano[pytorch,inference]";
+                }else if(version=="pytorch_113"){
+                    cmd="pip install --pre --upgrade bigdl-nano[pytorch_113,inference] && pip install neural_compressor==1.14";
+                }
+            }else if(inference=="inferenceno"){
+                if(version=="pytorch_110"){
+                    cmd="pip install --pre --upgrade bigdl-nano[pytorch_110]";
+                }else if(version=="pytorch_111"){
+                    cmd="pip install --pre --upgrade bigdl-nano[pytorch_111]";
+                }else if(version=="pytorch_112"){
+                    cmd="pip install --pre --upgrade bigdl-nano[pytorch]";
+                }else if(version=="pytorch_113"){
+                    cmd="pip install --pre --upgrade bigdl-nano[pytorch_113]";
+                }
+            }
+        }
+    }else if(framework=="tensorflow"){
+        if(inference=="inferenceyes"){
+            if (version=="tf2_270"){
+                if (release=="nightly"){
+                    cmd="pip install --pre --upgrade bigdl-nano[tensorflow,inference]";
+                }else if(release=="stable"){
+                    cmd="pip install bigdl-nano[tensorflow]==2.1.0";
+                }
+            }
+        }else if(inference=="inferenceno"){
+            if(version=="tf2_270"){
+                if(release=="nightly"){
+                    cmd="pip install --pre --upgrade bigdl-nano[tensorflow]";
+                }else if(release=="stable"){
+                    cmd="pip install bigdl-nano[tensorflow]==2.1.0";
+                }
+            }
+        }
+    }
+    $("#cmd").html(cmd);
+}
+
+function set_color(id){
+    $("#"+id).parent().css("background-color","var(--pst-color-primary)");
+    $("#"+id).css("color","var(--pst-color-primary-text)");
+    $("#"+id).addClass("isset");
+}
+
+// reset the color of unselected buttons
+function reset_color(list){
+    for (btn in list){
+        $("#"+list[btn]).parent().css("background-color","transparent");
+        $("#"+list[btn]).css("color","var(--pst-color-text-base)");
+        $("#"+list[btn]).removeClass("isset");
+    }
+}
+
+// disable buttons
+function disable(list){
+    for(btn in list){
+        $("#"+list[btn]).css("text-decoration","line-through");
+        $("#"+list[btn]).attr("disabled","true");
+    }
+    reset_color(list);
+    for(btn in list){
+        $("#"+list[btn]).parent().css("background-color","var(--pst-color-muted)");
+    }
+}
+
+// enable buttons
+function enable(list){
+    for(btn in list){
+        $("#"+list[btn]).css("text-decoration","none");
+        $("#"+list[btn]).attr("disabled",false);
+    }
+}
+
+// when a button is clicked, update the corresponding state variable
+$(document).on('click',"button",function(){
+    var id = $(this).attr("id");
+
+    if (frameworks.indexOf(id)>=0){
+        framework=id;
+        if (framework=="tensorflow"){
+            version="tf2_270";
+        }else{
+            version="pytorch_112";
+        }
+    }
+    else if (releases.indexOf(id)>=0){
+        release=id;
+    }
+    else if (inferences.indexOf(id)>=0){
+        inference=id;
+    }
+    else if (versions.indexOf(id)>=0){
+        version=id;
+    }
+
+    refresh_cmd();
+});
+
+// add button hover effect
+$(document).on({
+    mouseenter: function () {
+        if($(this).prop("disabled")!=true){
+            $(this).parent().css("background-color","var(--pst-color-primary)");
+            $(this).css("color","var(--pst-color-primary-text)");
+        }
+    },
+    mouseleave: function () {
+        if(!$(this).hasClass("isset") && $(this).prop("disabled")!=true){
+            $(this).parent().css("background-color","transparent");
+            $(this).css("color","var(--pst-color-text-base)");
+        }
+    }
+}, "button");
+
+refresh_cmd();
diff --git a/docs/readthedocs/source/doc/Nano/Overview/install.md b/docs/readthedocs/source/doc/Nano/Overview/install.md
index 7b08082e..c65081c8 100644
--- a/docs/readthedocs/source/doc/Nano/Overview/install.md
+++ b/docs/readthedocs/source/doc/Nano/Overview/install.md
@@ -5,20 +5,48 @@ Note: For windows users, we recommend using Windows Subsystem for Linux 2 (WSL2)
 
 BigDL-Nano can be installed using pip and we recommend installing BigDL-Nano in a conda environment.
 
-For PyTorch Users, you can install bigdl-nano along with some dependencies specific to PyTorch using the following commands.
+You can install bigdl-nano along with the dependencies specific to PyTorch or TensorFlow by selecting your preferences in the panel below.
 
-```bash
-conda create -n env
-conda activate env
-pip install --pre --upgrade bigdl-nano[pytorch]
-```
+```eval_rst
+.. raw:: html
+
-For TensorFlow users, you can install bigdl-nano along with some dependencies specific to TensorFlow using the following commands.
+    <link rel="stylesheet" type="text/css" href="../../../_static/css/nano_installation_guide.css" />
 
-```bash
-conda create -n env
-conda activate env
-pip install --pre --upgrade bigdl-nano[tensorflow]
+    <div class="displayed">
+      <table id="table-1">
+        <tbody>
+          <tr>
+            <td colspan="1">Framework</td>
+            <td colspan="2"><button id="pytorch">Pytorch</button></td>
+            <td colspan="2"><button id="tensorflow">Tensorflow</button></td>
+          </tr>
+          <tr id="version">
+            <td colspan="1">Version</td>
+            <td colspan="1"><button id="pytorch_110">1.10</button></td>
+            <td colspan="1"><button id="pytorch_111">1.11</button></td>
+            <td colspan="1"><button id="pytorch_112">1.12</button></td>
+            <td colspan="1"><button id="pytorch_113">1.13</button></td>
+          </tr>
+          <tr>
+            <td colspan="1">Inference Optimization</td>
+            <td colspan="2"><button id="inferenceyes">Yes</button></td>
+            <td colspan="2"><button id="inferenceno">No</button></td>
+          </tr>
+          <tr>
+            <td colspan="1">Release</td>
+            <td colspan="2"><button id="nightly">Nightly</button></td>
+            <td colspan="2"><button id="stable">Stable</button></td>
+          </tr>
+          <tr class="taller_tr">
+            <td colspan="1">Install CMD</td>
+            <td colspan="4" id="cmd">NA</td>
+          </tr>
+        </tbody>
+      </table>
+    </div>
+
+    <script src="../../../_static/js/nano_installation_guide.js"></script>
 ```
 
 We also partially support M-series chip users with no guarantee of acceleration with same API. Currently only tensorflow is experimentally supported.
 
@@ -37,16 +65,19 @@ pip install --pre --upgrade bigdl-nano[tensorflow]
 For stable version, please refer to the document and installation guide `here `_ .
 ```
 
-After installing bigdl-nano, you can run the following command to setup a few environment variables.
-
 ```bash
+conda create -n env
+conda activate env
+# select your preferences in the panel above to find the proper command to replace the command below, e.g.
+pip install --pre --upgrade bigdl-nano[pytorch]
+# after installing bigdl-nano, you can run the following command to set up a few environment variables.
 source bigdl-nano-init
 ```
 
 The `bigdl-nano-init` scripts will export a few environment variable according to your hardware to maximize performance.
 
-In a conda environment, `source bigdl-nano-init` will also be added to `$CONDA_PREFIX/etc/conda/activate.d/`, which will automaticly run when you activate your current environment.
+In a conda environment, when you run `source bigdl-nano-init` manually, this command will also be added to `$CONDA_PREFIX/etc/conda/activate.d/`, so that it runs automatically whenever you activate the environment.
 
 In a pure pip environment, you need to run `source bigdl-nano-init` every time you open a new shell to get optimal performance and run `source bigdl-nano-unset-env` if you want to unset these environment variables.
 
----
\ No newline at end of file
+---
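A minimal sketch of the environment-setup flow that the updated guide describes, assuming a conda environment named `env`. The specific variables shown (`OMP_NUM_THREADS`, `KMP_*`, `LD_PRELOAD`) are illustrative assumptions only; the exact set exported by `bigdl-nano-init` depends on your hardware.

```bash
# Sketch of the per-shell setup flow (variable names are illustrative, not guaranteed).
conda activate env
source bigdl-nano-init          # exports hardware-dependent tuning variables for this shell

# Inspect what was set; OMP/KMP/allocator-related variables are typical candidates.
env | grep -E 'OMP|KMP|MALLOC|LD_PRELOAD'

# In a conda environment the init script is also registered here,
# so it re-runs automatically on every activation of this environment.
ls "$CONDA_PREFIX/etc/conda/activate.d/"

# Undo the changes in the current shell if needed.
source bigdl-nano-unset-env
```

In a pure pip environment the activation hook is not available, so the `source bigdl-nano-init` step above has to be repeated in every new shell.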