Chronos: train TCN model on GPU and speed up inference on CPU (#5594)
* add nano_gpu
* simplify the code
* simplify the code
* modify the code
* add some updates
* add document

Co-authored-by: theaperdeng <theaperdeng@outlook.com>
parent dcff01690b
commit c73fc9f6ad

2 changed files with 14 additions and 3 deletions
@@ -51,7 +51,8 @@ $(".checkboxes").click(function(){
 var ids = ["ChronosForecaster","TuneaForecasting","AutoTSEstimator","AutoWIDE",
 "MultvarWIDE","MultstepWIDE","LSTMForecaster","AutoProphet","AnomalyDetection",
 "DeepARmodel","TFTmodel","hyperparameter","taxiDataset","distributedFashion",
-"ONNX","Quantize","TCMFForecaster","PenalizeUnderestimation"];
+"ONNX","Quantize","TCMFForecaster","PenalizeUnderestimation",
+"GPUtrainingCPUacceleration"];
 showTutorials(ids);
 var disIds = ["simulation"];
 disCheck(disIds);
@@ -94,7 +95,7 @@ $(".checkboxes").click(function(){
 disCheck(disIds);
 }
 else if(vals.includes("customized_model")){
-var ids = ["AutoTSEstimator","DeepARmodel","TFTmodel"];
+var ids = ["AutoTSEstimator","DeepARmodel","TFTmodel", "GPUtrainingCPUacceleration"];
 showTutorials(ids);
 var disIds = ["anomaly_detection","simulation","onnxruntime","quantization","distributed"];
 disCheck(disIds);
@@ -114,7 +115,7 @@ $(".checkboxes").click(function(){
 disCheck(disIds);
 }
 else if(vals.includes("forecast") && vals.includes("customized_model")){
-var ids = ["DeepARmodel","TFTmodel","AutoTSEstimator"];
+var ids = ["DeepARmodel","TFTmodel","AutoTSEstimator","GPUtrainingCPUacceleration"];
 showTutorials(ids);
 var disIds = ["anomaly_detection","simulation","onnxruntime","quantization","distributed"];
 disCheck(disIds);
@@ -244,6 +244,16 @@
 </details>
 <hr>
 
+<details id="GPUtrainingCPUacceleration">
+<summary>
+<a href="https://github.com/intel-analytics/BigDL/tree/main/python/chronos/example/inference-acceleration">Accelerate the inference speed of model trained on other platform</a>
+<p>Tag: <button value="forecast">forecast</button> <button value="customized_model">customized model</button></p>
+</summary>
+<img src="../../../_images/GitHub-Mark-32px.png"><a href="https://github.com/intel-analytics/BigDL/tree/main/python/chronos/example/inference-acceleration">View source on GitHub</a>
+<p>In this example, we show an example to train the model on GPU and accelerate the model by using onnxruntime on CPU.</p>
+</details>
+<hr>
+
 </div>
 
 <script src="../../../_static/js/chronos_tutorial.js"></script>
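The tutorial entry added by this commit describes training the TCN model on GPU and then accelerating its inference with onnxruntime on CPU. Below is a minimal sketch of that general pattern using plain PyTorch and onnxruntime, not the code of the linked Chronos example; the toy 1-D convolutional model, the tensor shapes, and the "tcn.onnx" file name are illustrative assumptions.

```python
# Sketch: train on GPU (if available), export to ONNX, run inference on CPU.
# The model and shapes below are placeholders, not the Chronos example's code.
import numpy as np
import torch
import torch.nn as nn
import onnxruntime as ort

# Toy dilated 1-D convolutional forecaster standing in for a TCN (assumption).
model = nn.Sequential(
    nn.Conv1d(1, 16, kernel_size=3, padding=2, dilation=2),
    nn.ReLU(),
    nn.Conv1d(16, 1, kernel_size=1),
)

device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

# ... training loop on `device` would go here ...

# Export the trained model to ONNX so inference no longer depends on the GPU.
model.eval().cpu()
dummy = torch.randn(1, 1, 48)  # (batch, features, past_seq_len) -- assumed shape
torch.onnx.export(model, dummy, "tcn.onnx", input_names=["x"], output_names=["y"])

# Run accelerated inference on CPU with onnxruntime.
sess = ort.InferenceSession("tcn.onnx", providers=["CPUExecutionProvider"])
pred = sess.run(None, {"x": np.random.randn(1, 1, 48).astype(np.float32)})[0]
print(pred.shape)
```

Exporting to ONNX decouples inference from the training framework and device, which is what lets a GPU-trained model run efficiently on CPU-only machines.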