Nano: fix some typos in the web documentation (#7113)
This commit is contained in:
parent
189a2f5179
commit
614d2ab289
2 changed files with 2 additions and 2 deletions
|
|
@ -68,7 +68,7 @@ You can simply append the following part to enable your [ONNXRuntime](https://on
|
|||
```python
|
||||
# step 4: trace your model as an ONNXRuntime model
|
||||
# if you have run `trainer.fit` before trace, then argument `input_sample` is not required.
|
||||
ort_model = InferenceOptimizer.trace(model, accelerator='onnruntime', input_sample=x)
|
||||
ort_model = InferenceOptimizer.trace(model, accelerator='onnxruntime', input_sample=x)
|
||||
|
||||
# step 5: use returned model for transparent acceleration
|
||||
# The usage is almost the same with any PyTorch module
|
||||
|
|
|
|||
|
|
@ -66,7 +66,7 @@ class LitResnet(LightningModule):
|
|||
def __init__(self, learning_rate=0.05, num_processes=1):
|
||||
super().__init__()
|
||||
|
||||
self.save_hyperparameters()
|
||||
self.save_hyperparameters('learning_rate', 'num_processes')
|
||||
self.model = create_model()
|
||||
|
||||
def forward(self, x):
|
||||
|
|
|
|||
Loading…
Reference in a new issue