diff --git a/docs/readthedocs/source/doc/Nano/Overview/pytorch_inference.md b/docs/readthedocs/source/doc/Nano/Overview/pytorch_inference.md
index 8a4e1b76..0ad449fd 100644
--- a/docs/readthedocs/source/doc/Nano/Overview/pytorch_inference.md
+++ b/docs/readthedocs/source/doc/Nano/Overview/pytorch_inference.md
@@ -160,12 +160,12 @@ There are a few arguments required only by INC, and you should not specify or mo
 
 Here is an example to use INC with accuracy control as below. It will search for a model within 1% accuracy drop with 10 trials.
 ```python
-from torchmetrics.classification import Accuracy
+from torchmetrics.classification import MulticlassAccuracy
 InferenceOptimizer.quantize(model,
                             precision='int8',
                             accelerator=None,
                             calib_data=dataloader,
-                            metric=Accuracy()
+                            metric=MulticlassAccuracy(num_classes=10),
                             accuracy_criterion={'relative': 0.01, 'higher_is_better': True},
                             approach='static',
                             method='fx',
@@ -182,7 +182,7 @@ InferenceOptimizer.quantize(model,
                             precision='int8',
                             accelerator='openvino',
                             calib_data=dataloader,
-                            metric=Accuracy()
+                            metric=MulticlassAccuracy(num_classes=10),
                             accuracy_criterion={'relative': 0.01, 'higher_is_better': True},
                             approach='static',
                             max_trials=10,
diff --git a/docs/readthedocs/source/doc/Nano/Overview/tensorflow_inference.md b/docs/readthedocs/source/doc/Nano/Overview/tensorflow_inference.md
index 610649c5..fda74a5c 100644
--- a/docs/readthedocs/source/doc/Nano/Overview/tensorflow_inference.md
+++ b/docs/readthedocs/source/doc/Nano/Overview/tensorflow_inference.md
@@ -76,12 +76,12 @@ There are a few arguments required only by INC.
 - `outputs`: A list of output names. Default: None, automatically get names from the graph.
 
 Here is an example to use INC with accuracy control as below. It will search for a model within 1% accuracy drop with 10 trials.
 ```python
-from torchmetrics.classification import Accuracy
+from torchmetrics.classification import MulticlassAccuracy
 q_model = model.quantize(precision='int8',
                          accelerator=None,
                          calib_dataset= train_dataset,
-                         metric=Accuracy(),
+                         metric=MulticlassAccuracy(num_classes=10),
                          accuracy_criterion={'relative': 0.01, 'higher_is_better': True},
                          approach='static',
                          tuning_strategy='bayesian',
diff --git a/docs/readthedocs/source/doc/Nano/QuickStart/pytorch_quantization_inc.md b/docs/readthedocs/source/doc/Nano/QuickStart/pytorch_quantization_inc.md
index 5d0d3864..289eaf34 100644
--- a/docs/readthedocs/source/doc/Nano/QuickStart/pytorch_quantization_inc.md
+++ b/docs/readthedocs/source/doc/Nano/QuickStart/pytorch_quantization_inc.md
@@ -52,7 +52,7 @@ train_dataloader = DataLoader(train_dataset, batch_size=32)
 import torch
 from torchvision.models import resnet18
 from bigdl.nano.pytorch import Trainer
-from torchmetrics import Accuracy
+from torchmetrics.classification import MulticlassAccuracy
 
 model_ft = resnet18(pretrained=True)
 num_ftrs = model_ft.fc.in_features
@@ -62,7 +62,7 @@ loss_ft = torch.nn.CrossEntropyLoss()
 optimizer_ft = torch.optim.SGD(model_ft.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
 
 # Compile our model with loss function, optimizer.
-model = Trainer.compile(model_ft, loss_ft, optimizer_ft, metrics=[Accuracy])
+model = Trainer.compile(model_ft, loss_ft, optimizer_ft, metrics=[MulticlassAccuracy(num_classes=37)])
 trainer = Trainer(max_epochs=5)
 trainer.fit(model, train_dataloader=train_dataloader)
 
@@ -79,8 +79,8 @@ Quantization is widely used to compress models to a lower precision, which not o
 Without extra accelerator, `InferenceOptimizer.quantize()` returns a pytorch module with desired precision and accuracy. You can add quantization as below:
 ```python
 from bigdl.nano.pytorch import InferenceOptimizer
-from torchmetrics.functional import accuracy
-q_model = InferenceOptimizer.quantize(model, calib_data=train_dataloader, metric=accuracy)
+from torchmetrics.classification import MulticlassAccuracy
+q_model = InferenceOptimizer.quantize(model, calib_data=train_dataloader, metric=MulticlassAccuracy(num_classes=37))
 
 # run simple prediction
 y_hat = q_model(x)
diff --git a/docs/readthedocs/source/doc/Nano/QuickStart/pytorch_quantization_inc_onnx.md b/docs/readthedocs/source/doc/Nano/QuickStart/pytorch_quantization_inc_onnx.md
index 8ba8a04b..51c2dc15 100644
--- a/docs/readthedocs/source/doc/Nano/QuickStart/pytorch_quantization_inc_onnx.md
+++ b/docs/readthedocs/source/doc/Nano/QuickStart/pytorch_quantization_inc_onnx.md
@@ -51,7 +51,7 @@ train_dataloader = DataLoader(train_dataset, batch_size=32)
 import torch
 from torchvision.models import resnet18
 from bigdl.nano.pytorch import Trainer
-from torchmetrics import Accuracy
+from torchmetrics.classification import MulticlassAccuracy
 
 model_ft = resnet18(pretrained=True)
 num_ftrs = model_ft.fc.in_features
@@ -61,7 +61,7 @@ loss_ft = torch.nn.CrossEntropyLoss()
 optimizer_ft = torch.optim.SGD(model_ft.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
 
 # Compile our model with loss function, optimizer.
-model = Trainer.compile(model_ft, loss_ft, optimizer_ft, metrics=[Accuracy])
+model = Trainer.compile(model_ft, loss_ft, optimizer_ft, metrics=[MulticlassAccuracy(num_classes=37)])
 trainer = Trainer(max_epochs=5)
 trainer.fit(model, train_dataloader=train_dataloader)
 
@@ -78,8 +78,8 @@ With the ONNXRuntime accelerator, `InferenceOptimizer.quantize()` will return a
 you can add quantization as below:
 ```python
 from bigdl.nano.pytorch import InferenceOptimizer
-from torchmetrics.functional import accuracy
-ort_q_model = InferenceOptimizer.quantize(model, accelerator='onnxruntime', calib_data=train_dataloader, metric=accuracy)
+from torchmetrics.classification import MulticlassAccuracy
+ort_q_model = InferenceOptimizer.quantize(model, accelerator='onnxruntime', calib_data=train_dataloader, metric=MulticlassAccuracy(num_classes=37))
 
 # run simple prediction
 y_hat = ort_q_model(x)
diff --git a/docs/readthedocs/source/doc/Nano/QuickStart/pytorch_quantization_openvino.md b/docs/readthedocs/source/doc/Nano/QuickStart/pytorch_quantization_openvino.md
index 6d3de952..d59accd5 100644
--- a/docs/readthedocs/source/doc/Nano/QuickStart/pytorch_quantization_openvino.md
+++ b/docs/readthedocs/source/doc/Nano/QuickStart/pytorch_quantization_openvino.md
@@ -77,7 +77,6 @@ y_hat.argmax(dim=1)
 Accelerator='openvino' means using OpenVINO POT to do quantization. The quantization can be added as below:
```python
 from bigdl.nano.pytorch import InferenceOptimizer
-from torchmetrics import Accuracy
 ov_q_model = InferenceOptimizer.quantize(model, accelerator="openvino", calib_data=data_loader)
 
 # run simple prediction
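
For context on what these hunks change: torchmetrics 0.11 split the generic `Accuracy` metric into task-specific classes, and the multiclass variant requires `num_classes` at construction time, which is why every call site above now builds a `MulticlassAccuracy(num_classes=...)` instance instead of referencing `Accuracy` or the functional `accuracy`. Below is a minimal sketch of the updated metric object in isolation (illustrative only, not part of the patch; the batch size and random tensors are made up, and `num_classes=10` simply mirrors the Overview examples):

```python
# Sketch of the torchmetrics >= 0.11 API assumed by the patch above.
import torch
from torchmetrics.classification import MulticlassAccuracy

# num_classes is now mandatory; 10 mirrors the Overview examples
# (the QuickStart hunks use 37 for their 37-class dataset).
metric = MulticlassAccuracy(num_classes=10)

# Dummy batch: raw logits of shape (batch, num_classes) plus integer targets.
preds = torch.randn(32, 10)
target = torch.randint(0, 10, (32,))

# Calling the metric scores this batch; logits are accepted directly and
# argmax-ed internally, so no manual softmax/argmax is needed.
print(metric(preds, target))
```

This also explains the direction of the change in the `Trainer.compile` hunks: the old code passed the bare `Accuracy` class in `metrics=[...]`, while the new API needs a configured instance, since `num_classes` cannot be inferred later.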