diff --git a/.github/workflows/llm-c-evaluation.yml b/.github/workflows/llm-c-evaluation.yml
index b5a4b07e..0c2a2f22 100644
--- a/.github/workflows/llm-c-evaluation.yml
+++ b/.github/workflows/llm-c-evaluation.yml
@@ -79,7 +79,7 @@ jobs:
           echo "model_name=$model_name" >> $GITHUB_OUTPUT
           echo "precision=$precision" >> $GITHUB_OUTPUT
           echo "runner=$runner" >> $GITHUB_OUTPUT
-  llm-ceval-evalution:
+  llm-ceval-evaluation:
     timeout-minutes: 1200
     needs: [llm-cpp-build, set-matrix]
     strategy:
@@ -175,7 +175,7 @@ jobs:
 
   llm-ceval-summary:
     if: ${{ always() }}
-    needs: llm-ceval-evalution
+    needs: llm-ceval-evaluation
     runs-on: ubuntu-latest
     steps:
     - uses: actions/checkout@v3
diff --git a/.github/workflows/llm-harness-evaluation.yml b/.github/workflows/llm-harness-evaluation.yml
index 03d89622..f01c08ea 100644
--- a/.github/workflows/llm-harness-evaluation.yml
+++ b/.github/workflows/llm-harness-evaluation.yml
@@ -1,4 +1,4 @@
-name: LLM Harness Evalution
+name: LLM Harness Evaluation
 
 # Cancel previous runs in the PR when you push new commits
 concurrency:
@@ -20,19 +20,19 @@ on:
   workflow_dispatch:
     inputs:
       model_name:
-        description: 'Model names, seperated by comma and must be quoted.'
+        description: 'Model names, separated by comma and must be quoted.'
         required: true
         type: string
       precision:
-        description: 'Precisions, seperated by comma and must be quoted.'
+        description: 'Precisions, separated by comma and must be quoted.'
         required: true
         type: string
       task:
-        description: 'Tasks, seperated by comma and must be quoted.'
+        description: 'Tasks, separated by comma and must be quoted.'
         required: true
         type: string
       runs-on:
-        description: 'Labels to filter the runners, seperated by comma and must be quoted.'
+        description: 'Labels to filter the runners, separated by comma and must be quoted.'
         default: "accuracy"
         required: false
         type: string
@@ -97,7 +97,7 @@ jobs:
           echo "precision=$precision" >> $GITHUB_OUTPUT
           echo "task=$task" >> $GITHUB_OUTPUT
           echo "runner=$runner" >> $GITHUB_OUTPUT
-  llm-harness-evalution:
+  llm-harness-evaluation:
     timeout-minutes: 1000
     needs: [llm-cpp-build, set-matrix]
     strategy:
@@ -201,7 +201,7 @@ jobs:
 
   llm-harness-summary:
     if: ${{ always() }}
-    needs: llm-harness-evalution
+    needs: llm-harness-evaluation
     runs-on: ubuntu-latest
     steps:
     - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
@@ -228,7 +228,7 @@ jobs:
   # TODO: change machine to store the results later
   llm-harness-summary-nightly:
     if: ${{github.event_name == 'schedule' || github.event_name == 'pull_request'}}
-    needs: [set-matrix, llm-harness-evalution]
+    needs: [set-matrix, llm-harness-evaluation]
     runs-on: ["self-hosted", "llm", "accuracy1", "accuracy-nightly"]
     steps:
     - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
diff --git a/.github/workflows/llm-ppl-evaluation.yml b/.github/workflows/llm-ppl-evaluation.yml
index 3a18ddfd..d379a71a 100644
--- a/.github/workflows/llm-ppl-evaluation.yml
+++ b/.github/workflows/llm-ppl-evaluation.yml
@@ -1,4 +1,4 @@
-name: LLM Perplexity Evalution
+name: LLM Perplexity Evaluation
 
 # Cancel previous runs in the PR when you push new commits
 concurrency:
@@ -24,11 +24,11 @@ on:
         required: true
         type: string
       model_name:
-        description: 'Model names, seperated by comma and must be quoted.'
+        description: 'Model names, separated by comma and must be quoted.'
         required: true
         type: string
       precision:
-        description: 'Precisions, seperated by comma and must be quoted.'
+        description: 'Precisions, separated by comma and must be quoted.'
         required: true
         type: string
       language:
@@ -36,7 +36,7 @@ on:
         required: true
         type: string
       runs-on:
-        description: 'Labels to filter the runners, seperated by comma and must be quoted.'
+        description: 'Labels to filter the runners, separated by comma and must be quoted.'
         default: "accuracy"
         required: false
         type: string
@@ -107,7 +107,7 @@ jobs:
           echo "precision=$precision" >> $GITHUB_OUTPUT
           echo "language=$language" >> $GITHUB_OUTPUT
           echo "runner=$runner" >> $GITHUB_OUTPUT
-  llm-ppl-evalution:
+  llm-ppl-evaluation:
     timeout-minutes: 1000
     needs: [llm-cpp-build, set-matrix]
     strategy:
diff --git a/docs/readthedocs/source/doc/Chronos/Overview/simulation.md b/docs/readthedocs/source/doc/Chronos/Overview/simulation.md
index 03fcc3bd..6d2488e4 100644
--- a/docs/readthedocs/source/doc/Chronos/Overview/simulation.md
+++ b/docs/readthedocs/source/doc/Chronos/Overview/simulation.md
@@ -8,7 +8,7 @@ Chronos provides simulators to generate synthetic time series data for users who
 ```
 
 ## 1. DPGANSimulator
-`DPGANSimulator` adopt DoppelGANger raised in [Using GANs for Sharing Networked Time Series Data: Challenges, Initial Promise, and Open Questions](http://arxiv.org/abs/1909.13403). The method is data-driven unsupervised method based on deep learning model with GAN (Generative Adversarial Networks) structure. The model features a pair of seperate attribute generator and feature generator and their corresponding discriminators `DPGANSimulator` also supports a rich and comprehensive input data (training data) format and outperform other algorithms in many evalution metrics.
+`DPGANSimulator` adopt DoppelGANger raised in [Using GANs for Sharing Networked Time Series Data: Challenges, Initial Promise, and Open Questions](http://arxiv.org/abs/1909.13403). The method is data-driven unsupervised method based on deep learning model with GAN (Generative Adversarial Networks) structure. The model features a pair of separate attribute generator and feature generator and their corresponding discriminators `DPGANSimulator` also supports a rich and comprehensive input data (training data) format and outperform other algorithms in many evaluation metrics.
 
 ```eval_rst
 .. note::
diff --git a/docs/readthedocs/source/doc/Chronos/QuickStart/index.md b/docs/readthedocs/source/doc/Chronos/QuickStart/index.md
index 7776674a..c207b645 100644
--- a/docs/readthedocs/source/doc/Chronos/QuickStart/index.md
+++ b/docs/readthedocs/source/doc/Chronos/QuickStart/index.md
@@ -281,7 +281,7 @@
View source on GitHub
- This example will demonstrate how to use ONNX to speed up the inferencing(prediction/evalution) on forecasters and AutoTSEstimator. In this example, onnx speed up the inferencing for ~4X.
+This example will demonstrate how to use ONNX to speed up the inferencing(prediction/evaluation) on forecasters and AutoTSEstimator. In this example, onnx speed up the inferencing for ~4X.