/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/comprehend/Comprehend_EXPORTS.h>

namespace Aws
{
namespace Utils
{
namespace Json
{
  class JsonValue;
  class JsonView;
} // namespace Json
} // namespace Utils
namespace Comprehend
{
namespace Model
{

  /**
   * Describes the result metrics for the test data associated with a document
   * classifier.
   *
   * See Also: AWS API Reference
   */
  class AWS_COMPREHEND_API ClassifierEvaluationMetrics
  {
  public:
    ClassifierEvaluationMetrics();
    ClassifierEvaluationMetrics(Aws::Utils::Json::JsonView jsonValue);
    ClassifierEvaluationMetrics& operator=(Aws::Utils::Json::JsonView jsonValue);
    Aws::Utils::Json::JsonValue Jsonize() const;

    ///@{
    /**
     * The fraction of the labels that were correctly recognized. It is computed by
     * dividing the number of labels in the test documents that were correctly
     * recognized by the total number of labels in the test documents.
     */
    inline double GetAccuracy() const { return m_accuracy; }
    inline bool AccuracyHasBeenSet() const { return m_accuracyHasBeenSet; }
    inline void SetAccuracy(double value) { m_accuracyHasBeenSet = true; m_accuracy = value; }
    inline ClassifierEvaluationMetrics& WithAccuracy(double value) { SetAccuracy(value); return *this; }
    ///@}

    ///@{
    /**
     * A measure of the usefulness of the classifier results in the test data. High
     * precision means that the classifier returned substantially more relevant
     * results than irrelevant ones.
     */
    inline double GetPrecision() const { return m_precision; }
    inline bool PrecisionHasBeenSet() const { return m_precisionHasBeenSet; }
    inline void SetPrecision(double value) { m_precisionHasBeenSet = true; m_precision = value; }
    inline ClassifierEvaluationMetrics& WithPrecision(double value) { SetPrecision(value); return *this; }
    ///@}

    ///@{
    /**
     * A measure of how complete the classifier results are for the test data. High
     * recall means that the classifier returned most of the relevant results.
     */
    inline double GetRecall() const { return m_recall; }
    inline bool RecallHasBeenSet() const { return m_recallHasBeenSet; }
    inline void SetRecall(double value) { m_recallHasBeenSet = true; m_recall = value; }
    inline ClassifierEvaluationMetrics& WithRecall(double value) { SetRecall(value); return *this; }
    ///@}

    ///@{
    /**
     * A measure of how accurate the classifier results are for the test data. It is
     * derived from the Precision and Recall values. The F1Score is the harmonic
     * average of the two scores. The highest score is 1, and the worst score is 0.
     */
    inline double GetF1Score() const { return m_f1Score; }
    inline bool F1ScoreHasBeenSet() const { return m_f1ScoreHasBeenSet; }
    inline void SetF1Score(double value) { m_f1ScoreHasBeenSet = true; m_f1Score = value; }
    inline ClassifierEvaluationMetrics& WithF1Score(double value) { SetF1Score(value); return *this; }
    ///@}
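
    // Note on the macro-averaged scores above: with precision P and recall R,
    // the F1 score is the harmonic mean of the two,
    //
    //   F1 = 2 * P * R / (P + R)
    //
    // For example, P = 0.9 and R = 0.6 give F1 = 1.08 / 1.5 = 0.72, which is
    // pulled toward the lower of the two scores (the arithmetic mean would be
    // 0.75).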
    ///@{
    /**
     * A measure of the usefulness of the classifier results in the test data. High
     * precision means that the classifier returned substantially more relevant
     * results than irrelevant ones. Unlike the Precision metric, which comes from
     * averaging the precision of all available labels, this is based on the overall
     * score of all precision scores added together.
     */
    inline double GetMicroPrecision() const { return m_microPrecision; }
    inline bool MicroPrecisionHasBeenSet() const { return m_microPrecisionHasBeenSet; }
    inline void SetMicroPrecision(double value) { m_microPrecisionHasBeenSet = true; m_microPrecision = value; }
    inline ClassifierEvaluationMetrics& WithMicroPrecision(double value) { SetMicroPrecision(value); return *this; }
    ///@}

    ///@{
    /**
     * A measure of how complete the classifier results are for the test data. High
     * recall means that the classifier returned most of the relevant results.
     * Specifically, this indicates how many of the correct categories in the text
     * the model can predict, as a percentage of the correct categories that can be
     * found. Instead of averaging the recall scores of all labels (as with Recall),
     * micro Recall is based on the overall score of all recall scores added
     * together.
     */
    inline double GetMicroRecall() const { return m_microRecall; }
    inline bool MicroRecallHasBeenSet() const { return m_microRecallHasBeenSet; }
    inline void SetMicroRecall(double value) { m_microRecallHasBeenSet = true; m_microRecall = value; }
    inline ClassifierEvaluationMetrics& WithMicroRecall(double value) { SetMicroRecall(value); return *this; }
    ///@}

    ///@{
    /**
     * A measure of how accurate the classifier results are for the test data. It is
     * a combination of the Micro Precision and Micro Recall values. The Micro
     * F1Score is the harmonic mean of the two scores. The highest score is 1, and
     * the worst score is 0.
     */
    inline double GetMicroF1Score() const { return m_microF1Score; }
    inline bool MicroF1ScoreHasBeenSet() const { return m_microF1ScoreHasBeenSet; }
    inline void SetMicroF1Score(double value) { m_microF1ScoreHasBeenSet = true; m_microF1Score = value; }
    inline ClassifierEvaluationMetrics& WithMicroF1Score(double value) { SetMicroF1Score(value); return *this; }
    ///@}
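
    // Note on the micro-averaged scores above: micro averaging conventionally
    // pools true positives (TP), false positives (FP), and false negatives (FN)
    // across all labels before computing the scores, so frequent labels weigh
    // more heavily than in the macro-averaged metrics:
    //
    //   MicroPrecision = TP / (TP + FP)
    //   MicroRecall    = TP / (TP + FN)
    //   MicroF1Score   = 2 * MicroPrecision * MicroRecall / (MicroPrecision + MicroRecall)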
    ///@{
    /**
     * Indicates the fraction of labels that are incorrectly predicted. Also seen as
     * the fraction of wrong labels compared to the total number of labels. Scores
     * closer to zero are better.
     */
    inline double GetHammingLoss() const { return m_hammingLoss; }
    inline bool HammingLossHasBeenSet() const { return m_hammingLossHasBeenSet; }
    inline void SetHammingLoss(double value) { m_hammingLossHasBeenSet = true; m_hammingLoss = value; }
    inline ClassifierEvaluationMetrics& WithHammingLoss(double value) { SetHammingLoss(value); return *this; }
    ///@}

  private:

    double m_accuracy;
    bool m_accuracyHasBeenSet;

    double m_precision;
    bool m_precisionHasBeenSet;

    double m_recall;
    bool m_recallHasBeenSet;

    double m_f1Score;
    bool m_f1ScoreHasBeenSet;

    double m_microPrecision;
    bool m_microPrecisionHasBeenSet;

    double m_microRecall;
    bool m_microRecallHasBeenSet;

    double m_microF1Score;
    bool m_microF1ScoreHasBeenSet;

    double m_hammingLoss;
    bool m_hammingLossHasBeenSet;
  };

} // namespace Model
} // namespace Comprehend
} // namespace Aws
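
/*
 * Illustrative usage sketch (not part of the generated header). Assuming a
 * configured Aws::Comprehend::ComprehendClient named `client` and a trained
 * classifier, these metrics are typically read from a DescribeDocumentClassifier
 * response; accessor names follow the Comprehend model classes:
 *
 *   Aws::Comprehend::Model::DescribeDocumentClassifierRequest request;
 *   request.SetDocumentClassifierArn("arn:aws:comprehend:...");  // your classifier ARN
 *   auto outcome = client.DescribeDocumentClassifier(request);
 *   if (outcome.IsSuccess())
 *   {
 *     const auto& metrics = outcome.GetResult()
 *                                  .GetDocumentClassifierProperties()
 *                                  .GetClassifierMetadata()
 *                                  .GetEvaluationMetrics();
 *     double f1 = metrics.GetF1Score();        // macro F1: best 1, worst 0
 *     double loss = metrics.GetHammingLoss();  // fraction of wrong labels; lower is better
 *   }
 */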