/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

#pragma once
#include <aws/comprehend/Comprehend_EXPORTS.h>

namespace Aws
{
namespace Utils
{
namespace Json
{
  class JsonValue;
  class JsonView;
} // namespace Json
} // namespace Utils
namespace Comprehend
{
namespace Model
{

  /**
   * Describes the result metrics for the test data associated with a document
   * classifier.
   *
   * See Also: AWS API Reference
   */
  class AWS_COMPREHEND_API ClassifierEvaluationMetrics
  {
  public:

    ClassifierEvaluationMetrics();
    ClassifierEvaluationMetrics(Aws::Utils::Json::JsonView jsonValue);
    ClassifierEvaluationMetrics& operator=(Aws::Utils::Json::JsonView jsonValue);
    Aws::Utils::Json::JsonValue Jsonize() const;
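    /*
     * Illustrative usage sketch (not part of the generated interface): one way this
     * model could be populated from a JSON payload and read back. The field names
     * in the payload are assumptions inferred from the accessors in this class.
     *
     *   #include <aws/core/utils/json/JsonSerializer.h>
     *
     *   Aws::Utils::Json::JsonValue payload(R"({"Accuracy":0.94,"Precision":0.91,"Recall":0.88,"F1Score":0.89})");
     *   Aws::Comprehend::Model::ClassifierEvaluationMetrics metrics(payload.View());
     *   if (metrics.F1ScoreHasBeenSet())
     *   {
     *     double f1 = metrics.GetF1Score(); // 0.89
     *   }
     */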

    /**
     * The fraction of the labels that were correctly recognized. It is computed by
     * dividing the number of labels in the test documents that were correctly
     * recognized by the total number of labels in the test documents.
     */
    inline double GetAccuracy() const { return m_accuracy; }

    /**
     * The fraction of the labels that were correctly recognized. It is computed by
     * dividing the number of labels in the test documents that were correctly
     * recognized by the total number of labels in the test documents.
     */
    inline bool AccuracyHasBeenSet() const { return m_accuracyHasBeenSet; }

    /**
     * The fraction of the labels that were correctly recognized. It is computed by
     * dividing the number of labels in the test documents that were correctly
     * recognized by the total number of labels in the test documents.
     */
    inline void SetAccuracy(double value) { m_accuracyHasBeenSet = true; m_accuracy = value; }

    /**
     * The fraction of the labels that were correctly recognized. It is computed by
     * dividing the number of labels in the test documents that were correctly
     * recognized by the total number of labels in the test documents.
     */
    inline ClassifierEvaluationMetrics& WithAccuracy(double value) { SetAccuracy(value); return *this; }
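    // Worked example of the Accuracy definition above, with illustrative counts only:
    //
    //   double correctlyRecognizedLabels = 184.0; // hypothetical count from the test documents
    //   double totalLabels = 200.0;               // hypothetical total number of labels
    //   double accuracy = correctlyRecognizedLabels / totalLabels; // 0.92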

    /**
     * A measure of the usefulness of the classifier results in the test data. High
     * precision means that the classifier returned substantially more relevant
     * results than irrelevant ones.
     */
    inline double GetPrecision() const { return m_precision; }

    /**
     * A measure of the usefulness of the classifier results in the test data. High
     * precision means that the classifier returned substantially more relevant
     * results than irrelevant ones.
     */
    inline bool PrecisionHasBeenSet() const { return m_precisionHasBeenSet; }

    /**
     * A measure of the usefulness of the classifier results in the test data. High
     * precision means that the classifier returned substantially more relevant
     * results than irrelevant ones.
     */
    inline void SetPrecision(double value) { m_precisionHasBeenSet = true; m_precision = value; }

    /**
     * A measure of the usefulness of the classifier results in the test data. High
     * precision means that the classifier returned substantially more relevant
     * results than irrelevant ones.
     */
    inline ClassifierEvaluationMetrics& WithPrecision(double value) { SetPrecision(value); return *this; }
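    // For reference, precision in the usual sense is the share of returned labels
    // that are actually correct. A sketch with hypothetical counts:
    //
    //   double truePositives = 90.0;   // labels returned and actually present
    //   double falsePositives = 10.0;  // labels returned but not present
    //   double precision = truePositives / (truePositives + falsePositives); // 0.90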

    /**
     * A measure of how complete the classifier results are for the test data. High
     * recall means that the classifier returned most of the relevant results.
     */
    inline double GetRecall() const { return m_recall; }

    /**
     * A measure of how complete the classifier results are for the test data. High
     * recall means that the classifier returned most of the relevant results.
     */
    inline bool RecallHasBeenSet() const { return m_recallHasBeenSet; }

    /**
     * A measure of how complete the classifier results are for the test data. High
     * recall means that the classifier returned most of the relevant results.
     */
    inline void SetRecall(double value) { m_recallHasBeenSet = true; m_recall = value; }

    /**
     * A measure of how complete the classifier results are for the test data. High
     * recall means that the classifier returned most of the relevant results.
     */
    inline ClassifierEvaluationMetrics& WithRecall(double value) { SetRecall(value); return *this; }
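    // For reference, recall in the usual sense is the share of relevant labels that
    // were actually returned. A sketch with hypothetical counts:
    //
    //   double truePositives = 90.0;   // labels present and correctly returned
    //   double falseNegatives = 30.0;  // labels present but missed
    //   double recall = truePositives / (truePositives + falseNegatives); // 0.75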

    /**
     * A measure of how accurate the classifier results are for the test data. It is
     * derived from the Precision and Recall values. The F1Score is the harmonic
     * average of the two scores. The highest score is 1, and the worst score is 0.
     */
    inline double GetF1Score() const { return m_f1Score; }

    /**
     * A measure of how accurate the classifier results are for the test data. It is
     * derived from the Precision and Recall values. The F1Score is the harmonic
     * average of the two scores. The highest score is 1, and the worst score is 0.
     */
    inline bool F1ScoreHasBeenSet() const { return m_f1ScoreHasBeenSet; }

    /**
     * A measure of how accurate the classifier results are for the test data. It is
     * derived from the Precision and Recall values. The F1Score is the harmonic
     * average of the two scores. The highest score is 1, and the worst score is 0.
     */
    inline void SetF1Score(double value) { m_f1ScoreHasBeenSet = true; m_f1Score = value; }

    /**
     * A measure of how accurate the classifier results are for the test data. It is
     * derived from the Precision and Recall values. The F1Score is the harmonic
     * average of the two scores. The highest score is 1, and the worst score is 0.
     */
    inline ClassifierEvaluationMetrics& WithF1Score(double value) { SetF1Score(value); return *this; }
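    // The harmonic average mentioned above, spelled out with illustrative values:
    //
    //   double precision = 0.90;
    //   double recall = 0.75;
    //   double f1Score = 2.0 * precision * recall / (precision + recall); // ~0.818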

    /**
     * A measure of the usefulness of the classifier results in the test data. High
     * precision means that the classifier returned substantially more relevant
     * results than irrelevant ones. Unlike the Precision metric, which comes from
     * averaging the precision of all available labels, this is based on the overall
     * score of all precision scores added together.
     */
    inline double GetMicroPrecision() const { return m_microPrecision; }

    /**
     * A measure of the usefulness of the classifier results in the test data. High
     * precision means that the classifier returned substantially more relevant
     * results than irrelevant ones. Unlike the Precision metric, which comes from
     * averaging the precision of all available labels, this is based on the overall
     * score of all precision scores added together.
     */
    inline bool MicroPrecisionHasBeenSet() const { return m_microPrecisionHasBeenSet; }

    /**
     * A measure of the usefulness of the classifier results in the test data. High
     * precision means that the classifier returned substantially more relevant
     * results than irrelevant ones. Unlike the Precision metric, which comes from
     * averaging the precision of all available labels, this is based on the overall
     * score of all precision scores added together.
     */
    inline void SetMicroPrecision(double value) { m_microPrecisionHasBeenSet = true; m_microPrecision = value; }

    /**
     * A measure of the usefulness of the classifier results in the test data. High
     * precision means that the classifier returned substantially more relevant
     * results than irrelevant ones. Unlike the Precision metric, which comes from
     * averaging the precision of all available labels, this is based on the overall
     * score of all precision scores added together.
     */
    inline ClassifierEvaluationMetrics& WithMicroPrecision(double value) { SetMicroPrecision(value); return *this; }
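    // Sketch of the distinction described above, with hypothetical per-label counts:
    // Precision averages each label's precision, while micro precision pools the
    // counts across labels before dividing.
    //
    //   // label A: 50 true positives, 10 false positives -> precision ~0.833
    //   // label B:  5 true positives,  5 false positives -> precision  0.500
    //   double averagedPrecision = (50.0 / 60.0 + 5.0 / 10.0) / 2.0; // ~0.667
    //   double microPrecision = (50.0 + 5.0) / (60.0 + 10.0);        // ~0.786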

    /**
     * A measure of how complete the classifier results are for the test data. High
     * recall means that the classifier returned most of the relevant results.
     * Specifically, this indicates how many of the correct categories in the text
     * the model can predict. It is a percentage of the correct categories in the
     * text that can be found. Instead of averaging the recall scores of all labels
     * (as with Recall), micro Recall is based on the overall score of all recall
     * scores added together.
     */
    inline double GetMicroRecall() const { return m_microRecall; }

    /**
     * A measure of how complete the classifier results are for the test data. High
     * recall means that the classifier returned most of the relevant results.
     * Specifically, this indicates how many of the correct categories in the text
     * the model can predict. It is a percentage of the correct categories in the
     * text that can be found. Instead of averaging the recall scores of all labels
     * (as with Recall), micro Recall is based on the overall score of all recall
     * scores added together.
     */
    inline bool MicroRecallHasBeenSet() const { return m_microRecallHasBeenSet; }

    /**
     * A measure of how complete the classifier results are for the test data. High
     * recall means that the classifier returned most of the relevant results.
     * Specifically, this indicates how many of the correct categories in the text
     * the model can predict. It is a percentage of the correct categories in the
     * text that can be found. Instead of averaging the recall scores of all labels
     * (as with Recall), micro Recall is based on the overall score of all recall
     * scores added together.
     */
    inline void SetMicroRecall(double value) { m_microRecallHasBeenSet = true; m_microRecall = value; }

    /**
     * A measure of how complete the classifier results are for the test data. High
     * recall means that the classifier returned most of the relevant results.
     * Specifically, this indicates how many of the correct categories in the text
     * the model can predict. It is a percentage of the correct categories in the
     * text that can be found. Instead of averaging the recall scores of all labels
     * (as with Recall), micro Recall is based on the overall score of all recall
     * scores added together.
     */
    inline ClassifierEvaluationMetrics& WithMicroRecall(double value) { SetMicroRecall(value); return *this; }
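    // Analogous sketch for micro recall, with hypothetical per-label counts: the
    // true positives and false negatives of all labels are pooled before dividing.
    //
    //   // label A: 50 true positives, 20 false negatives -> recall ~0.714
    //   // label B:  5 true positives, 15 false negatives -> recall  0.250
    //   double averagedRecall = (50.0 / 70.0 + 5.0 / 20.0) / 2.0; // ~0.482
    //   double microRecall = (50.0 + 5.0) / (70.0 + 20.0);        // ~0.611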

    /**
     * A measure of how accurate the classifier results are for the test data. It is
     * a combination of the Micro Precision and Micro Recall values. The Micro
     * F1Score is the harmonic mean of the two scores. The highest score is 1, and
     * the worst score is 0.
     */
    inline double GetMicroF1Score() const { return m_microF1Score; }

    /**
     * A measure of how accurate the classifier results are for the test data. It is
     * a combination of the Micro Precision and Micro Recall values. The Micro
     * F1Score is the harmonic mean of the two scores. The highest score is 1, and
     * the worst score is 0.
     */
    inline bool MicroF1ScoreHasBeenSet() const { return m_microF1ScoreHasBeenSet; }

    /**
     * A measure of how accurate the classifier results are for the test data. It is
     * a combination of the Micro Precision and Micro Recall values. The Micro
     * F1Score is the harmonic mean of the two scores. The highest score is 1, and
     * the worst score is 0.
     */
    inline void SetMicroF1Score(double value) { m_microF1ScoreHasBeenSet = true; m_microF1Score = value; }

    /**
     * A measure of how accurate the classifier results are for the test data. It is
     * a combination of the Micro Precision and Micro Recall values. The Micro
     * F1Score is the harmonic mean of the two scores. The highest score is 1, and
     * the worst score is 0.
     */
    inline ClassifierEvaluationMetrics& WithMicroF1Score(double value) { SetMicroF1Score(value); return *this; }
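    // As with F1Score above, this is a harmonic mean, here of the micro-averaged
    // scores (illustrative form):
    //
    //   double microF1Score = 2.0 * microPrecision * microRecall / (microPrecision + microRecall);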

    /**
     * Indicates the fraction of labels that are incorrectly predicted. Also seen as
     * the fraction of wrong labels compared to the total number of labels. Scores
     * closer to zero are better.
     */
    inline double GetHammingLoss() const { return m_hammingLoss; }

    /**
     * Indicates the fraction of labels that are incorrectly predicted. Also seen as
     * the fraction of wrong labels compared to the total number of labels. Scores
     * closer to zero are better.
     */
    inline bool HammingLossHasBeenSet() const { return m_hammingLossHasBeenSet; }

    /**
     * Indicates the fraction of labels that are incorrectly predicted. Also seen as
     * the fraction of wrong labels compared to the total number of labels. Scores
     * closer to zero are better.
     */
    inline void SetHammingLoss(double value) { m_hammingLossHasBeenSet = true; m_hammingLoss = value; }
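    // Worked example of the Hamming loss definition above, with illustrative counts
    // only:
    //
    //   double incorrectlyPredictedLabels = 14.0; // hypothetical count
    //   double totalLabels = 200.0;               // hypothetical total number of labels
    //   double hammingLoss = incorrectlyPredictedLabels / totalLabels; // 0.07 (closer to zero is better)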

    /**
     * Indicates the fraction of labels that are incorrectly predicted. Also seen as
     * the fraction of wrong labels compared to the total number of labels. Scores
     * closer to zero are better.
     */
    inline ClassifierEvaluationMetrics& WithHammingLoss(double value) { SetHammingLoss(value); return *this; }

  private:

    double m_accuracy;
    bool m_accuracyHasBeenSet;

    double m_precision;
    bool m_precisionHasBeenSet;

    double m_recall;
    bool m_recallHasBeenSet;

    double m_f1Score;
    bool m_f1ScoreHasBeenSet;

    double m_microPrecision;
    bool m_microPrecisionHasBeenSet;

    double m_microRecall;
    bool m_microRecallHasBeenSet;

    double m_microF1Score;
    bool m_microF1ScoreHasBeenSet;

    double m_hammingLoss;
    bool m_hammingLossHasBeenSet;
  };

} // namespace Model
} // namespace Comprehend
} // namespace Aws