author:    tender-bum <tender-bum@yandex-team.ru>  2022-02-10 16:50:01 +0300
committer: Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:50:01 +0300
commit:    4aef354b224559d2b031487a10d4f5cc6e82e95a
tree:      5d5cb817648f650d76cf1076100726fd9b8448e8 /library/cpp/linear_regression
parent:    c78b06a63de7beec995c1007bc5332bdf3d75b69
download:  ydb-4aef354b224559d2b031487a10d4f5cc6e82e95a.tar.gz
Restoring authorship annotation for <tender-bum@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'library/cpp/linear_regression')
-rw-r--r--  library/cpp/linear_regression/linear_regression_ut.cpp  |  14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/library/cpp/linear_regression/linear_regression_ut.cpp b/library/cpp/linear_regression/linear_regression_ut.cpp
index fc266e1616..e71a16b67a 100644
--- a/library/cpp/linear_regression/linear_regression_ut.cpp
+++ b/library/cpp/linear_regression/linear_regression_ut.cpp
@@ -31,7 +31,7 @@ Y_UNIT_TEST_SUITE(TLinearRegressionTest) {
deviationCalculator.Add(arguments[i], weights[i]);
}
- double actualMean = InnerProduct(arguments, weights) / Accumulate(weights, 0.0);
+ double actualMean = InnerProduct(arguments, weights) / Accumulate(weights, 0.0);
double actualDeviation = 0.;
for (size_t i = 0; i < arguments.size(); ++i) {
double deviation = arguments[i] - actualMean;
@@ -47,7 +47,7 @@ Y_UNIT_TEST_SUITE(TLinearRegressionTest) {
UNIT_ASSERT(IsValidFloat(meanCalculator.GetSumWeights()));
UNIT_ASSERT(IsValidFloat(deviationCalculator.GetSumWeights()));
UNIT_ASSERT_DOUBLES_EQUAL(meanCalculator.GetSumWeights(), deviationCalculator.GetSumWeights(), 0);
- UNIT_ASSERT_DOUBLES_EQUAL(meanCalculator.GetSumWeights(), Accumulate(weights, 0.0), 0);
+ UNIT_ASSERT_DOUBLES_EQUAL(meanCalculator.GetSumWeights(), Accumulate(weights, 0.0), 0);
ValueIsCorrect(deviationCalculator.GetDeviation(), actualDeviation, 1e-5);
@@ -94,8 +94,8 @@ Y_UNIT_TEST_SUITE(TLinearRegressionTest) {
covariationCalculator.Add(firstValues[i], secondValues[i], weights[i]);
}
- const double firstValuesMean = InnerProduct(firstValues, weights) / Accumulate(weights, 0.0);
- const double secondValuesMean = InnerProduct(secondValues, weights) / Accumulate(weights, 0.0);
+ const double firstValuesMean = InnerProduct(firstValues, weights) / Accumulate(weights, 0.0);
+ const double secondValuesMean = InnerProduct(secondValues, weights) / Accumulate(weights, 0.0);
double actualCovariation = 0.;
for (size_t i = 0; i < argumentsCount; ++i) {
@@ -110,7 +110,7 @@ Y_UNIT_TEST_SUITE(TLinearRegressionTest) {
UNIT_ASSERT_DOUBLES_EQUAL(covariationCalculator.GetSecondValueMean(), secondValuesMean, 1e-10);
UNIT_ASSERT(IsValidFloat(covariationCalculator.GetSumWeights()));
- UNIT_ASSERT_DOUBLES_EQUAL(covariationCalculator.GetSumWeights(), Accumulate(weights, 0.0), 0);
+ UNIT_ASSERT_DOUBLES_EQUAL(covariationCalculator.GetSumWeights(), Accumulate(weights, 0.0), 0);
ValueIsCorrect(covariationCalculator.GetCovariation(), actualCovariation, 1e-5);
@@ -170,7 +170,7 @@ Y_UNIT_TEST_SUITE(TLinearRegressionTest) {
}
if (!regularizationThreshold) {
- UNIT_ASSERT(predictedSumSquaredErrors < Accumulate(weights, 0.0) * randomError * randomError);
+ UNIT_ASSERT(predictedSumSquaredErrors < Accumulate(weights, 0.0) * randomError * randomError);
}
UNIT_ASSERT_DOUBLES_EQUAL(predictedSumSquaredErrors, sumSquaredErrors, 1e-8);
}
@@ -227,7 +227,7 @@ Y_UNIT_TEST_SUITE(TLinearRegressionTest) {
}
UNIT_ASSERT_DOUBLES_EQUAL(model.GetIntercept(), intercept, 1e-2);
- const double expectedSumSquaredErrors = randomError * randomError * Accumulate(weights, 0.0);
+ const double expectedSumSquaredErrors = randomError * randomError * Accumulate(weights, 0.0);
UNIT_ASSERT_DOUBLES_EQUAL(lrSolver.SumSquaredErrors(), expectedSumSquaredErrors, expectedSumSquaredErrors * 0.01);
}
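
For reference, the assertions in this test compare the streaming calculators against a direct weighted computation of the form InnerProduct(values, weights) / Accumulate(weights, 0.0). Below is a minimal sketch of that reference computation, assuming InnerProduct and Accumulate behave like std::inner_product and std::accumulate; the sample data and the weighted-squared-deviation accumulation (which is not fully visible in the hunk) are illustrative assumptions, not the test's actual code.

#include <cstddef>
#include <cstdio>
#include <numeric>
#include <vector>

int main() {
    // Illustrative data; the unit test generates its own arguments and weights.
    std::vector<double> arguments = {1.0, 2.0, 3.0};
    std::vector<double> weights = {0.5, 1.0, 2.0};

    // Weighted mean: sum(x_i * w_i) / sum(w_i), mirroring
    // InnerProduct(arguments, weights) / Accumulate(weights, 0.0).
    const double sumWeights = std::accumulate(weights.begin(), weights.end(), 0.0);
    const double actualMean =
        std::inner_product(arguments.begin(), arguments.end(), weights.begin(), 0.0) / sumWeights;

    // Weighted sum of squared deviations from the mean (assumed accumulation;
    // the hunk only shows the per-element deviation being computed).
    double actualDeviation = 0.0;
    for (std::size_t i = 0; i < arguments.size(); ++i) {
        const double deviation = arguments[i] - actualMean;
        actualDeviation += deviation * deviation * weights[i];
    }

    std::printf("mean=%g deviation=%g sumWeights=%g\n", actualMean, actualDeviation, sumWeights);
    return 0;
}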