
Commit

tests..
trivialfis committed Feb 3, 2023
1 parent 613aab7 commit 1dbc98b
Showing 3 changed files with 23 additions and 19 deletions.
22 changes: 11 additions & 11 deletions src/common/stats.h
@@ -11,18 +11,18 @@
 #include "common.h"              // AssertGPUSupport, OptionalWeights
 #include "transform_iterator.h"  // MakeIndexTransformIter
 #include "xgboost/context.h"     // Context
-#include "xgboost/linalg.h"
-#include "xgboost/logging.h"  // CHECK_GE
+#include "xgboost/linalg.h"   // TensorView,VectorView
+#include "xgboost/logging.h"  // CHECK_GE
 
 namespace xgboost {
 namespace common {
 
 /**
- * \brief Percentile with masked array using linear interpolation.
+ * @brief Quantile using linear interpolation.
  *
  *   https://www.itl.nist.gov/div898/handbook/prc/section2/prc262.htm
  *
- * \param alpha Percentile, must be in range [0, 1].
+ * \param alpha Quantile, must be in range [0, 1].
  * \param begin Iterator begin for input array.
  * \param end   Iterator end for input array.
  *
@@ -36,7 +36,7 @@ float Quantile(double alpha, Iter const& begin, Iter const& end) {
     return std::numeric_limits<float>::quiet_NaN();
   }
 
-  std::vector<size_t> sorted_idx(n);
+  std::vector<std::size_t> sorted_idx(n);
   std::iota(sorted_idx.begin(), sorted_idx.end(), 0);
   if (omp_in_parallel()) {
     std::stable_sort(sorted_idx.begin(), sorted_idx.end(),
@@ -56,7 +56,7 @@ float Quantile(double alpha, Iter const& begin, Iter const& end) {
   if (alpha >= (n / (n + 1))) {
     return val(sorted_idx.size() - 1);
   }
-  assert(n != 0 && "The number of rows in a leaf can not be zero.");
+
   double x = alpha * static_cast<double>((n + 1));
   double k = std::floor(x) - 1;
   CHECK_GE(k, 0);
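
The arithmetic in the hunk above follows the linear-interpolation rule from the NIST handbook referenced in the header comment: the fractional rank is x = alpha * (n + 1), the lower 0-based index is floor(x) - 1, and the result interpolates between the two neighbouring order statistics. A minimal standalone sketch of that rule, with a hypothetical name and a pre-sorted copy instead of the index sort used in stats.h:

#include <algorithm>  // std::sort
#include <cassert>
#include <cmath>      // std::floor
#include <cstddef>
#include <vector>

// Hypothetical helper, illustrative only: NIST-style quantile with linear
// interpolation between order statistics.
double LerpQuantile(double alpha, std::vector<double> v) {
  assert(!v.empty() && alpha >= 0.0 && alpha <= 1.0);
  std::sort(v.begin(), v.end());
  double n = static_cast<double>(v.size());
  if (alpha <= 1.0 / (n + 1.0)) return v.front();  // rank falls below the first value
  if (alpha >= n / (n + 1.0)) return v.back();     // rank falls above the last value
  double x = alpha * (n + 1.0);                    // fractional rank (1-based)
  std::size_t k = static_cast<std::size_t>(std::floor(x)) - 1;  // lower index (0-based)
  double d = x - std::floor(x);                    // interpolation weight
  return v[k] + d * (v[k + 1] - v[k]);             // e.g. {1, 2, 3, 4}, alpha = 0.5 -> 2.5
}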
@@ -71,11 +71,11 @@ float Quantile(double alpha, Iter const& begin, Iter const& end) {
  * \brief Calculate the weighted quantile with step function. Unlike the unweighted
  *        version, no interpolation is used.
  *
- * See https://aakinshin.net/posts/weighted-quantiles/ for some discussion on computing
+ * See https://aakinshin.net/posts/weighted-quantiles/ for some discussions on computing
  *     weighted quantile with interpolation.
  */
 template <typename Iter, typename WeightIter>
-float WeightedQuantile(double alpha, Iter begin, Iter end, WeightIter weights) {
+float WeightedQuantile(double alpha, Iter begin, Iter end, WeightIter w_begin) {
   auto n = static_cast<double>(std::distance(begin, end));
   if (n == 0) {
     return std::numeric_limits<float>::quiet_NaN();
@@ -95,12 +95,12 @@ float WeightedQuantile(double alpha, Iter begin, Iter end, WeightIter weights) {
 
   std::vector<float> weight_cdf(n);  // S_n
   // weighted cdf is sorted during construction
-  weight_cdf[0] = *(weights + sorted_idx[0]);
+  weight_cdf[0] = *(w_begin + sorted_idx[0]);
   for (size_t i = 1; i < n; ++i) {
-    weight_cdf[i] = weight_cdf[i - 1] + *(weights + sorted_idx[i]);
+    weight_cdf[i] = weight_cdf[i - 1] + w_begin[sorted_idx[i]];
   }
   float thresh = weight_cdf.back() * alpha;
-  size_t idx =
+  std::size_t idx =
       std::lower_bound(weight_cdf.cbegin(), weight_cdf.cend(), thresh) - weight_cdf.cbegin();
   idx = std::min(idx, static_cast<size_t>(n - 1));
   return val(idx);
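
WeightedQuantile, in contrast, uses a step function: values are ordered, a running sum of their weights forms a CDF, and the answer is the first value whose cumulative weight reaches alpha times the total weight, with no interpolation (the linked post discusses interpolating variants). A minimal standalone sketch under the same assumptions, with a hypothetical name and non-empty input:

#include <algorithm>  // std::lower_bound, std::min, std::sort
#include <cstddef>
#include <numeric>    // std::iota
#include <vector>

// Hypothetical helper, illustrative only: step-function weighted quantile.
double StepWeightedQuantile(double alpha, const std::vector<double>& v,
                            const std::vector<double>& w) {
  std::vector<std::size_t> idx(v.size());
  std::iota(idx.begin(), idx.end(), 0);
  std::sort(idx.begin(), idx.end(),
            [&](std::size_t l, std::size_t r) { return v[l] < v[r]; });

  std::vector<double> cdf(v.size());  // running weight sum in value order
  double running = 0.0;
  for (std::size_t i = 0; i < idx.size(); ++i) {
    running += w[idx[i]];
    cdf[i] = running;
  }
  double thresh = cdf.back() * alpha;
  auto k = static_cast<std::size_t>(
      std::lower_bound(cdf.cbegin(), cdf.cend(), thresh) - cdf.cbegin());
  k = std::min(k, idx.size() - 1);
  return v[idx[k]];  // e.g. v = {1, 2, 3}, w = {1, 1, 1}, alpha = 0.5 -> 2
}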
6 changes: 0 additions & 6 deletions src/objective/quantile_obj.cu
@@ -168,12 +168,6 @@ class QuantileRegression : public ObjFunction {
     // For multiple quantiles, we should extend the base score to a vector instead of
     // computing the average. For now, this is a workaround.
     linalg::Vector<float> temp;
-    auto h_base_score = base_score->HostView();
-    std::cout << "Base scores:" << std::endl;
-    for (std::size_t i =0; i < h_base_score.Size(); ++i) {
-      std::cout << h_base_score(i) << ", ";
-    }
-    std::cout << std::endl;
     common::Mean(ctx_, *base_score, &temp);
     double meanq = temp(0) * sw;
 
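This hunk only drops debug printing; the workaround described in the surviving comment stays in place: with several quantile_alpha values, the objective still produces a single scalar base score by averaging the per-alpha estimates via common::Mean. A rough sketch of what that collapse amounts to (hypothetical function, not xgboost code):

#include <cassert>
#include <vector>

// Hypothetical illustration: one quantile estimate per alpha, collapsed into a
// single scalar intercept by taking their mean.
double ScalarIntercept(const std::vector<double>& per_alpha_quantiles) {
  assert(!per_alpha_quantiles.empty());
  double sum = 0.0;
  for (double q : per_alpha_quantiles) sum += q;
  return sum / static_cast<double>(per_alpha_quantiles.size());
}

With quantile_alpha = [0.6, 0.8] and per-alpha estimates of 5.6 and 7.8, this yields 6.7, which is what the updated test below asserts.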
14 changes: 12 additions & 2 deletions tests/cpp/objective/test_quantile_obj.cc
@@ -38,7 +38,7 @@ TEST(Objective, DeclareUnifiedTest(Quantile)) {
 
 TEST(Objective, DeclareUnifiedTest(QuantileIntercept)) {
   Context ctx = CreateEmptyGenericParam(GPUIDX);
-  Args args{{"quantile_alpha", "[0.6]"}};
+  Args args{{"quantile_alpha", "[0.6, 0.8]"}};
   std::unique_ptr<ObjFunction> obj{ObjFunction::Create("reg:quantileerror", &ctx)};
   obj->Configure(args);
 
@@ -59,6 +59,16 @@ TEST(Objective, DeclareUnifiedTest(QuantileIntercept)) {
   linalg::Vector<float> base_scores;
   obj->InitEstimation(info, &base_scores);
   ASSERT_EQ(base_scores.Size(), 1) << "Vector is not yet supported.";
-  std::cout << base_scores(0) << std::endl;
+  // mean([5.6, 7.8])
+  ASSERT_NEAR(base_scores(0), 6.7, kRtEps);
+
+  for (std::size_t i = 0; i < info.num_row_; ++i) {
+    info.weights_.HostVector().emplace_back(info.num_row_ - i - 1.0);
+  }
+
+  obj->InitEstimation(info, &base_scores);
+  ASSERT_EQ(base_scores.Size(), 1) << "Vector is not yet supported.";
+  // mean([3, 5])
+  ASSERT_NEAR(base_scores(0), 4.0, kRtEps);
 }
 }  // namespace xgboost
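
The expected values in the two ASSERT_NEAR calls are just the means spelled out in the preceding comments; a throwaway check of that arithmetic (not part of the test):

#include <cassert>
#include <cmath>

int main() {
  assert(std::abs((5.6 + 7.8) / 2.0 - 6.7) < 1e-9);  // unweighted case
  assert(std::abs((3.0 + 5.0) / 2.0 - 4.0) < 1e-9);  // weighted case
  return 0;
}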
