Monte Carlo Integration Library 1.0
High-performance Monte Carlo methods for numerical integration and optimization
MCMeanEstimator.tpp
Go to the documentation of this file.
/**
 * @file MCMeanEstimator.tpp
 * @brief MCMeanEstimator template implementation.
 * @details Implements standard Monte Carlo mean estimation with uniform sampling,
 *          domain rejection, and standard error computation.
 */
7
#include <algorithm>
#include <array>
#include <cmath>
#include <optional>
#include <random>
#include <stdexcept>

#include <omp.h>

#include "montecarlo/rng/rng_factory.hpp"
16
17namespace mc::estimators {
18
19/**
20 * @brief Estimates the mean of a function over a domain using standard Monte Carlo.
21 * @tparam dim The dimensionality of the integration domain.
22 *
23 * @param domain The integration domain constraint.
24 * @param seed Random seed for reproducibility.
25 * @param n_samples Number of samples to generate.
26 * @param f The function to estimate the mean of.
27 *
28 * @return MeanEstimate<dim> containing:
29 * - n_samples: Total samples generated
30 * - n_inside: Samples that fell within the domain
31 * - mean: Estimated mean value (sum of accepted samples / n_samples)
32 * - stderr: Standard error of the estimate
33 *
34 * @throws std::invalid_argument If n_samples is zero.
35 *
36 * @details
37 * The estimator uses uniform rejection sampling from the bounding box.
38 * The algorithm:
39 * 1. Samples uniformly from the domain's bounding box
40 * 2. Rejects samples outside the domain
41 * 3. Evaluates the function at accepted samples
42 * 4. Accumulates first and second moments
43 * 5. Computes sample variance for standard error
44 *
45 * The mean is computed as the sum of all function evaluations (including
46 * zero contributions from rejected samples) divided by total samples, which
47 * naturally incorporates the domain's volume ratio.
48 *
49 * The computation is parallelized across available threads with each thread
50 * maintaining independent accumulators and uniform distributions to avoid
51 * synchronization overhead.
52 *
53 * @note Uses OpenMP for parallel sampling.
54 * @note Thread-local distribution states improve cache locality.
55 */
56template <std::size_t dim>
57MeanEstimate<dim> MCMeanEstimator<dim>::estimate(const mc::domains::IntegrationDomain<dim>& domain,
58 std::uint32_t seed,
59 std::size_t n_samples,
60 const std::function<double(const mc::geom::Point<dim>&)>& f) const
61{
62 if (n_samples == 0) throw std::invalid_argument("n_samples must be > 0");
63
64 auto bounds = domain.getBounds();
65 for (std::size_t i = 0; i < dim; ++i) {
66 dist[i] = std::uniform_real_distribution<double>(bounds[i].first, bounds[i].second);
67 }
68
69 const int T = omp_get_max_threads();
70 const std::size_t base = n_samples / static_cast<std::size_t>(T);
71 const std::size_t rem = n_samples % static_cast<std::size_t>(T);
72
73 double sum = 0.0;
74 double sum2 = 0.0;
75 std::size_t inside_total = 0;
76
77#pragma omp parallel for reduction(+:sum,sum2,inside_total)
78 for (int tid = 0; tid < T; ++tid) {
79 const std::size_t n_local = base + (static_cast<std::size_t>(tid) < rem ? 1u : 0u);
80
81 auto rng = mc::rng::make_engine_with_seed(std::optional<std::uint32_t>{seed},
82 static_cast<std::uint64_t>(tid));
83
84 auto local_dist = dist;
85
86 for (std::size_t k = 0; k < n_local; ++k) {
87 geom::Point<dim> p;
88 for (std::size_t j = 0; j < dim; ++j) {
89 p[j] = local_dist[j](rng);
90 }
91
92 if (domain.isInside(p)) {
93 const double term = f(p);
94 sum += term;
95 sum2 += term * term;
96 inside_total += 1;
97 }
98 }
99 }
100
101 MeanEstimate<dim> out;
102 out.n_samples = n_samples;
103 out.n_inside = inside_total;
104
105 out.mean = sum / static_cast<double>(n_samples);
106 const double e2 = sum2 / static_cast<double>(n_samples);
107 const double var = std::max(0.0, e2 - out.mean * out.mean);
108 out.stderr = std::sqrt(var / static_cast<double>(n_samples));
109
110 return out;
111}
112
113} // namespace mc::estimators