Stan  1.0
probability, sampling & optimization
util.hpp
Go to the documentation of this file.
1 #ifndef __STAN__MCMC__UTIL_HPP__
2 #define __STAN__MCMC__UTIL_HPP__
3 
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <limits>
#include <ostream>
#include <stdexcept>
#include <vector>

#include <boost/random/uniform_01.hpp>
#include <boost/random/mersenne_twister.hpp>
#include <boost/exception/diagnostic_information.hpp>
#include <boost/exception_ptr.hpp>

#include <stan/math/util.hpp>
#include <stan/model/prob_grad.hpp>
14 
15 namespace stan {
16 
17  namespace mcmc {
18 
19  void write_error_msgs(std::ostream* error_msgs,
20  const std::domain_error& e) {
21  if (!error_msgs) return;
22  *error_msgs << std::endl
23  << "Informational Message: The parameter state is about to be Metropolis"
24  << " rejected due to the following underlying, non-fatal (really)"
25  << " issue (and please ignore that what comes next might say 'error'): "
26  << e.what()
27  << std::endl
28  << "If the problem persists across multiple draws, you might have"
29  << " a problem with an initial state or a gradient somewhere."
30  << std::endl
31  << " If the problem does not persist, the resulting samples will still"
32  << " be drawn from the posterior."
33  << std::endl;
34  }
35 
36 
58  std::vector<int> z,
59  std::vector<double>& x, std::vector<double>& m,
60  std::vector<double>& g, double epsilon,
61  std::ostream* error_msgs = 0,
62  std::ostream* output_msgs = 0) {
63  stan::math::scaled_add(m, g, 0.5 * epsilon);
65  double logp;
66  try {
67  logp = model.grad_log_prob(x, z, g, output_msgs);
68  } catch (std::domain_error e) {
69  write_error_msgs(error_msgs,e);
70  logp = -std::numeric_limits<double>::infinity();
71  }
72  stan::math::scaled_add(m, g, 0.5 * epsilon);
73  return logp;
74  }
75 
76  // Returns the new log probability of x and m
77  // Catches domain errors and sets logp as -inf.
78  // Uses a different step size for each variable in x and m.
80  std::vector<int> z,
81  const std::vector<double>& step_sizes,
82  std::vector<double>& x, std::vector<double>& m,
83  std::vector<double>& g, double epsilon,
84  std::ostream* error_msgs = 0,
85  std::ostream* output_msgs = 0) {
86  for (size_t i = 0; i < m.size(); i++)
87  m[i] += 0.5 * epsilon * step_sizes[i] * g[i];
88  for (size_t i = 0; i < x.size(); i++)
89  x[i] += epsilon * step_sizes[i] * m[i];
90  double logp;
91  try {
92  logp = model.grad_log_prob(x, z, g, output_msgs);
93  } catch (std::domain_error e) {
94  write_error_msgs(error_msgs,e);
95  logp = -std::numeric_limits<double>::infinity();
96  }
97  for (size_t i = 0; i < m.size(); i++)
98  m[i] += 0.5 * epsilon * step_sizes[i] * g[i];
99  return logp;
100  }
101 
102  // this is for eventual gibbs sampler for discrete
103  int sample_unnorm_log(std::vector<double> probs,
104  boost::uniform_01<boost::mt19937&>& rand_uniform_01) {
105  // linearize and scale, but don't norm
106  double mx = stan::math::max(probs);
107  for (size_t k = 0; k < probs.size(); ++k)
108  probs[k] = exp(probs[k] - mx);
109 
110  // norm by scaling uniform sample
111  double sum_probs = stan::math::sum(probs);
112  // handles overrun due to arithmetic imprecision
113  double sample_0_sum = std::max(rand_uniform_01() * sum_probs, sum_probs);
114  int k = 0;
115  double cum_unnorm_prob = probs[0];
116  while (cum_unnorm_prob < sample_0_sum)
117  cum_unnorm_prob += probs[++k];
118  return k;
119  }
120 
121 
122  }
123 
124 }
125 
126 #endif
The prob_grad class represents densities with fixed numbers of discrete and scalar parameters and the gradient of the log probability function with respect to the scalar parameters.
Definition: prob_grad.hpp:24
virtual double grad_log_prob(std::vector< double > &params_r, std::vector< int > &params_i, std::vector< double > &gradient, std::ostream *output_stream=0)=0
var exp(const var &a)
Return the exponentiation of the specified variable (cmath).
Definition: agrad.hpp:1716
double epsilon()
Return minimum positive number representable.
T sum(const std::vector< T > &xs)
Return the sum of the values in the specified standard vector.
Definition: matrix.hpp:1131
double e()
Return the base of the natural logarithm.
double max(std::vector< double > &x)
Definition: util.hpp:16
int max(const std::vector< int > &x)
Returns the maximum coefficient in the specified column vector.
Definition: matrix.hpp:968
void scaled_add(std::vector< double > &x, std::vector< double > &y, double lambda)
Definition: util.hpp:47
double leapfrog(stan::model::prob_grad &model, std::vector< int > z, std::vector< double > &x, std::vector< double > &m, std::vector< double > &g, double epsilon, std::ostream *error_msgs=0, std::ostream *output_msgs=0)
Computes the log probability for a single leapfrog step in Hamiltonian Monte Carlo.
Definition: util.hpp:57
int sample_unnorm_log(std::vector< double > probs, boost::uniform_01< boost::mt19937 & > &rand_uniform_01)
Definition: util.hpp:103
void write_error_msgs(std::ostream *error_msgs, const std::domain_error &e)
Definition: util.hpp:19
double rescaled_leapfrog(stan::model::prob_grad &model, std::vector< int > z, const std::vector< double > &step_sizes, std::vector< double > &x, std::vector< double > &m, std::vector< double > &g, double epsilon, std::ostream *error_msgs=0, std::ostream *output_msgs=0)
Definition: util.hpp:79
Probability, optimization and sampling library.
Definition: agrad.cpp:6

     [ Stan Home Page ] © 2011–2012, Stan Development Team.