Stan 1.0
probability, sampling & optimization
 All Classes Namespaces Files Functions Variables Typedefs Enumerator Friends Macros Pages
util.hpp
Go to the documentation of this file.
1 #ifndef __STAN__MCMC__UTIL_HPP__
2 #define __STAN__MCMC__UTIL_HPP__
3 
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <limits>
#include <ostream>
#include <stdexcept>
#include <vector>

#include <boost/random/uniform_01.hpp>
#include <boost/random/mersenne_twister.hpp>
#include <boost/exception/diagnostic_information.hpp>
#include <boost/exception_ptr.hpp>

#include <stan/math/util.hpp>
#include <stan/model/prob_grad.hpp>
14 
15 namespace stan {
16 
17  namespace mcmc {
18 
/**
 * Write an informational message explaining that the current
 * parameter state is about to be Metropolis-rejected because of a
 * recoverable domain error raised while evaluating the model.
 *
 * Does nothing when the output stream pointer is null.
 *
 * @param error_msgs Stream to receive the message, or 0 to suppress it.
 * @param e The domain error caught during log-probability evaluation.
 */
void write_error_msgs(std::ostream* error_msgs,
                      const std::domain_error& e) {
  if (error_msgs == 0)
    return;
  std::ostream& out = *error_msgs;
  out << std::endl;
  out << "Informational Message: The parameter state is about to be Metropolis";
  out << " rejected due to the following underlying, non-fatal (really)";
  out << " issue (and please ignore that what comes next might say 'error'): ";
  out << e.what();
  out << std::endl;
  out << "If the problem persists across multiple draws, you might have";
  out << " a problem with an initial state or a gradient somewhere.";
  out << std::endl;
  out << " If the problem does not persist, the resulting samples will still";
  out << " be drawn from the posterior.";
  out << std::endl;
}
35 
36 
58  std::vector<int> z,
59  std::vector<double>& x, std::vector<double>& m,
60  std::vector<double>& g, double epsilon,
61  std::ostream* error_msgs = 0,
62  std::ostream* output_msgs = 0) {
63  stan::math::scaled_add(m, g, 0.5 * epsilon);
64  stan::math::scaled_add(x, m, epsilon);
65  double logp;
66  try {
67  logp = model.grad_log_prob(x, z, g, output_msgs);
68  } catch (std::domain_error e) {
69  write_error_msgs(error_msgs,e);
70  logp = -std::numeric_limits<double>::infinity();
71  }
72  stan::math::scaled_add(m, g, 0.5 * epsilon);
73  return logp;
74  }
75 
76  // Returns the new log probability of x and m
77  // Catches domain errors and sets logp as -inf.
78  // Uses a different step size for each variable in x and m.
80  std::vector<int> z,
81  const std::vector<double>& step_sizes,
82  std::vector<double>& x, std::vector<double>& m,
83  std::vector<double>& g, double epsilon,
84  std::ostream* error_msgs = 0,
85  std::ostream* output_msgs = 0) {
86  for (size_t i = 0; i < m.size(); i++)
87  m[i] += 0.5 * epsilon * step_sizes[i] * g[i];
88  for (size_t i = 0; i < x.size(); i++)
89  x[i] += epsilon * step_sizes[i] * m[i];
90  double logp;
91  try {
92  logp = model.grad_log_prob(x, z, g, output_msgs);
93  } catch (std::domain_error e) {
94  write_error_msgs(error_msgs,e);
95  logp = -std::numeric_limits<double>::infinity();
96  }
97  for (size_t i = 0; i < m.size(); i++)
98  m[i] += 0.5 * epsilon * step_sizes[i] * g[i];
99  return logp;
100  }
101 
102  // this is for eventual gibbs sampler for discrete
103  int sample_unnorm_log(std::vector<double> probs,
104  boost::uniform_01<boost::mt19937&>& rand_uniform_01) {
105  // linearize and scale, but don't norm
106  double mx = stan::math::max(probs);
107  for (size_t k = 0; k < probs.size(); ++k)
108  probs[k] = exp(probs[k] - mx);
109 
110  // norm by scaling uniform sample
111  double sum_probs = stan::math::sum(probs);
112  // handles overrun due to arithmetic imprecision
113  double sample_0_sum = std::max(rand_uniform_01() * sum_probs, sum_probs);
114  int k = 0;
115  double cum_unnorm_prob = probs[0];
116  while (cum_unnorm_prob < sample_0_sum)
117  cum_unnorm_prob += probs[++k];
118  return k;
119  }
120 
121 
122  }
123 
124 }
125 
126 #endif

     [ Stan Home Page ] © 2011–2012, Stan Development Team.