Stan  1.0
probability, sampling & optimization
command.hpp
Go to the documentation of this file.
1 #ifndef __STAN__GM__COMMAND_HPP__
2 #define __STAN__GM__COMMAND_HPP__
3 
// Standard library — include-what-you-use: std::cout/cerr, std::setw,
// std::ceil/log10/fabs, std::string, std::vector, std::invalid_argument
// are all used below and previously relied on transitive includes.
#include <cmath>
#include <cstddef>
#include <fstream>
#include <iomanip>
#include <iostream>
//#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>
// Boost
#include <boost/math/special_functions/fpclassify.hpp>
#include <boost/date_time/posix_time/posix_time_types.hpp>
#include <boost/random/additive_combine.hpp> // L'Ecuyer RNG
//#include <boost/random/mersenne_twister.hpp>
//#include <boost/random/uniform_01.hpp>
#include <boost/random/uniform_real_distribution.hpp>
// Stan
#include <stan/version.hpp>
#include <stan/io/cmd_line.hpp>
#include <stan/io/dump.hpp>
//#include <stan/mcmc/adaptive_sampler.hpp>
//#include <stan/mcmc/hmc.hpp>
#include <stan/mcmc/nuts.hpp>
#include <stan/mcmc/nuts_diag.hpp>
//#include <stan/model/prob_grad_ad.hpp>
//#include <stan/model/prob_grad.hpp>
//#include <stan/mcmc/sampler.hpp>
29 
30 namespace stan {
31 
32  namespace gm {
33 
34 
35  void print_nuts_help(std::string cmd) {
37 
38  std::cout << std::endl;
39  std::cout << "Compiled Stan Graphical Model Command" << std::endl;
40  std::cout << std::endl;
41 
42  std::cout << "USAGE: " << cmd << " [options]" << std::endl;
43  std::cout << std::endl;
44 
45  std::cout << "OPTIONS:" << std::endl;
46  std::cout << std::endl;
47 
48  print_help_option(&std::cout,
49  "help","",
50  "Display this information");
51 
52  print_help_option(&std::cout,
53  "data","file",
54  "Read data from specified dump-format file",
55  "required if model declares data");
56 
57  print_help_option(&std::cout,
58  "init","file",
59  "Use initial values from specified file or zero values if <file>=0",
60  "default is random initialization");
61 
62  print_help_option(&std::cout,
63  "samples","file",
64  "File into which samples are written",
65  "default = samples.csv");
66 
67  print_help_option(&std::cout,
68  "append_samples","",
69  "Append samples to existing file if it exists",
70  "does not write header in append mode");
71 
72  print_help_option(&std::cout,
73  "seed","int",
74  "Random number generation seed",
75  "default = randomly generated from time");
76 
77  print_help_option(&std::cout,
78  "chain_id","int",
79  "Markov chain identifier",
80  "default = 1");
81 
82  print_help_option(&std::cout,
83  "iter","+int",
84  "Total number of iterations, including warmup",
85  "default = 2000");
86 
87  print_help_option(&std::cout,
88  "warmup","+int",
89  "Discard the specified number of initial samples",
90  "default = iter / 2");
91 
92  print_help_option(&std::cout,
93  "thin","+int",
94  "Period between saved samples after warm up",
95  "default = max(1, floor(iter - warmup) / 1000)");
96 
97  print_help_option(&std::cout,
98  "refresh","int",
99  "Period between samples updating progress report print (0 for no printing)",
100  "default = max(1,iter/200))");
101 
102  print_help_option(&std::cout,
103  "leapfrog_steps","int",
104  "Number of leapfrog steps; -1 for no-U-turn adaptation",
105  "default = -1");
106 
107  print_help_option(&std::cout,
108  "max_treedepth","int",
109  "Limit NUTS leapfrog steps to 2^max_tree_depth; -1 for no limit",
110  "default = 10");
111 
112  print_help_option(&std::cout,
113  "epsilon","float",
114  "Initial value for step size, or -1 to set automatically",
115  "default = -1");
116 
117  print_help_option(&std::cout,
118  "epsilon_pm","[0,1]",
119  "Sample epsilon +/- epsilon * epsilon_pm",
120  "default = 0.0");
121 
122  print_help_option(&std::cout,
123  "equal_step_sizes","",
124  "Use same step size for every parameter with NUTS",
125  "default is to estimate varying step sizes during warmup");
126 
127  print_help_option(&std::cout,
128  "delta","[0,1]",
129  "Accuracy target for step-size adaptation (higher means smaller step sizes)",
130  "default = 0.5");
131 
132  print_help_option(&std::cout,
133  "gamma","+float",
134  "Gamma parameter for dual averaging step-size adaptation",
135  "default = 0.05");
136 
137  print_help_option(&std::cout,
138  "save_warmup","",
139  "Save the warmup samples");
140 
141  print_help_option(&std::cout,
142  "test_grad","",
143  "Test gradient calculations using finite differences");
144 
145  print_help_option(&std::cout,
146  "point_estimate","",
147  "Fit point estimate of hidden parameters by maximizing log joint probability");
148 
149  std::cout << std::endl;
150  }
151 
/**
 * Decide whether a progress line should be printed for iteration n.
 *
 * Printing is enabled only for a positive refresh period; within that,
 * the very first iteration and every refresh-th iteration (counting
 * from 1) are reported.
 *
 * @param n Zero-based iteration index.
 * @param refresh Reporting period; non-positive disables printing.
 * @return True if iteration n should be reported.
 */
bool do_print(int n, int refresh) {
  if (refresh <= 0)
    return false;
  if (n == 0)
    return true;
  return (n + 1) % refresh == 0;
}
157 
/**
 * Run the sampler for num_iterations iterations, writing thinned
 * post-warmup samples (and, optionally, thinned warmup samples) as
 * CSV rows to the sample file stream.
 *
 * Each saved row is: log probability, sampler-specific parameters,
 * then the model parameters (via model.write_csv).
 *
 * @tparam Sampler MCMC sampler type (e.g., nuts, nuts_diag, adaptive_hmc).
 * @tparam Model Compiled Stan model type.
 * @param sampler Sampler already constructed over the model.
 * @param epsilon_adapt If true, adaptation is switched on up front and
 *        switched off at the first post-warmup iteration, at which point
 *        the adaptation parameters are written to the stream.
 * @param refresh Progress-report period for std::cout (see do_print).
 * @param num_iterations Total number of iterations, including warmup.
 * @param num_warmup Number of initial warmup iterations.
 * @param num_thin Keep every num_thin-th draw.
 * @param save_warmup If true, thinned warmup draws are also written.
 * @param sample_file_stream Destination stream for CSV sample rows.
 * @param params_r Real-valued parameter vector; provides the starting
 *        state and is overwritten with each saved sample's values.
 * @param params_i Integer parameter vector; treated like params_r.
 * @param model Model used to write each sample's CSV row.
 */
template <class Sampler, class Model>
void sample_from(Sampler& sampler,
                 bool epsilon_adapt,
                 int refresh,
                 int num_iterations,
                 int num_warmup,
                 int num_thin,
                 bool save_warmup,
                 std::ostream& sample_file_stream,
                 std::vector<double>& params_r,
                 std::vector<int>& params_i,
                 Model& model) {

  sampler.set_params(params_r,params_i);

  // field width for aligned "m / total" progress output
  // NOTE(review): ceil(log10(N)) under-counts by one digit when N is an
  // exact power of 10 (e.g. N=1000 gives width 3) — cosmetic only; confirm
  int it_print_width = std::ceil(std::log10(num_iterations));
  std::cout << std::endl;

  if (epsilon_adapt)
    sampler.adapt_on();
  for (int m = 0; m < num_iterations; ++m) {
    if (do_print(m,refresh)) {
      std::cout << "Iteration: ";
      std::cout << std::setw(it_print_width) << (m + 1)
                << " / " << num_iterations;
      std::cout << " [" << std::setw(3)
                << static_cast<int>((100.0 * (m + 1))/num_iterations)
                << "%] ";
      std::cout << ((m < num_warmup) ? " (Adapting)" : " (Sampling)");
      std::cout << std::endl;
      std::cout.flush();
    }
    if (m < num_warmup) {
      // warmup draw: written only when requested, thinned from iteration 0
      if (save_warmup && (m % num_thin) == 0) {
        stan::mcmc::sample sample = sampler.next();

        // FIXME: use csv_writer arg to make comma optional?
        sample_file_stream << sample.log_prob() << ',';
        sampler.write_sampler_params(sample_file_stream);
        sample.params_r(params_r);
        sample.params_i(params_i);
        model.write_csv(params_r,params_i,sample_file_stream);
      } else {
        sampler.next(); // discard
      }
    } else {
      // first post-warmup pass: freeze adaptation and record its state
      if (epsilon_adapt && sampler.adapting()) {
        sampler.adapt_off();
        sampler.write_adaptation_params(sample_file_stream);
      }
      // thinning is counted from the first post-warmup iteration
      if (((m - num_warmup) % num_thin) != 0) {
        sampler.next();
        continue;
      } else {
        stan::mcmc::sample sample = sampler.next();

        // FIXME: use csv_writer arg to make comma optional?
        sample_file_stream << sample.log_prob() << ',';
        sampler.write_sampler_params(sample_file_stream);
        sample.params_r(params_r);
        sample.params_i(params_i);
        model.write_csv(params_r,params_i,sample_file_stream);
      }
    }
  }
}
224 
/**
 * Write an empty comment line (a lone "#") to the stream.
 *
 * @param o Stream to write to.
 */
void write_comment(std::ostream& o) {
  o << "#";
  o << std::endl;
}
/**
 * Write msg to the stream as a "# "-prefixed comment line.
 *
 * @tparam Msg Any type streamable to std::ostream.
 * @param o Stream to write to.
 * @param msg Value to write after the comment prefix.
 */
template <typename Msg>
void write_comment(std::ostream& o,
                   const Msg& msg) {
  o << "# ";
  o << msg;
  o << std::endl;
}
/**
 * Write a key=value pair to the stream as a "# "-prefixed comment line.
 *
 * @tparam KeyT Any type streamable to std::ostream.
 * @tparam ValT Any type streamable to std::ostream.
 * @param o Stream to write to.
 * @param key Property name.
 * @param val Property value.
 */
template <typename KeyT, typename ValT>
void write_comment_property(std::ostream& o,
                            const KeyT& key,
                            const ValT& val) {
  o << "# ";
  o << key;
  o << "=";
  o << val;
  o << std::endl;
}
239 
240  template <class Model>
241  int nuts_command(int argc, const char* argv[]) {
242 
243  stan::io::cmd_line command(argc,argv);
244 
245  if (command.has_flag("help")) {
246  print_nuts_help(argv[0]);
247  return 0;
248  }
249 
250  std::string data_file;
251  command.val("data",data_file);
252  std::fstream data_stream(data_file.c_str(),
253  std::fstream::in);
254  stan::io::dump data_var_context(data_stream);
255  data_stream.close();
256 
257  Model model(data_var_context, &std::cout);
258 
259  bool point_estimate = command.has_flag("point_estimate");
260 
261  std::string sample_file = "samples.csv";
262  command.val("samples",sample_file);
263 
264  unsigned int num_iterations = 2000U;
265  command.val("iter",num_iterations);
266 
267  unsigned int num_warmup = num_iterations / 2;
268  command.val("warmup",num_warmup);
269 
270  unsigned int calculated_thin = (num_iterations - num_warmup) / 1000U;
271  unsigned int num_thin = (calculated_thin > 1) ? calculated_thin : 1U;
272  command.val("thin",num_thin);
273 
274  bool user_supplied_thin = command.has_key("thin");
275 
276  int leapfrog_steps = -1;
277  command.val("leapfrog_steps",leapfrog_steps);
278 
279  double epsilon = -1.0;
280  command.val("epsilon",epsilon);
281 
282  int max_treedepth = 10;
283  command.val("max_treedepth",max_treedepth);
284 
285  double epsilon_pm = 0.0;
286  command.val("epsilon_pm",epsilon_pm);
287 
288  bool epsilon_adapt = epsilon <= 0.0;
289 
290  bool equal_step_sizes = command.has_flag("equal_step_sizes");
291 
292  double delta = 0.5;
293  command.val("delta", delta);
294 
295  double gamma = 0.05;
296  command.val("gamma", gamma);
297 
298  int refresh = num_iterations / 200;
299  refresh = refresh <= 0 ? 1 : refresh; // just for default
300  command.val("refresh",refresh);
301 
302  unsigned int random_seed = 0;
303  if (command.has_key("seed")) {
304  bool well_formed = command.val("seed",random_seed);
305  if (!well_formed) {
306  std::string seed_val;
307  command.val("seed",seed_val);
308  std::cerr << "value for seed must be integer"
309  << "; found value=" << seed_val << std::endl;
310  return -1;
311  }
312  } else {
313  random_seed
314  = (boost::posix_time::microsec_clock::universal_time() -
315  boost::posix_time::ptime(boost::posix_time::min_date_time))
316  .total_milliseconds();
317  }
318 
319  int chain_id = 1;
320  if (command.has_key("chain_id")) {
321  bool well_formed = command.val("chain_id",chain_id);
322  if (!well_formed || chain_id < 0) {
323  std::string chain_id_val;
324  command.val("chain_id",chain_id_val);
325  std::cerr << "value for chain_id must be positive integer"
326  << "; found chain_id=" << chain_id_val
327  << std::endl;
328  return -1;
329  }
330  }
331 
332  // FASTER, but no parallel guarantees:
333  // typedef boost::mt19937 rng_t;
334  // rng_t base_rng(static_cast<unsigned int>(random_seed + chain_id - 1);
335 
336  typedef boost::ecuyer1988 rng_t;
337  rng_t base_rng(random_seed);
338  // (2**50 = 1T samples, 1000 chains)
339  static boost::uintmax_t DISCARD_STRIDE = static_cast<boost::uintmax_t>(1) << 50;
340  // DISCARD_STRIDE <<= 50;
341  base_rng.discard(DISCARD_STRIDE * (chain_id - 1));
342 
343  std::vector<int> params_i;
344  std::vector<double> params_r;
345 
346  std::string init_val;
347  // parameter initialization
348  int num_init_tries = 1; // up here for printing below
349  if (command.has_key("init")) {
350  num_init_tries = -1;
351  command.val("init",init_val);
352  if (init_val == "0") {
353  params_i = std::vector<int>(model.num_params_i(),0);
354  params_r = std::vector<double>(model.num_params_r(),0.0);
355  } else {
356  try {
357  std::fstream init_stream(init_val.c_str(),std::fstream::in);
358  if (init_stream.fail()) {
359  std::string msg("ERROR: specified init file does not exist: ");
360  msg += init_val;
361  throw std::invalid_argument(msg);
362  }
363  stan::io::dump init_var_context(init_stream);
364  init_stream.close();
365  model.transform_inits(init_var_context,params_i,params_r);
366  } catch (const std::exception& e) {
367  std::cerr << "Error during user-specified initialization:"
368  << std::endl
369  << e.what()
370  << std::endl;
371  return -5;
372  }
373  }
374  } else {
375  init_val = "random initialization"; // for I/O
376  // init_rng generates uniformly from -2 to 2
377  boost::random::uniform_real_distribution<double>
378  init_range_distribution(-2.0,2.0);
379  boost::variate_generator<rng_t&,
380  boost::random::uniform_real_distribution<double> >
381  init_rng(base_rng,init_range_distribution);
382 
383  params_i = std::vector<int>(model.num_params_i(),0);
384  params_r = std::vector<double>(model.num_params_r());
385 
386  // retry inits until get a finite log prob value
387  std::vector<double> init_grad;
388  static int MAX_INIT_TRIES = 100;
389  for (num_init_tries = 1; num_init_tries <= MAX_INIT_TRIES; ++num_init_tries) {
390  for (size_t i = 0; i < params_r.size(); ++i)
391  params_r[i] = init_rng();
392  // FIXME: allow config vs. std::cout
393  double init_log_prob = model.grad_log_prob(params_r,params_i,init_grad,&std::cout);
394  if (!boost::math::isfinite(init_log_prob))
395  continue;
396  for (size_t i = 0; i < init_grad.size(); ++i)
397  if (!boost::math::isfinite(init_grad[i]))
398  continue;
399  break;
400  }
401  if (num_init_tries > MAX_INIT_TRIES) {
402  std::cout << "Initialization failed after " << MAX_INIT_TRIES
403  << " attempts. "
404  << " Try specifying initial values,"
405  << " reducing ranges of constrained values,"
406  << " or reparameterizing the model."
407  << std::endl;
408  return -1;
409  }
410  }
411 
412  bool save_warmup = command.has_flag("save_warmup");
413 
414  bool append_samples = command.has_flag("append_samples");
415  std::ios_base::openmode samples_append_mode
416  = append_samples
417  ? (std::fstream::out | std::fstream::app)
418  : std::fstream::out;
419 
420  if (command.has_flag("test_grad")) {
421  std::cout << std::endl << "TEST GRADIENT MODE" << std::endl;
422  return model.test_gradients(params_r,params_i);
423  }
424 
425  if (point_estimate) {
426  std::cout << "STAN OPTIMIZATION COMMAND" << std::endl;
427  if (data_file == "")
428  std::cout << "data = (specified model requires no data)" << std::endl;
429  else
430  std::cout << "data = " << data_file << std::endl;
431 
432  std::cout << "init = " << init_val << std::endl;
433  if (num_init_tries > 0)
434  std::cout << "init tries = " << num_init_tries << std::endl;
435 
436  std::cout << "output = " << sample_file << std::endl;
437  std::cout << "save_warmup = " << save_warmup<< std::endl;
438 
439  std::cout << "seed = " << random_seed
440  << " (" << (command.has_key("seed")
441  ? "user specified"
442  : "randomly generated") << ")"
443  << std::endl;
444 
445  std::fstream sample_stream(sample_file.c_str(),
446  samples_append_mode);
447 
448  write_comment(sample_stream,"Point Estimate Generated by Stan");
449  write_comment(sample_stream);
450  write_comment_property(sample_stream,"stan_version_major",stan::MAJOR_VERSION);
451  write_comment_property(sample_stream,"stan_version_minor",stan::MINOR_VERSION);
452  write_comment_property(sample_stream,"stan_version_patch",stan::PATCH_VERSION);
453  write_comment_property(sample_stream,"data",data_file);
454  write_comment_property(sample_stream,"init",init_val);
455  write_comment_property(sample_stream,"save_warmup",save_warmup);
456  write_comment_property(sample_stream,"seed",random_seed);
457  write_comment(sample_stream);
458 
459  sample_stream << "lp__,"; // log probability first
460  model.write_csv_header(sample_stream);
461 
462  std::vector<double> gradient;
463  double lp = model.grad_log_prob(params_r, params_i, gradient);
464 
465  double lastlp = lp - 1;
466  std::cout << "initial log joint probability = " << lp << std::endl;
467  int m = 0;
468  while ((lp - lastlp) / fabs(lp) > 1e-8) {
469  lastlp = lp;
470  lp = stan::optimization::newton_step(model, params_r, params_i);
471  std::cout << "Iteration ";
472  std::cout << std::setw(2) << (m + 1) << ". ";
473  std::cout << "Log joint probability = " << std::setw(10) << lp;
474  std::cout << ". Improved by " << (lp - lastlp) << ".";
475  std::cout << std::endl;
476  std::cout.flush();
477  m++;
478 // for (size_t i = 0; i < params_r.size(); i++)
479 // fprintf(stderr, "%f ", params_r[i]);
480 // fprintf(stderr, " %f (last = %f)\n", lp, lastlp);
481  if (save_warmup) {
482  sample_stream << lp << ',';
483  model.write_csv(params_r,params_i,sample_stream);
484  }
485  }
486 
487  sample_stream << lp << ',';
488  model.write_csv(params_r,params_i,sample_stream);
489 
490  return 0;
491  }
492 
493  std::cout << "STAN SAMPLING COMMAND" << std::endl;
494  if (data_file == "")
495  std::cout << "data = (specified model requires no data)" << std::endl;
496  else
497  std::cout << "data = " << data_file << std::endl;
498 
499  std::cout << "init = " << init_val << std::endl;
500  if (num_init_tries > 0)
501  std::cout << "init tries = " << num_init_tries << std::endl;
502 
503  std::cout << "samples = " << sample_file << std::endl;
504  std::cout << "append_samples = " << append_samples << std::endl;
505  std::cout << "save_warmup = " << save_warmup<< std::endl;
506 
507  std::cout << "seed = " << random_seed
508  << " (" << (command.has_key("seed")
509  ? "user specified"
510  : "randomly generated") << ")"
511  << std::endl;
512  std::cout << "chain_id = " << chain_id
513  << " (" << (command.has_key("chain_id")
514  ? "user specified"
515  : "default") << ")"
516  << std::endl;
517 
518  std::cout << "iter = " << num_iterations << std::endl;
519  std::cout << "warmup = " << num_warmup << std::endl;
520  std::cout << "thin = " << num_thin
521  << (user_supplied_thin ? " (user supplied)" : " (default)")
522  << std::endl;
523 
524  std::cout << "equal_step_sizes = " << equal_step_sizes << std::endl;
525  std::cout << "leapfrog_steps = " << leapfrog_steps << std::endl;
526  std::cout << "max_treedepth = " << max_treedepth << std::endl;;
527  std::cout << "epsilon = " << epsilon << std::endl;;
528  std::cout << "epsilon_pm = " << epsilon_pm << std::endl;;
529  std::cout << "delta = " << delta << std::endl;
530  std::cout << "gamma = " << gamma << std::endl;
531 
532  std::fstream sample_stream(sample_file.c_str(),
533  samples_append_mode);
534 
535  write_comment(sample_stream,"Samples Generated by Stan");
536  write_comment(sample_stream);
537  write_comment_property(sample_stream,"stan_version_major",stan::MAJOR_VERSION);
538  write_comment_property(sample_stream,"stan_version_minor",stan::MINOR_VERSION);
539  write_comment_property(sample_stream,"stan_version_patch",stan::PATCH_VERSION);
540  write_comment_property(sample_stream,"data",data_file);
541  write_comment_property(sample_stream,"init",init_val);
542  write_comment_property(sample_stream,"append_samples",append_samples);
543  write_comment_property(sample_stream,"save_warmup",save_warmup);
544  write_comment_property(sample_stream,"seed",random_seed);
545  write_comment_property(sample_stream,"chain_id",chain_id);
546  write_comment_property(sample_stream,"iter",num_iterations);
547  write_comment_property(sample_stream,"warmup",num_warmup);
548  write_comment_property(sample_stream,"thin",num_thin);
549  write_comment_property(sample_stream,"equal_step_sizes",equal_step_sizes);
550  write_comment_property(sample_stream,"leapfrog_steps",leapfrog_steps);
551  write_comment_property(sample_stream,"max_treedepth",max_treedepth);
552  write_comment_property(sample_stream,"epsilon",epsilon);
553  write_comment_property(sample_stream,"epsilon_pm",epsilon_pm);
554  write_comment_property(sample_stream,"delta",delta);
555  write_comment_property(sample_stream,"gamma",gamma);
556  write_comment(sample_stream);
557 
558  if (leapfrog_steps < 0 && !equal_step_sizes) {
559  // NUTS II (with varying step size estimation during warmup)
560  stan::mcmc::nuts_diag<rng_t> nuts2_sampler(model,
561  max_treedepth, epsilon,
562  epsilon_pm, epsilon_adapt,
563  delta, gamma,
564  base_rng, &params_r,
565  &params_i);
566 
567  // cut & paste (see below) to enable sample-specific params
568  if (!append_samples) {
569  sample_stream << "lp__,"; // log probability first
570  nuts2_sampler.write_sampler_param_names(sample_stream);
571  model.write_csv_header(sample_stream);
572  }
573  nuts2_sampler.set_error_stream(std::cout); // cout intended
574  nuts2_sampler.set_output_stream(std::cout);
575 
576  sample_from(nuts2_sampler,epsilon_adapt,refresh,
577  num_iterations,num_warmup,num_thin,save_warmup,
578  sample_stream,params_r,params_i,
579  model);
580 
581  } else if (leapfrog_steps < 0 && equal_step_sizes) {
582 
583  // NUTS I (equal step sizes)
584  stan::mcmc::nuts<rng_t> nuts_sampler(model,
585  max_treedepth, epsilon,
586  epsilon_pm, epsilon_adapt,
587  delta, gamma,
588  base_rng, &params_r,
589  &params_i);
590 
591  nuts_sampler.set_error_stream(std::cout);
592  nuts_sampler.set_output_stream(std::cout); // cout intended
593  // cut & paste (see below) to enable sample-specific params
594  if (!append_samples) {
595  sample_stream << "lp__,"; // log probability first
596  nuts_sampler.write_sampler_param_names(sample_stream);
597  model.write_csv_header(sample_stream);
598  }
599 
600  sample_from(nuts_sampler,epsilon_adapt,refresh,
601  num_iterations,num_warmup,num_thin,save_warmup,
602  sample_stream,params_r,params_i,
603  model);
604 
605  } else {
606 
607  // STANDARD HMC
608  stan::mcmc::adaptive_hmc<rng_t> hmc_sampler(model,
609  leapfrog_steps,
610  epsilon, epsilon_pm, epsilon_adapt,
611  delta, gamma,
612  base_rng, &params_r,
613  &params_i);
614 
615  hmc_sampler.set_error_stream(std::cout); // intended
616  hmc_sampler.set_output_stream(std::cout);
617  // cut & paste (see above) to enable sample-specific params
618  if (!append_samples) {
619  sample_stream << "lp__,"; // log probability first
620  hmc_sampler.write_sampler_param_names(sample_stream);
621  model.write_csv_header(sample_stream);
622  }
623 
624  sample_from(hmc_sampler,epsilon_adapt,refresh,
625  num_iterations,num_warmup,num_thin,save_warmup,
626  sample_stream,params_r,params_i,
627  model);
628  }
629 
630  sample_stream.close();
631  std::cout << std::endl << std::endl;
632  return 0;
633  }
634 
635  } // namespace gm
636 
637 
638 } // namespace stan
639 
640 #endif
Parses and stores command-line arguments.
Definition: cmd_line.hpp:111
bool has_key(const std::string &key) const
Return true if the specified key is defined.
Definition: cmd_line.hpp:163
bool has_flag(const std::string &flag) const
Return true if the specified flag is defined.
Definition: cmd_line.hpp:201
bool val(const std::string &key, T &x) const
Returns the value for the key provided.
Definition: cmd_line.hpp:187
Represents named arrays with dimensions.
Definition: dump.hpp:812
Adaptive Hamiltonian Monte Carlo (HMC) sampler.
virtual void write_sampler_param_names(std::ostream &o)
Write out any sampler-specific parameter names for output.
void set_output_stream(std::ostream &output_msgs)
Set the stream into which output will be written as the sampler runs.
void set_error_stream(std::ostream &error_msgs)
Set the stream into which errors will be written as the sampler runs.
No-U-Turn Sampler (NUTS) with varying step sizes.
Definition: nuts_diag.hpp:33
virtual void write_sampler_param_names(std::ostream &o)
Write out any sampler-specific parameter names for output.
Definition: nuts_diag.hpp:255
No-U-Turn Sampler (NUTS).
Definition: nuts.hpp:32
virtual void write_sampler_param_names(std::ostream &o)
Write out any sampler-specific parameter names for output.
Definition: nuts.hpp:216
Representation of a MCMC sample.
Definition: sampler.hpp:16
bool isfinite(const stan::agrad::var v)
Checks if the given number has finite value.
var fabs(const var &a)
Return the absolute value of the variable (cmath).
Definition: agrad.hpp:2023
var ceil(const var &a)
Return the ceiling of the specified variable (cmath).
Definition: agrad.hpp:2073
var log10(const var &a)
Return the base 10 log of the specified variable (cmath).
Definition: agrad.hpp:1744
bool do_print(int n, int refresh)
Definition: command.hpp:152
void print_nuts_help(std::string cmd)
Definition: command.hpp:35
void write_comment_property(std::ostream &o, const K &key, const V &val)
Definition: command.hpp:234
int nuts_command(int argc, const char *argv[])
Definition: command.hpp:241
void sample_from(Sampler &sampler, bool epsilon_adapt, int refresh, int num_iterations, int num_warmup, int num_thin, bool save_warmup, std::ostream &sample_file_stream, std::vector< double > &params_r, std::vector< int > &params_i, Model &model)
Definition: command.hpp:159
void write_comment(std::ostream &o)
Definition: command.hpp:225
void print_help_option(std::ostream *o, const std::string &key, const std::string &value_type, const std::string &msg, const std::string &note="")
Prints single print option to output ptr if non-null.
Definition: cmd_line.hpp:72
double epsilon()
Return minimum positive number representable.
double e()
Return the base of the natural logarithm.
double newton_step(stan::model::prob_grad &model, std::vector< double > &params_r, std::vector< int > &params_i, std::ostream *output_stream=0)
Definition: newton.hpp:32
Probability, optimization and sampling library.
Definition: agrad.cpp:6
const std::string MAJOR_VERSION
Major version number for Stan package.
Definition: version.hpp:9
const std::string MINOR_VERSION
Minor version number for Stan package.
Definition: version.hpp:12
const std::string PATCH_VERSION
Patch version for Stan package.
Definition: version.hpp:15

     [ Stan Home Page ] © 2011–2012, Stan Development Team.