Stan  2.5.0
probability, sampling & optimization
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
beta.hpp
Go to the documentation of this file.
1 #ifndef STAN__PROB__DISTRIBUTIONS__UNIVARIATE__CONTINUOUS__BETA_HPP
2 #define STAN__PROB__DISTRIBUTIONS__UNIVARIATE__CONTINUOUS__BETA_HPP
3 
4 #include <boost/math/special_functions/gamma.hpp>
5 #include <boost/random/gamma_distribution.hpp>
6 #include <boost/random/variate_generator.hpp>
7 
13 #include <stan/meta/traits.hpp>
14 #include <stan/prob/constants.hpp>
15 #include <stan/prob/traits.hpp>
17 
18 namespace stan {
19 
20  namespace prob {
21 
// The log of the beta density for y in [0, 1] given shape parameters
// alpha ("prior successes") and beta ("prior failures").  Each of the
// three arguments may be a scalar or a vector; the log density is
// accumulated over max_size(y, alpha, beta) elements and gradients
// w.r.t. y / alpha / beta are accumulated into d_x1 / d_x2 / d_x3.
//
// NOTE(review): this text is a Doxygen source listing -- the leading
// numbers are the upstream file's line numbers, and the gaps in them
// show that the extraction dropped several source lines (e.g. the
// include_summand<propto,...> guard, the OperandsAndPartials
// declaration, and the DoubleVectorView type prefixes).  Verify
// against the original beta.hpp before compiling.
43  template <bool propto,
44  typename T_y, typename T_scale_succ, typename T_scale_fail>
45  typename return_type<T_y,T_scale_succ,T_scale_fail>::type
46  beta_log(const T_y& y,
47  const T_scale_succ& alpha, const T_scale_fail& beta) {
// %1% is substituted by the error handlers when a check fails.
48  static const char* function = "stan::prob::beta_log(%1%)";
49 
51  using boost::math::lgamma;
53  using stan::is_vector;
58  using stan::math::log1m;
63 
// Zero-length input contributes nothing to the log density.
64  // check if any vectors are zero length
65  if (!(stan::length(y)
66  && stan::length(alpha)
67  && stan::length(beta)))
68  return 0.0;
69 
70  // set up return value accumulator
71  double logp(0.0);
72 
// Domain checks: alpha, beta in (0, inf); y in [0, 1] and not NaN;
// vector arguments must have consistent sizes.
73  // validate args (here done over var, which should be OK)
74  check_positive_finite(function, alpha, "First shape parameter", &logp);
75  check_positive_finite(function, beta, "Second shape parameter", &logp);
76  check_not_nan(function, y, "Random variable", &logp);
77  check_consistent_sizes(function,
78  y,alpha,beta,
79  "Random variable","First shape parameter",
80  "Second shape parameter",
81  &logp);
82  check_nonnegative(function, y, "Random variable", &logp);
83  check_less_or_equal(function, y, 1,"Random variable", &logp);
84 
// NOTE(review): the condition of this early return (an
// include_summand<propto,...> test in the upstream source, per the
// comment on the line above) was dropped by the extraction, so the
// "return 0.0;" below looks unconditional here.
85  // check if no variables are involved and prop-to
87  return 0.0;
88 
// VectorView lets scalars and vectors be indexed uniformly by n.
89  VectorView<const T_y> y_vec(y);
90  VectorView<const T_scale_succ> alpha_vec(alpha);
91  VectorView<const T_scale_fail> beta_vec(beta);
92  size_t N = max_size(y, alpha, beta);
93 
// Outside the support the density is zero, hence log density LOG_ZERO.
94  for (size_t n = 0; n < N; n++) {
95  const double y_dbl = value_of(y_vec[n]);
96  if (y_dbl < 0 || y_dbl > 1)
97  return LOG_ZERO;
98  }
99 
// NOTE(review): the OperandsAndPartials declaration line preceding
// this constructor call was dropped by the extraction.
100  // set up template expressions wrapping scalars into vector views
102  operands_and_partials(y, alpha, beta);
103 
// Cache log(y) and log1m(y) once per element (the DoubleVectorView
// type prefixes on these declarations were dropped by the extraction).
105  is_vector<T_y>::value> log_y(length(y));
107  is_vector<T_y>::value> log1m_y(length(y));
108 
109  for (size_t n = 0; n < length(y); n++) {
111  log_y[n] = log(value_of(y_vec[n]));
113  log1m_y[n] = log1m(value_of(y_vec[n]));
114  }
115 
// Cache lgamma/digamma of alpha ...
117  is_vector<T_scale_succ>::value> lgamma_alpha(length(alpha));
119  is_vector<T_scale_succ>::value> digamma_alpha(length(alpha));
120  for (size_t n = 0; n < length(alpha); n++) {
122  lgamma_alpha[n] = lgamma(value_of(alpha_vec[n]));
124  digamma_alpha[n] = digamma(value_of(alpha_vec[n]));
125  }
126 
// ... of beta ...
128  is_vector<T_scale_fail>::value> lgamma_beta(length(beta));
130  is_vector<T_scale_fail>::value> digamma_beta(length(beta));
131 
132  for (size_t n = 0; n < length(beta); n++) {
134  lgamma_beta[n] = lgamma(value_of(beta_vec[n]));
136  digamma_beta[n] = digamma(value_of(beta_vec[n]));
137  }
138 
// ... and of the sum alpha + beta.
142  lgamma_alpha_beta(max_size(alpha,beta));
143 
148  digamma_alpha_beta(max_size(alpha,beta));
149 
150  for (size_t n = 0; n < max_size(alpha,beta); n++) {
151  const double alpha_beta = value_of(alpha_vec[n])
152  + value_of(beta_vec[n]);
154  lgamma_alpha_beta[n] = lgamma(alpha_beta);
157  digamma_alpha_beta[n] = digamma(alpha_beta);
158  }
159 
// Accumulate the log density:
//   log Beta(y | a, b) = lgamma(a+b) - lgamma(a) - lgamma(b)
//                        + (a-1) log(y) + (b-1) log1m(y).
// NOTE(review): the per-term include_summand guards were dropped by
// the extraction (line-number gaps at 167/169/171/173/175).
160  for (size_t n = 0; n < N; n++) {
161  // pull out values of arguments
162  const double y_dbl = value_of(y_vec[n]);
163  const double alpha_dbl = value_of(alpha_vec[n]);
164  const double beta_dbl = value_of(beta_vec[n]);
165 
166  // log probability
168  logp += lgamma_alpha_beta[n];
170  logp -= lgamma_alpha[n];
172  logp -= lgamma_beta[n];
174  logp += (alpha_dbl-1.0) * log_y[n];
176  logp += (beta_dbl-1.0) * log1m_y[n];
177 
// d/dy:     (a-1)/y - (b-1)/(1-y), written as (b-1)/(y-1);
// d/dalpha: log(y) + digamma(a+b) - digamma(a);
// d/dbeta:  log1m(y) + digamma(a+b) - digamma(b).
178  // gradients
180  operands_and_partials.d_x1[n] += (alpha_dbl-1)/y_dbl
181  + (beta_dbl-1)/(y_dbl-1);
183  operands_and_partials.d_x2[n]
184  += log_y[n] + digamma_alpha_beta[n] - digamma_alpha[n];
186  operands_and_partials.d_x3[n]
187  += log1m_y[n] + digamma_alpha_beta[n] - digamma_beta[n];
188  }
// Package the accumulated value and partials into the return type.
189  return operands_and_partials.to_var(logp);
190  }
191 
// Convenience overload: beta_log with propto = false, i.e. including
// all normalizing constants.
// NOTE(review): the return-type line (return_type<...>::type, upstream
// line 193) was dropped by the Doxygen extraction.
192  template <typename T_y, typename T_scale_succ, typename T_scale_fail>
194  beta_log(const T_y& y, const T_scale_succ& alpha,
195  const T_scale_fail& beta) {
196  return beta_log<false>(y,alpha,beta);
197  }
198 
211  template <typename T_y, typename T_scale_succ, typename T_scale_fail>
213  beta_cdf(const T_y& y, const T_scale_succ& alpha,
214  const T_scale_fail& beta) {
215 
216  // Size checks
217  if ( !( stan::length(y) && stan::length(alpha)
218  && stan::length(beta) ) )
219  return 1.0;
220 
221  // Error checks
222  static const char* function = "stan::prob::beta_cdf(%1%)";
223 
226  using boost::math::tools::promote_args;
228  using stan::math::value_of;
231 
232  double P(1.0);
233 
234  check_positive_finite(function, alpha, "First shape parameter", &P);
235  check_positive_finite(function, beta, "Second shape parameter", &P);
236  check_not_nan(function, y, "Random variable", &P);
237  check_consistent_sizes(function, y, alpha, beta,
238  "Random variable", "Shape parameter",
239  "Scale Parameter", &P);
240  check_nonnegative(function, y, "Random variable", &P);
241  check_less_or_equal(function, y, 1,"Random variable", &P);
242 
243  // Wrap arguments in vectors
244  VectorView<const T_y> y_vec(y);
245  VectorView<const T_scale_succ> alpha_vec(alpha);
246  VectorView<const T_scale_fail> beta_vec(beta);
247  size_t N = max_size(y, alpha, beta);
248 
250  operands_and_partials(y, alpha, beta);
251 
252  // Explicit return for extreme values
253  // The gradients are technically ill-defined, but treated as zero
254  for (size_t i = 0; i < stan::length(y); i++) {
255  if (value_of(y_vec[i]) <= 0)
256  return operands_and_partials.to_var(0.0);
257  }
258 
259  // Compute CDF and its gradients
260  using boost::math::ibeta;
261  using boost::math::ibeta_derivative;
262  using boost::math::digamma;
263 
264  // Cache a few expensive function calls if alpha or beta is a parameter
268  digamma_alpha_vec(max_size(alpha, beta));
269 
273  digamma_beta_vec(max_size(alpha, beta));
274 
278  digamma_sum_vec(max_size(alpha, beta));
279 
283  betafunc_vec(max_size(alpha, beta));
284 
287 
288  for (size_t i = 0; i < N; i++) {
289 
290  const double alpha_dbl = value_of(alpha_vec[i]);
291  const double beta_dbl = value_of(beta_vec[i]);
292 
293  digamma_alpha_vec[i] = digamma(alpha_dbl);
294  digamma_beta_vec[i] = digamma(beta_dbl);
295  digamma_sum_vec[i] = digamma(alpha_dbl + beta_dbl);
296  betafunc_vec[i] = boost::math::beta(alpha_dbl, beta_dbl);
297 
298  }
299 
300  }
301 
302  // Compute vectorized CDF and gradient
303  for (size_t n = 0; n < N; n++) {
304 
305  // Explicit results for extreme values
306  // The gradients are technically ill-defined, but treated as zero
307  if (value_of(y_vec[n]) >= 1.0) continue;
308 
309  // Pull out values
310  const double y_dbl = value_of(y_vec[n]);
311  const double alpha_dbl = value_of(alpha_vec[n]);
312  const double beta_dbl = value_of(beta_vec[n]);
313 
314  // Compute
315  const double Pn = ibeta(alpha_dbl, beta_dbl, y_dbl);
316 
317  P *= Pn;
318 
320  operands_and_partials.d_x1[n] += ibeta_derivative(alpha_dbl, beta_dbl,
321  y_dbl) / Pn;
322 
323  double g1 = 0;
324  double g2 = 0;
325 
328  stan::math::gradRegIncBeta(g1, g2, alpha_dbl, beta_dbl, y_dbl,
329  digamma_alpha_vec[n],
330  digamma_beta_vec[n], digamma_sum_vec[n],
331  betafunc_vec[n]);
332  }
333 
335  operands_and_partials.d_x2[n] += g1 / Pn;
337  operands_and_partials.d_x3[n] += g2 / Pn;
338  }
339 
341  for(size_t n = 0; n < stan::length(y); ++n)
342  operands_and_partials.d_x1[n] *= P;
343  }
345  for(size_t n = 0; n < stan::length(alpha); ++n)
346  operands_and_partials.d_x2[n] *= P;
347  }
349  for(size_t n = 0; n < stan::length(beta); ++n)
350  operands_and_partials.d_x3[n] *= P;
351  }
352 
353  return operands_and_partials.to_var(P);
354  }
355 
356  template <typename T_y, typename T_scale_succ, typename T_scale_fail>
358  beta_cdf_log(const T_y& y, const T_scale_succ& alpha,
359  const T_scale_fail& beta) {
360 
361  // Size checks
362  if ( !( stan::length(y) && stan::length(alpha)
363  && stan::length(beta) ) )
364  return 0.0;
365 
366  // Error checks
367  static const char* function = "stan::prob::beta_cdf(%1%)";
368 
373  using boost::math::tools::promote_args;
375  using stan::math::value_of;
376 
377  double cdf_log(0.0);
378 
379  check_positive_finite(function, alpha, "First shape parameter", &cdf_log);
380  check_positive_finite(function, beta, "Second shape parameter", &cdf_log);
381  check_not_nan(function, y, "Random variable", &cdf_log);
382  check_nonnegative(function, y, "Random variable", &cdf_log);
383  check_less_or_equal(function, y, 1,"Random variable", &cdf_log);
384  check_consistent_sizes(function, y, alpha, beta,
385  "Random variable", "Shape parameter",
386  "Scale Parameter", &cdf_log);
387 
388  // Wrap arguments in vectors
389  VectorView<const T_y> y_vec(y);
390  VectorView<const T_scale_succ> alpha_vec(alpha);
391  VectorView<const T_scale_fail> beta_vec(beta);
392  size_t N = max_size(y, alpha, beta);
393 
395  operands_and_partials(y, alpha, beta);
396 
397  // Compute CDF and its gradients
398  using boost::math::ibeta;
399  using boost::math::ibeta_derivative;
400  using boost::math::digamma;
401 
402  // Cache a few expensive function calls if alpha or beta is a parameter
406  digamma_alpha_vec(max_size(alpha, beta));
407 
411  digamma_beta_vec(max_size(alpha, beta));
412 
416  digamma_sum_vec(max_size(alpha, beta));
417 
421  betafunc_vec(max_size(alpha, beta));
422 
425 
426  for (size_t i = 0; i < N; i++) {
427 
428  const double alpha_dbl = value_of(alpha_vec[i]);
429  const double beta_dbl = value_of(beta_vec[i]);
430 
431  digamma_alpha_vec[i] = digamma(alpha_dbl);
432  digamma_beta_vec[i] = digamma(beta_dbl);
433  digamma_sum_vec[i] = digamma(alpha_dbl + beta_dbl);
434  betafunc_vec[i] = boost::math::beta(alpha_dbl, beta_dbl);
435  }
436  }
437 
438  // Compute vectorized CDFLog and gradient
439  for (size_t n = 0; n < N; n++) {
440 
441  // Pull out values
442  const double y_dbl = value_of(y_vec[n]);
443  const double alpha_dbl = value_of(alpha_vec[n]);
444  const double beta_dbl = value_of(beta_vec[n]);
445 
446  // Compute
447  const double Pn = ibeta(alpha_dbl, beta_dbl, y_dbl);
448 
449  cdf_log += log(Pn);
450 
452  operands_and_partials.d_x1[n] +=
453  ibeta_derivative(alpha_dbl, beta_dbl, y_dbl) / Pn;
454 
455  double g1 = 0;
456  double g2 = 0;
457 
460  stan::math::gradRegIncBeta(g1, g2, alpha_dbl, beta_dbl, y_dbl,
461  digamma_alpha_vec[n],
462  digamma_beta_vec[n], digamma_sum_vec[n],
463  betafunc_vec[n]);
464  }
466  operands_and_partials.d_x2[n] += g1 / Pn;
468  operands_and_partials.d_x3[n] += g2 / Pn;
469  }
470 
471  return operands_and_partials.to_var(cdf_log);
472  }
473 
474  template <typename T_y, typename T_scale_succ, typename T_scale_fail>
476  beta_ccdf_log(const T_y& y, const T_scale_succ& alpha,
477  const T_scale_fail& beta) {
478 
479  // Size checks
480  if ( !( stan::length(y) && stan::length(alpha)
481  && stan::length(beta) ) )
482  return 0.0;
483 
484  // Error checks
485  static const char* function = "stan::prob::beta_cdf(%1%)";
486 
491  using boost::math::tools::promote_args;
493  using stan::math::value_of;
494 
495  double ccdf_log(0.0);
496 
497  check_positive_finite(function, alpha, "First shape parameter",
498  &ccdf_log);
499  check_positive_finite(function, beta, "Second shape parameter",
500  &ccdf_log);
501  check_not_nan(function, y, "Random variable", &ccdf_log);
502  check_nonnegative(function, y, "Random variable", &ccdf_log);
503  check_less_or_equal(function, y, 1,"Random variable", &ccdf_log);
504  check_consistent_sizes(function, y, alpha, beta,
505  "Random variable", "Shape parameter",
506  "Scale Parameter", &ccdf_log);
507 
508  // Wrap arguments in vectors
509  VectorView<const T_y> y_vec(y);
510  VectorView<const T_scale_succ> alpha_vec(alpha);
511  VectorView<const T_scale_fail> beta_vec(beta);
512  size_t N = max_size(y, alpha, beta);
513 
515  operands_and_partials(y, alpha, beta);
516 
517  // Compute CDF and its gradients
518  using boost::math::ibeta;
519  using boost::math::ibeta_derivative;
520  using boost::math::digamma;
521 
522  // Cache a few expensive function calls if alpha or beta is a parameter
526  digamma_alpha_vec(max_size(alpha, beta));
530  digamma_beta_vec(max_size(alpha, beta));
534  digamma_sum_vec(max_size(alpha, beta));
538  betafunc_vec(max_size(alpha, beta));
539 
542 
543  for (size_t i = 0; i < N; i++) {
544 
545  const double alpha_dbl = value_of(alpha_vec[i]);
546  const double beta_dbl = value_of(beta_vec[i]);
547 
548  digamma_alpha_vec[i] = digamma(alpha_dbl);
549  digamma_beta_vec[i] = digamma(beta_dbl);
550  digamma_sum_vec[i] = digamma(alpha_dbl + beta_dbl);
551  betafunc_vec[i] = boost::math::beta(alpha_dbl, beta_dbl);
552  }
553  }
554 
555  // Compute vectorized CDFLog and gradient
556  for (size_t n = 0; n < N; n++) {
557 
558  // Pull out values
559  const double y_dbl = value_of(y_vec[n]);
560  const double alpha_dbl = value_of(alpha_vec[n]);
561  const double beta_dbl = value_of(beta_vec[n]);
562 
563  // Compute
564  const double Pn = 1.0 - ibeta(alpha_dbl, beta_dbl, y_dbl);
565 
566  ccdf_log += log(Pn);
567 
569  operands_and_partials.d_x1[n] -=
570  ibeta_derivative(alpha_dbl, beta_dbl, y_dbl) / Pn;
571 
572  double g1 = 0;
573  double g2 = 0;
574 
577  stan::math::gradRegIncBeta(g1, g2, alpha_dbl, beta_dbl, y_dbl,
578  digamma_alpha_vec[n],
579  digamma_beta_vec[n], digamma_sum_vec[n],
580  betafunc_vec[n]);
581  }
583  operands_and_partials.d_x2[n] -= g1 / Pn;
585  operands_and_partials.d_x3[n] -= g2 / Pn;
586  }
587 
588  return operands_and_partials.to_var(ccdf_log);
589  }
590 
// Draw a Beta(alpha, beta) variate via the two-gamma construction:
// if A ~ Gamma(alpha, 1) and B ~ Gamma(beta, 1) are independent, then
// A / (A + B) ~ Beta(alpha, beta).
//
// Fix in this revision: C-style casts "(double*)0" replaced with the
// named static_cast form.
//
// NOTE(review): the gap at upstream line 601 indicates a dropped line
// (likely a using-declaration for check_positive_finite); verify
// against the original source.
591  template <class RNG>
592  inline double
593  beta_rng(const double alpha,
594  const double beta,
595  RNG& rng) {
596  using boost::variate_generator;
597  using boost::random::gamma_distribution;
598  // Error checks
599  static const char* function = "stan::prob::beta_rng(%1%)";
600 
602 
// NOTE(review): a null result pointer here presumably makes a failed
// check throw rather than record the error -- confirm against the
// error-handling implementation.
603  check_positive_finite(function, alpha, "First shape parameter",
604  static_cast<double*>(0));
605  check_positive_finite(function, beta, "Second shape parameter",
606  static_cast<double*>(0));
607 
// One-shot gamma generators sharing the caller's RNG engine.
608  variate_generator<RNG&, gamma_distribution<> >
609  rng_gamma_alpha(rng, gamma_distribution<>(alpha, 1.0));
610  variate_generator<RNG&, gamma_distribution<> >
611  rng_gamma_beta(rng, gamma_distribution<>(beta, 1.0));
612  double a = rng_gamma_alpha();
613  double b = rng_gamma_beta();
614  return a / (a + b);
615  }
616 
617  }
618 }
619 #endif
fvar< T > log1m(const fvar< T > &x)
Definition: log1m.hpp:16
return_type< T_y, T_scale_succ, T_scale_fail >::type beta_cdf(const T_y &y, const T_scale_succ &alpha, const T_scale_fail &beta)
Calculates the beta cumulative distribution function for the given variate and shape parameters...
Definition: beta.hpp:213
void gradRegIncBeta(double &g1, double &g2, double a, double b, double z, double digammaA, double digammaB, double digammaSum, double betaAB)
return_type< T_y, T_scale_succ, T_scale_fail >::type beta_cdf_log(const T_y &y, const T_scale_succ &alpha, const T_scale_fail &beta)
Definition: beta.hpp:358
T_return_type to_var(double logp)
bool check_positive_finite(const char *function, const T_y &y, const char *name, T_result *result)
boost::math::tools::promote_args< T_a, T_b >::type multiply_log(const T_a a, const T_b b)
Calculates the value of the first argument times log of the second argument while behaving properly w...
size_t length(const T &)
Definition: traits.hpp:159
return_type< T_y, T_scale_succ, T_scale_fail >::type beta_log(const T_y &y, const T_scale_succ &alpha, const T_scale_fail &beta)
The log of the beta density for the specified scalar(s) given the specified sample size(s)...
Definition: beta.hpp:46
DoubleVectorView allocates double values to be used as intermediate values.
Definition: traits.hpp:358
T value_of(const fvar< T > &v)
Return the value of the specified variable.
Definition: value_of.hpp:16
fvar< T > lgamma(const fvar< T > &x)
Definition: lgamma.hpp:15
A variable implementation that stores operands and derivatives with respect to the variable...
boost::math::tools::promote_args< typename scalar_type< T1 >::type, typename scalar_type< T2 >::type, typename scalar_type< T3 >::type, typename scalar_type< T4 >::type, typename scalar_type< T5 >::type, typename scalar_type< T6 >::type >::type type
Definition: traits.hpp:406
Metaprogram to determine if a type has a base scalar type that can be assigned to type double...
Definition: traits.hpp:57
double value_of(const T x)
Return the value of the specified scalar argument converted to a double value.
Definition: value_of.hpp:24
Template metaprogram to calculate whether a summand needs to be included in a proportional (log) prob...
Definition: traits.hpp:35
VectorView< double *, is_vector< T2 >::value, is_constant_struct< T2 >::value > d_x2
var ibeta(const var &a, const var &b, const var &x)
The normalized incomplete beta function of a, b, and x.
Definition: ibeta.hpp:223
bool check_nonnegative(const char *function, const T_y &y, const char *name, T_result *result)
bool check_consistent_sizes(const char *function, const T1 &x1, const T2 &x2, const char *name1, const char *name2, T_result *result)
size_t max_size(const T1 &x1, const T2 &x2)
Definition: traits.hpp:191
double beta_rng(const double alpha, const double beta, RNG &rng)
Definition: beta.hpp:593
bool check_less_or_equal(const char *function, const T_y &y, const T_high &high, const char *name, T_result *result)
bool check_not_nan(const char *function, const T_y &y, const char *name, T_result *result)
Checks if the variable y is nan.
fvar< T > digamma(const fvar< T > &x)
Definition: digamma.hpp:16
return_type< T_y, T_scale_succ, T_scale_fail >::type beta_ccdf_log(const T_y &y, const T_scale_succ &alpha, const T_scale_fail &beta)
Definition: beta.hpp:476
VectorView< double *, is_vector< T1 >::value, is_constant_struct< T1 >::value > d_x1
VectorView< double *, is_vector< T3 >::value, is_constant_struct< T3 >::value > d_x3
fvar< T > log(const fvar< T > &x)
Definition: log.hpp:15
VectorView is a template metaprogram that takes its argument and allows it to be used like a vector...
Definition: traits.hpp:275
boost::math::tools::promote_args< T >::type log1m(T x)
Return the natural logarithm of one minus the specified value.
Definition: log1m.hpp:40

     [ Stan Home Page ] © 2011–2014, Stan Development Team.