1 #ifndef STAN__AGRAD__REV__FUNCTIONS__LOG_FALLING_FACTORIAL_HPP
2 #define STAN__AGRAD__REV__FUNCTIONS__LOG_FALLING_FACTORIAL_HPP
9 #include <boost/math/special_functions/digamma.hpp>
10 #include <boost/math/special_functions/fpclassify.hpp>
// NOTE(review): extraction-damaged fragment. The original file jumps from
// line 19 to line 25 here, so the op_vv_vari base initializer, the chain()
// declaration, and the guarding condition are NOT visible in this view.
// Only comments are added; code bytes are untouched.
//
// Vari implementation for log_falling_factorial(var, var). The visible
// lines construct the node from both operand varis and set both operand
// adjoints to NaN -- presumably inside an isnan() guard in chain() (the
// <fpclassify.hpp> include above supports this reading; confirm against
// the complete source).
17 class log_falling_factorial_vv_vari :
public op_vv_vari {
19 log_falling_factorial_vv_vari(vari* avi, vari* bvi) :
// Propagate NaN into both operand adjoints (likely the NaN-input branch).
25 avi_->adj_ = std::numeric_limits<double>::quiet_NaN();
26 bvi_->adj_ = std::numeric_limits<double>::quiet_NaN();
// NOTE(review): extraction-damaged fragment (original lines 37-41 are
// missing: base initializer, chain() declaration, guard condition).
// Only comments are added; code bytes are untouched.
//
// Vari implementation for log_falling_factorial(var, double): the second
// argument is a constant, so only the var operand's adjoint is tracked.
// The visible line writes NaN into that adjoint -- presumably the
// NaN-input branch of chain(); confirm against the complete source.
34 class log_falling_factorial_vd_vari :
public op_vd_vari {
36 log_falling_factorial_vd_vari(vari* avi,
double b) :
// Propagate NaN into the sole tracked adjoint (likely NaN-input branch).
42 avi_->adj_ = std::numeric_limits<double>::quiet_NaN();
// NOTE(review): extraction-damaged fragment (original lines 51-55 are
// missing: base initializer, chain() declaration, guard condition).
// Only comments are added; code bytes are untouched.
//
// Vari implementation for log_falling_factorial(double, var): the first
// argument is a constant, so only the var operand's adjoint is tracked.
// The visible line writes NaN into that adjoint -- presumably the
// NaN-input branch of chain(); confirm against the complete source.
48 class log_falling_factorial_dv_vari :
public op_dv_vari {
50 log_falling_factorial_dv_vari(
double a, vari* bvi) :
// Propagate NaN into the sole tracked adjoint (likely NaN-input branch).
56 bvi_->adj_ = std::numeric_limits<double>::quiet_NaN();
// NOTE(review): fragment -- the enclosing function signature (original
// lines up to 64) is missing from this view. Judging by the vari type
// constructed, this is the body of the (var a, double b) overload of
// log_falling_factorial: it wraps a new vd node over a's implementation
// pointer and the constant b. Confirm the signature against the full file.
65 return var(
new log_falling_factorial_vd_vari(a.
vi_, b));
// NOTE(review): fragment -- the enclosing function signature is missing
// from this view. Judging by the vari type constructed, this is the body
// of the (var a, var b) overload of log_falling_factorial: it wraps a new
// vv node over both operands' implementation pointers so gradients flow
// to both. Confirm the signature against the full file.
70 return var(
new log_falling_factorial_vv_vari(a.
vi_, b.
vi_));
// NOTE(review): fragment -- the enclosing function signature is missing
// from this view. Judging by the vari type constructed, this is the body
// of the (double a, var b) overload of log_falling_factorial: it wraps a
// new dv node over the constant a and b's implementation pointer.
// Confirm the signature against the full file.
75 return var(
new log_falling_factorial_dv_vari(a, b.
vi_));
bool isnan(const stan::agrad::var &v)
Checks if the given number is NaN.
vari * vi_
Pointer to the implementation of this variable.
fvar< T > log_falling_factorial(const fvar< T > &x, const fvar< T > &n)
Return the log of the falling factorial of the first argument with the second argument as the number of terms, with derivatives tracked through both fvar arguments.
fvar< T > digamma(const fvar< T > &x)