#include <stan/math/rev.hpp>
#include <stan/math/prim.hpp>
#include <gtest/gtest.h>
#include <vector>
#include <cmath>

//  We check that the values of the new regression match those of one built
//  from existing primitives.
TEST(ProbDistributionsPoissonLogGLM, glm_matches_poisson_log_doubles) {
  using Eigen::Dynamic;
  using Eigen::Matrix;
  using stan::math::var;
  using std::vector;
  vector<int> y{15, 3, 5};
  Matrix<double, Dynamic, Dynamic> x(3, 2);
  x << -12, 46, -42, 24, 25, 27;
  Matrix<double, Dynamic, 1> beta(2, 1);
  beta << 0.3, 2;
  double alpha = 0.3;
  Matrix<double, Dynamic, 1> alphavec = alpha * Matrix<double, 3, 1>::Ones();
  Matrix<double, Dynamic, 1> theta(3, 1);
  theta = x * beta + alphavec;
  EXPECT_FLOAT_EQ((stan::math::poisson_log_lpmf(y, theta)),
                  (stan::math::poisson_log_glm_lpmf(y, x, alpha, beta)));
  EXPECT_FLOAT_EQ((stan::math::poisson_log_lpmf<true>(y, theta)),
                  (stan::math::poisson_log_glm_lpmf<true>(y, x, alpha, beta)));
}
//  We check that the values of the new regression match those of one built
//  from existing primitives.
TEST(ProbDistributionsPoissonLogGLM, glm_matches_poisson_log_doubles_rand) {
  using Eigen::Dynamic;
  using Eigen::Matrix;
  using stan::math::var;
  using std::vector;
  for (size_t ii = 0; ii < 42; ii++) {
    vector<int> y(3);
    for (size_t i = 0; i < 3; i++) {
      y[i] = Matrix<unsigned int, 1, 1>::Random(1, 1)[0] % 200;
    }
    Matrix<double, Dynamic, Dynamic> x
        = Matrix<double, Dynamic, Dynamic>::Random(3, 2);
    Matrix<double, Dynamic, 1> beta
        = Matrix<double, Dynamic, Dynamic>::Random(2, 1);
    Matrix<double, 1, 1> alphamat = Matrix<double, 1, 1>::Random(1, 1);
    double alpha = alphamat[0];
    Matrix<double, Dynamic, 1> alphavec = alpha * Matrix<double, 3, 1>::Ones();
    Matrix<double, Dynamic, 1> theta(3, 1);
    theta = x * beta + alphavec;
    EXPECT_FLOAT_EQ((stan::math::poisson_log_lpmf(y, theta)),
                    (stan::math::poisson_log_glm_lpmf(y, x, alpha, beta)));
    EXPECT_FLOAT_EQ(
        (stan::math::poisson_log_lpmf<true>(y, theta)),
        (stan::math::poisson_log_glm_lpmf<true>(y, x, alpha, beta)));
  }
}
//  We check that the gradients of the new regression match those of one built
//  from existing primitives.
TEST(ProbDistributionsPoissonLogGLM, glm_matches_poisson_log_vars) {
  using Eigen::Dynamic;
  using Eigen::Matrix;
  using stan::math::var;
  using std::vector;
  vector<int> y{14, 2, 5};
  Matrix<var, Dynamic, Dynamic> x(3, 2);
  x << -12, 46, -42, 24, 25, 27;
  Matrix<var, Dynamic, 1> beta(2, 1);
  beta << 0.3, 2;
  var alpha = 0.3;
  Matrix<var, Dynamic, 1> alphavec = alpha * Matrix<double, 3, 1>::Ones();
  Matrix<var, Dynamic, 1> theta(3, 1);
  theta = x * beta + alphavec;
  var lp = stan::math::poisson_log_lpmf(y, theta);
  lp.grad();

  double lp_val = lp.val();
  double alpha_adj = alpha.adj();
  Matrix<double, Dynamic, Dynamic> x_adj(3, 2);
  Matrix<double, Dynamic, 1> beta_adj(2, 1);
  for (size_t i = 0; i < 2; i++) {
    beta_adj[i] = beta[i].adj();
    for (size_t j = 0; j < 3; j++) {
      x_adj(j, i) = x(j, i).adj();
    }
  }

  stan::math::recover_memory();

  vector<int> y2{14, 2, 5};
  Matrix<var, Dynamic, Dynamic> x2(3, 2);
  x2 << -12, 46, -42, 24, 25, 27;
  Matrix<var, Dynamic, 1> beta2(2, 1);
  beta2 << 0.3, 2;
  var alpha2 = 0.3;
  var lp2 = stan::math::poisson_log_glm_lpmf(y2, x2, alpha2, beta2);
  lp2.grad();
  EXPECT_FLOAT_EQ(lp_val, lp2.val());
  EXPECT_FLOAT_EQ(alpha_adj, alpha2.adj());
  for (size_t i = 0; i < 2; i++) {
    EXPECT_FLOAT_EQ(beta_adj[i], beta2[i].adj());
    for (size_t j = 0; j < 3; j++) {
      EXPECT_FLOAT_EQ(x_adj(j, i), x2(j, i).adj());
    }
  }
}

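//  We check that passing x as a single row, broadcast across all instances,
//  gives the same value and gradients as explicitly replicating that row.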
TEST(ProbDistributionsPoissonLogGLM, broadcast_x) {
  using Eigen::Dynamic;
  using Eigen::Matrix;
  using stan::math::var;
  using std::vector;
  vector<int> y{1, 0, 5};
  Matrix<double, 1, Dynamic> x(1, 2);
  x << -12, 46;
  Matrix<var, 1, Dynamic> x1 = x;
  Matrix<var, Dynamic, Dynamic> x_mat = x.replicate(3, 1);
  Matrix<double, Dynamic, 1> beta(2, 1);
  beta << 0.3, 2;
  Matrix<var, Dynamic, 1> beta1 = beta;
  Matrix<var, Dynamic, 1> beta2 = beta;
  var alpha1 = 0.3;
  var alpha2 = 0.3;

  var lp1 = stan::math::poisson_log_glm_lpmf(y, x1, alpha1, beta1);
  var lp2 = stan::math::poisson_log_glm_lpmf(y, x_mat, alpha2, beta2);

  EXPECT_DOUBLE_EQ(lp1.val(), lp2.val());

  (lp1 + lp2).grad();

  for (int i = 0; i < 2; i++) {
    EXPECT_DOUBLE_EQ(x1[i].adj(), x_mat.col(i).adj().sum());
    EXPECT_DOUBLE_EQ(beta1[i].adj(), beta2[i].adj());
  }
  EXPECT_DOUBLE_EQ(alpha1.adj(), alpha2.adj());
}

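//  We check that passing y as a single scalar, broadcast across all
//  instances, gives the same value and gradients as passing a vector of
//  identical values.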
TEST(ProbDistributionsPoissonLogGLM, broadcast_y) {
  using Eigen::Dynamic;
  using Eigen::Matrix;
  using stan::math::var;
  using std::vector;
  int y = 13;
  Matrix<int, Dynamic, 1> y_vec = Matrix<int, Dynamic, 1>::Constant(3, 1, y);
  Matrix<double, Dynamic, Dynamic> x(3, 2);
  x << -12, 46, -42, 24, 25, 27;
  Matrix<var, Dynamic, Dynamic> x1 = x;
  Matrix<var, Dynamic, Dynamic> x2 = x;
  Matrix<double, Dynamic, 1> beta(2, 1);
  beta << 0.3, 2;
  Matrix<var, Dynamic, 1> beta1 = beta;
  Matrix<var, Dynamic, 1> beta2 = beta;
  var alpha1 = 0.3;
  var alpha2 = 0.3;

  var lp1 = stan::math::poisson_log_glm_lpmf(y, x1, alpha1, beta1);
  var lp2 = stan::math::poisson_log_glm_lpmf(y_vec, x2, alpha2, beta2);

  EXPECT_DOUBLE_EQ(lp1.val(), lp2.val());

  (lp1 + lp2).grad();

  for (int i = 0; i < 2; i++) {
    for (int j = 0; j < 2; j++) {
      EXPECT_DOUBLE_EQ(x1(j, i).adj(), x2(j, i).adj());
    }
    EXPECT_DOUBLE_EQ(beta1[i].adj(), beta2[i].adj());
  }
  EXPECT_DOUBLE_EQ(alpha1.adj(), alpha2.adj());
}

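//  We check that the gradients of the new regression match those of one built
//  from existing primitives, in the edge case of zero instances.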
TEST(ProbDistributionsPoissonLogGLM,
     glm_matches_poisson_log_vars_zero_instances) {
  using Eigen::Dynamic;
  using Eigen::Matrix;
  using stan::math::var;
  using std::vector;
  vector<int> y{};
  Matrix<var, Dynamic, Dynamic> x(0, 2);
  Matrix<var, Dynamic, 1> beta(2, 1);
  beta << 0.3, 2;
  var alpha = 0.3;
  Matrix<var, Dynamic, 1> theta(0, 1);
  var lp = stan::math::poisson_log_lpmf(y, theta);
  lp.grad();

  double lp_val = lp.val();
  double alpha_adj = alpha.adj();
  Matrix<double, Dynamic, 1> beta_adj(2, 1);
  for (size_t i = 0; i < 2; i++) {
    beta_adj[i] = beta[i].adj();
  }

  stan::math::recover_memory();

  vector<int> y2{};
  Matrix<var, Dynamic, Dynamic> x2(0, 2);
  Matrix<var, Dynamic, 1> beta2(2, 1);
  beta2 << 0.3, 2;
  var alpha2 = 0.3;
  var lp2 = stan::math::poisson_log_glm_lpmf(y2, x2, alpha2, beta2);
  lp2.grad();
  EXPECT_FLOAT_EQ(lp_val, lp2.val());
  EXPECT_FLOAT_EQ(alpha_adj, alpha2.adj());
  for (size_t i = 0; i < 2; i++) {
    EXPECT_FLOAT_EQ(beta_adj[i], beta2[i].adj());
  }
}

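//  We check that the value and gradients of the new regression match those of
//  one built from existing primitives, in the edge case of zero attributes.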
TEST(ProbDistributionsPoissonLogGLM,
     glm_matches_poisson_log_vars_zero_attributes) {
  using Eigen::Dynamic;
  using Eigen::Matrix;
  using stan::math::var;
  using std::vector;
  vector<int> y{14, 2, 5};
  Matrix<var, Dynamic, Dynamic> x(3, 0);
  Matrix<var, Dynamic, 1> beta(0, 1);
  var alpha = 0.3;
  Matrix<var, Dynamic, 1> alphavec = alpha * Matrix<double, 3, 1>::Ones();
  Matrix<var, Dynamic, 1> theta(3, 1);
  theta = x * beta + alphavec;
  var lp = stan::math::poisson_log_lpmf(y, theta);
  lp.grad();

  double lp_val = lp.val();
  double alpha_adj = alpha.adj();

  stan::math::recover_memory();

  vector<int> y2{14, 2, 5};
  Matrix<var, Dynamic, Dynamic> x2(3, 0);
  Matrix<var, Dynamic, 1> beta2(0, 1);
  var alpha2 = 0.3;
  var lp2 = stan::math::poisson_log_glm_lpmf(y2, x2, alpha2, beta2);
  lp2.grad();
  EXPECT_FLOAT_EQ(lp_val, lp2.val());
  EXPECT_FLOAT_EQ(alpha_adj, alpha2.adj());
}

//  We check that the gradients of the new regression match those of one built
//  from existing primitives.
TEST(ProbDistributionsPoissonLogGLM, glm_matches_poisson_log_vars_rand) {
  using Eigen::Dynamic;
  using Eigen::Matrix;
  using stan::math::var;
  using std::vector;
  for (size_t ii = 0; ii < 200; ii++) {
    vector<int> y(3);
    for (size_t i = 0; i < 3; i++) {
      y[i] = Matrix<unsigned int, 1, 1>::Random(1, 1)[0] % 200;
    }
    Matrix<double, Dynamic, Dynamic> xreal
        = Matrix<double, Dynamic, Dynamic>::Random(3, 2);
    Matrix<double, Dynamic, 1> betareal
        = Matrix<double, Dynamic, Dynamic>::Random(2, 1);
    Matrix<double, 1, 1> alphareal = Matrix<double, 1, 1>::Random(1, 1);
    Matrix<var, Dynamic, 1> beta = betareal;
    Matrix<var, Dynamic, 1> theta(3, 1);
    Matrix<var, Dynamic, Dynamic> x = xreal;
    var alpha = alphareal[0];
    Matrix<var, Dynamic, 1> alphavec = Matrix<double, 3, 1>::Ones() * alpha;
    theta = (x * beta) + alphavec;
    var lp = stan::math::poisson_log_lpmf(y, theta);
    lp.grad();

    double lp_val = lp.val();
    double alpha_adj = alpha.adj();
    Matrix<double, Dynamic, Dynamic> x_adj(3, 2);
    Matrix<double, Dynamic, 1> beta_adj(2, 1);
    for (size_t i = 0; i < 2; i++) {
      beta_adj[i] = beta[i].adj();
      for (size_t j = 0; j < 3; j++) {
        x_adj(j, i) = x(j, i).adj();
      }
    }

    stan::math::recover_memory();

    Matrix<var, Dynamic, 1> beta2 = betareal;
    Matrix<var, Dynamic, Dynamic> x2 = xreal;
    var alpha2 = alphareal[0];
    var lp2 = stan::math::poisson_log_glm_lpmf(y, x2, alpha2, beta2);
    lp2.grad();
    EXPECT_FLOAT_EQ(lp_val, lp2.val());
    EXPECT_FLOAT_EQ(alpha_adj, alpha2.adj());
    for (size_t i = 0; i < 2; i++) {
      EXPECT_FLOAT_EQ(beta_adj[i], beta2[i].adj());
      for (size_t j = 0; j < 3; j++) {
        EXPECT_FLOAT_EQ(x_adj(j, i), x2(j, i).adj());
      }
    }
  }
}

//  We check that the gradients of the new regression match those of one built
//  from existing primitives, in the case where beta is a scalar.
TEST(ProbDistributionsPoissonLogGLM,
     glm_matches_poisson_log_vars_rand_scal_beta) {
  using Eigen::Dynamic;
  using Eigen::Matrix;
  using stan::math::var;
  using std::vector;
  for (size_t ii = 0; ii < 42; ii++) {
    vector<int> y(3);
    for (size_t i = 0; i < 3; i++) {
      y[i] = Matrix<unsigned int, 1, 1>::Random(1, 1)[0] % 200;
    }
    Matrix<double, Dynamic, Dynamic> xreal
        = Matrix<double, Dynamic, Dynamic>::Random(3, 1);
    double betareal = Matrix<double, Dynamic, Dynamic>::Random(1, 1)(0, 0);
    Matrix<double, 1, 1> alphareal = Matrix<double, 1, 1>::Random(1, 1);
    var beta = betareal;
    Matrix<var, Dynamic, 1> theta(3, 1);
    Matrix<var, Dynamic, Dynamic> x = xreal;
    var alpha = alphareal[0];
    Matrix<var, Dynamic, 1> alphavec = Matrix<double, 3, 1>::Ones() * alpha;
    theta = (x * beta) + alphavec;
    var lp = stan::math::poisson_log_lpmf(y, theta);
    lp.grad();

    double lp_val = lp.val();
    double alpha_adj = alpha.adj();
    Matrix<double, Dynamic, Dynamic> x_adj(3, 1);
    double beta_adj = beta.adj();
    for (size_t j = 0; j < 3; j++) {
      x_adj(j, 0) = x(j, 0).adj();
    }

    stan::math::recover_memory();

    var beta2 = betareal;
    Matrix<var, Dynamic, Dynamic> x2 = xreal;
    var alpha2 = alphareal[0];
    var lp2 = stan::math::poisson_log_glm_lpmf(y, x2, alpha2, beta2);
    lp2.grad();
    EXPECT_FLOAT_EQ(lp_val, lp2.val());
    EXPECT_FLOAT_EQ(beta_adj, beta2.adj());
    EXPECT_FLOAT_EQ(alpha_adj, alpha2.adj());
    for (size_t j = 0; j < 3; j++) {
      EXPECT_FLOAT_EQ(x_adj(j, 0), x2(j, 0).adj());
    }
  }
}

//  We check that the gradients of the new regression match those of one built
//  from existing primitives, for the GLM with varying intercept.
TEST(ProbDistributionsPoissonLogGLM, glm_matches_poisson_varying_intercept) {
  using Eigen::Dynamic;
  using Eigen::Matrix;
  using stan::math::var;
  using std::vector;
  for (size_t ii = 0; ii < 42; ii++) {
    vector<int> y(3);
    for (size_t i = 0; i < 3; i++) {
      y[i] = Matrix<unsigned int, 1, 1>::Random(1, 1)[0] % 200;
    }
    Matrix<double, Dynamic, Dynamic> xreal
        = Matrix<double, Dynamic, Dynamic>::Random(3, 2);
    Matrix<double, Dynamic, 1> betareal
        = Matrix<double, Dynamic, Dynamic>::Random(2, 1);
    Matrix<double, Dynamic, 1> alphareal
        = Matrix<double, Dynamic, Dynamic>::Random(3, 1);

    Matrix<var, Dynamic, 1> beta = betareal;
    Matrix<var, Dynamic, 1> theta(3, 1);
    Matrix<var, Dynamic, Dynamic> x = xreal;
    Matrix<var, Dynamic, 1> alpha = alphareal;
    theta = (x * beta) + alpha;
    var lp = stan::math::poisson_log_lpmf(y, theta);

    lp.grad();

    double lp_val = lp.val();
    Matrix<double, Dynamic, 1> alpha_adj(3, 1);
    Matrix<double, Dynamic, Dynamic> x_adj(3, 2);
    Matrix<double, Dynamic, 1> beta_adj(2, 1);
    for (size_t i = 0; i < 2; i++) {
      beta_adj[i] = beta[i].adj();
      for (size_t j = 0; j < 3; j++) {
        x_adj(j, i) = x(j, i).adj();
      }
    }
    for (size_t j = 0; j < 3; j++) {
      alpha_adj[j] = alpha[j].adj();
    }

    stan::math::recover_memory();

    Matrix<var, Dynamic, 1> beta2 = betareal;
    Matrix<var, Dynamic, Dynamic> x2 = xreal;
    Matrix<var, Dynamic, 1> alpha2 = alphareal;

    var lp2 = stan::math::poisson_log_glm_lpmf(y, x2, alpha2, beta2);
    lp2.grad();

    EXPECT_FLOAT_EQ(lp_val, lp2.val());
    for (size_t i = 0; i < 2; i++) {
      EXPECT_FLOAT_EQ(beta_adj[i], beta2[i].adj());
      for (size_t j = 0; j < 3; j++) {
        EXPECT_FLOAT_EQ(x_adj(j, i), x2(j, i).adj());
      }
    }
    for (size_t j = 0; j < 3; j++) {
      EXPECT_FLOAT_EQ(alpha_adj[j], alpha2[j].adj());
    }
  }
}

//  We check that we can instantiate all different interface types.
TEST(ProbDistributionsPoissonLogGLM, glm_matches_poisson_log_interface_types) {
  using Eigen::Dynamic;
  using Eigen::Matrix;
  using stan::math::var;
  using std::vector;
  double value = 0;
  double value2 = 0;

  int i = 1;
  std::vector<int> vi = {{1, 0}};
  double d = 1.0;
  std::vector<double> vd = {{1.0, 2.0}};
  Eigen::VectorXd ev(2);
  Eigen::RowVectorXd rv(2);
  Eigen::MatrixXd m1(1, 1);
  m1 << 1.0;
  Eigen::MatrixXd m(2, 2);
  ev << 1.0, 2.0;
  rv << 1.0, 2.0;
  m << 1.0, 2.0, 3.0, 4.0;

  value += stan::math::poisson_log_glm_lpmf(i, m1, d, d);
  value += stan::math::poisson_log_glm_lpmf(vi, m, vd, vd);
  value += stan::math::poisson_log_glm_lpmf(vi, m, ev, ev);
  value += stan::math::poisson_log_glm_lpmf(vi, m, rv, rv);

  var v = 1.0;
  std::vector<var> vv = {{1.0, 2.0}};
  Eigen::Matrix<var, -1, 1> evv(2);
  Eigen::Matrix<var, 1, -1> rvv(2);
  Eigen::Matrix<var, -1, -1> m1v(1, 1);
  m1v << 1.0;
  Eigen::Matrix<var, -1, -1> mv(2, 2);
  evv << 1.0, 2.0;
  rvv << 1.0, 2.0;
  mv << 1.0, 2.0, 3.0, 4.0;

  value2 += stan::math::poisson_log_glm_lpmf(i, m1v, v, v).val();
  value2 += stan::math::poisson_log_glm_lpmf(vi, mv, vv, vv).val();
  value2 += stan::math::poisson_log_glm_lpmf(vi, mv, evv, evv).val();
  value2 += stan::math::poisson_log_glm_lpmf(vi, mv, rvv, rvv).val();

  EXPECT_FLOAT_EQ(value, value2);
}

//  We check that the right errors are thrown.
TEST(ProbDistributionsPoissonLogGLM, glm_matches_poisson_log_error_checking) {
  int N = 3;
  int M = 2;
  int W = 4;

  Eigen::Matrix<int, -1, 1> y(N, 1);
  for (int n = 0; n < N; n++) {
    y[n] = Eigen::Matrix<unsigned int, -1, 1>::Random(1, 1)[0] % 200;
  }
  Eigen::Matrix<int, -1, 1> yw1(W, 1);
  for (int n = 0; n < W; n++) {
    yw1[n] = Eigen::Matrix<unsigned int, -1, 1>::Random(1, 1)[0] % 200;
  }
  Eigen::Matrix<int, -1, 1> yw2(N, 1);
  for (int n = 0; n < N; n++) {
    yw2[n] = -(Eigen::Matrix<unsigned int, -1, 1>::Random(1, 1)[0] % 200);
  }
  Eigen::Matrix<double, -1, -1> x = Eigen::Matrix<double, -1, -1>::Random(N, M);
  Eigen::Matrix<double, -1, -1> xw1
      = Eigen::Matrix<double, -1, -1>::Random(W, M);
  Eigen::Matrix<double, -1, -1> xw2
      = Eigen::Matrix<double, -1, -1>::Random(N, W);
  Eigen::Matrix<double, -1, -1> xw3
      = Eigen::Matrix<double, -1, -1>::Random(N, M) * NAN;
  Eigen::Matrix<double, -1, 1> alpha
      = Eigen::Matrix<double, -1, 1>::Random(N, 1);
  Eigen::Matrix<double, -1, 1> alphaw1
      = Eigen::Matrix<double, -1, 1>::Random(W, 1);
  Eigen::Matrix<double, -1, 1> alphaw2
      = Eigen::Matrix<double, -1, 1>::Random(N, 1) * NAN;
  Eigen::Matrix<double, -1, 1> beta
      = Eigen::Matrix<double, -1, 1>::Random(M, 1);
  Eigen::Matrix<double, -1, 1> betaw1
      = Eigen::Matrix<double, -1, 1>::Random(W, 1);
  Eigen::Matrix<double, -1, 1> betaw2
      = Eigen::Matrix<double, -1, 1>::Random(M, 1) * NAN;

  EXPECT_THROW(stan::math::poisson_log_glm_lpmf(yw1, x, alpha, beta),
               std::invalid_argument);
  EXPECT_THROW(stan::math::poisson_log_glm_lpmf(yw2, x, alpha, beta),
               std::domain_error);
  EXPECT_THROW(stan::math::poisson_log_glm_lpmf(y, xw1, alpha, beta),
               std::invalid_argument);
  EXPECT_THROW(stan::math::poisson_log_glm_lpmf(y, xw2, alpha, beta),
               std::invalid_argument);
  EXPECT_THROW(stan::math::poisson_log_glm_lpmf(y, xw3, alpha, beta),
               std::domain_error);
  EXPECT_THROW(stan::math::poisson_log_glm_lpmf(y, x, alphaw1, beta),
               std::invalid_argument);
  EXPECT_THROW(stan::math::poisson_log_glm_lpmf(y, x, alphaw2, beta),
               std::domain_error);
  EXPECT_THROW(stan::math::poisson_log_glm_lpmf(y, x, alpha, betaw1),
               std::invalid_argument);
  EXPECT_THROW(stan::math::poisson_log_glm_lpmf(y, x, alpha, betaw2),
               std::domain_error);
}

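//  We check that the value of the new regression matches that of one built
//  from existing primitives when dropping constant terms (propto = true).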
TEST(ProbDistributionsPoissonLogGLM, glm_matches_poisson_log_vars_propto) {
  using Eigen::Dynamic;
  using Eigen::Matrix;
  using stan::math::var;
  using std::vector;
  vector<int> y{2, 0, 1, 2, 1, 0, 0, 1, 3, 0};
  Matrix<double, Dynamic, Dynamic> x(10, 3);
  x << -1.87936, 0.55093, -2.50689, 4.78584, 0.988523, -2.46141, 1.46229,
      2.21497, 1.72734, -0.916165, -0.563808, 0.165279, -0.752066, 1.43575,
      0.296557, -0.738422, 0.438686, 0.664492, 0.518203, -0.27485, 2, 0, 1, 2,
      1, 0, 0, 1, 3, 0;
  Matrix<var, Dynamic, 1> beta(3, 1);
  beta << 1.17711, 1.24432, -0.596639;
  var alpha = -1.04272;
  Matrix<var, Dynamic, 1> alphavec = alpha * Matrix<var, 10, 1>::Ones();
  Matrix<var, Dynamic, 1> theta(10, 1);
  theta = x * beta + alphavec;
  var lp = stan::math::poisson_log_lpmf<true>(y, theta);
  double lp_val = lp.val();
  var lp1 = stan::math::poisson_log_glm_lpmf<true>(y, x, alpha, beta);
  double lp1_val = lp1.val();
  EXPECT_FLOAT_EQ(lp_val, lp1_val);
}