#include <stan/math/rev.hpp>
#include <stan/math/prim.hpp>
#include <gtest/gtest.h>
#include <vector>
#include <cmath>

//  We check that the values of the new regression match those of one built
//  from existing primitives.
TEST(ProbDistributionsBernoulliLogitGLM, glm_matches_bernoulli_logit_doubles) {
  using Eigen::Dynamic;
  using Eigen::Matrix;
  using stan::math::var;
  using std::vector;
  vector<int> y{1, 0, 1};
  Matrix<double, Dynamic, Dynamic> x(3, 2);
  x << -12, 46, -42, 24, 25, 27;
  Matrix<double, Dynamic, 1> beta(2, 1);
  beta << 0.3, 2;
  double alpha = 0.3;
  Matrix<double, Dynamic, 1> alphavec = alpha * Matrix<double, 3, 1>::Ones();
  Matrix<double, Dynamic, 1> theta(3, 1);
  theta = x * beta + alphavec;

  EXPECT_FLOAT_EQ((stan::math::bernoulli_logit_lpmf(y, theta)),
                  (stan::math::bernoulli_logit_glm_lpmf(y, x, alpha, beta)));
  EXPECT_FLOAT_EQ(
      (stan::math::bernoulli_logit_lpmf<true>(y, theta)),
      (stan::math::bernoulli_logit_glm_lpmf<true>(y, x, alpha, beta)));
}

//  We check that the values of the new regression match those of one built
//  from existing primitives.
TEST(ProbDistributionsBernoulliLogitGLM,
     glm_matches_bernoulli_logit_doubles_rand) {
  using Eigen::Dynamic;
  using Eigen::Matrix;
  using stan::math::var;
  using std::vector;
  for (size_t ii = 0; ii < 200; ii++) {
    vector<int> y(3);
    for (size_t i = 0; i < 3; i++) {
      y[i] = Matrix<unsigned int, 1, 1>::Random(1, 1)[0] % 2;
    }
    Matrix<double, Dynamic, Dynamic> x
        = Matrix<double, Dynamic, Dynamic>::Random(3, 2);
    Matrix<double, Dynamic, 1> beta
        = Matrix<double, Dynamic, Dynamic>::Random(2, 1);
    Matrix<double, 1, 1> alphamat = Matrix<double, 1, 1>::Random(1, 1);
    double alpha = alphamat[0];
    Matrix<double, Dynamic, 1> alphavec = alpha * Matrix<double, 3, 1>::Ones();
    Matrix<double, Dynamic, 1> theta(3, 1);
    theta = x * beta + alphavec;

    EXPECT_FLOAT_EQ((stan::math::bernoulli_logit_lpmf(y, theta)),
                    (stan::math::bernoulli_logit_glm_lpmf(y, x, alpha, beta)));
    EXPECT_FLOAT_EQ(
        (stan::math::bernoulli_logit_lpmf<true>(y, theta)),
        (stan::math::bernoulli_logit_glm_lpmf<true>(y, x, alpha, beta)));
  }
}

//  We check that the gradients of the new regression match those of one built
//  from existing primitives.
TEST(ProbDistributionsBernoulliLogitGLM, glm_matches_bernoulli_logit_vars) {
  using Eigen::Dynamic;
  using Eigen::Matrix;
  using stan::math::var;
  using std::vector;
  vector<int> y{1, 0, 1};
  Matrix<var, Dynamic, Dynamic> x(3, 2);
  x << -1234, 46, -42, 24, 25, 27;
  Matrix<var, Dynamic, 1> beta(2, 1);
  beta << 0.3, 2000;
  var alpha = 0.3;
  Matrix<var, Dynamic, 1> alphavec = alpha * Matrix<double, 3, 1>::Ones();
  Matrix<var, Dynamic, 1> theta(3, 1);
  theta = x * beta + alphavec;

  var lp = stan::math::bernoulli_logit_lpmf(y, theta);
  lp.grad();

  double lp_val = lp.val();
  double alpha_adj = alpha.adj();
  Matrix<double, Dynamic, Dynamic> x_adj(3, 2);
  Matrix<double, Dynamic, 1> beta_adj(2, 1);
  for (size_t i = 0; i < 2; i++) {
    beta_adj[i] = beta[i].adj();
    for (size_t j = 0; j < 3; j++) {
      x_adj(j, i) = x(j, i).adj();
    }
  }

  stan::math::recover_memory();

  vector<int> y2{1, 0, 1};
  Matrix<var, Dynamic, Dynamic> x2(3, 2);
  x2 << -1234, 46, -42, 24, 25, 27;
  Matrix<var, Dynamic, 1> beta2(2, 1);
  beta2 << 0.3, 2000;
  var alpha2 = 0.3;

  var lp2 = stan::math::bernoulli_logit_glm_lpmf(y2, x2, alpha2, beta2);
  lp2.grad();

  EXPECT_FLOAT_EQ(lp_val, lp2.val());
  EXPECT_FLOAT_EQ(alpha_adj, alpha2.adj());
  for (size_t i = 0; i < 2; i++) {
    EXPECT_FLOAT_EQ(beta_adj[i], beta2[i].adj());
    for (size_t j = 0; j < 3; j++) {
      EXPECT_FLOAT_EQ(x_adj(j, i), x2(j, i).adj());
    }
  }
}

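//  We check that a row-vector x broadcasts correctly: the GLM with a single
//  row of x must match the GLM with that row replicated for every instance,
//  in value and in gradients (with the x adjoints summed over instances).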
TEST(ProbDistributionsBernoulliLogitGLM, broadcast_x) {
  using Eigen::Dynamic;
  using Eigen::Matrix;
  using stan::math::var;
  using std::vector;
  vector<int> y{1, 0, 1};
  Matrix<double, 1, Dynamic> x(1, 2);
  x << -12, 46;
  Matrix<var, 1, Dynamic> x1 = x;
  Matrix<var, Dynamic, Dynamic> x_mat = x.replicate(3, 1);
  Matrix<double, Dynamic, 1> beta(2, 1);
  beta << 0.3, 2;
  Matrix<var, Dynamic, 1> beta1 = beta;
  Matrix<var, Dynamic, 1> beta2 = beta;
  var alpha1 = 0.3;
  var alpha2 = 0.3;

  var lp1 = stan::math::bernoulli_logit_glm_lpmf(y, x1, alpha1, beta1);
  var lp2 = stan::math::bernoulli_logit_glm_lpmf(y, x_mat, alpha2, beta2);

  EXPECT_DOUBLE_EQ(lp1.val(), lp2.val());

  (lp1 + lp2).grad();

  for (int i = 0; i < 2; i++) {
    EXPECT_DOUBLE_EQ(x1[i].adj(), x_mat.col(i).adj().sum());
    EXPECT_DOUBLE_EQ(beta1[i].adj(), beta2[i].adj());
  }
  EXPECT_DOUBLE_EQ(alpha1.adj(), alpha2.adj());
}

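//  We check that a scalar y broadcasts correctly: the GLM with a single
//  integer outcome must match the GLM with that outcome repeated for every
//  instance, in value and in gradients.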
TEST(ProbDistributionsBernoulliLogitGLM, broadcast_y) {
  using Eigen::Dynamic;
  using Eigen::Matrix;
  using stan::math::var;
  using std::vector;
  int y = 1;
  Matrix<int, Dynamic, 1> y_vec = Matrix<int, Dynamic, 1>::Constant(3, 1, y);
  Matrix<double, Dynamic, Dynamic> x(3, 2);
  x << -12, 46, -42, 24, 25, 27;
  Matrix<var, Dynamic, Dynamic> x1 = x;
  Matrix<var, Dynamic, Dynamic> x2 = x;
  Matrix<double, Dynamic, 1> beta(2, 1);
  beta << 0.3, 2;
  Matrix<var, Dynamic, 1> beta1 = beta;
  Matrix<var, Dynamic, 1> beta2 = beta;
  var alpha1 = 0.3;
  var alpha2 = 0.3;

  var lp1 = stan::math::bernoulli_logit_glm_lpmf(y, x1, alpha1, beta1);
  var lp2 = stan::math::bernoulli_logit_glm_lpmf(y_vec, x2, alpha2, beta2);

  EXPECT_DOUBLE_EQ(lp1.val(), lp2.val());

  (lp1 + lp2).grad();

  for (int i = 0; i < 2; i++) {
    for (int j = 0; j < 3; j++) {
      EXPECT_DOUBLE_EQ(x1(j, i).adj(), x2(j, i).adj());
    }
    EXPECT_DOUBLE_EQ(beta1[i].adj(), beta2[i].adj());
  }
  EXPECT_DOUBLE_EQ(alpha1.adj(), alpha2.adj());
}

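//  We check that the new regression matches one built from existing
//  primitives when there are zero instances, i.e. an empty y and an x with
//  zero rows.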
TEST(ProbDistributionsBernoulliLogitGLM,
     glm_matches_bernoulli_logit_vars_zero_instances) {
  using Eigen::Dynamic;
  using Eigen::Matrix;
  using stan::math::var;
  using std::vector;
  vector<int> y{};
  Matrix<var, Dynamic, Dynamic> x(0, 2);
  Matrix<var, Dynamic, 1> beta(2, 1);
  beta << 0.3, 2000;
  var alpha = 0.3;
  Matrix<var, Dynamic, 1> theta(0, 1);

  var lp = stan::math::bernoulli_logit_lpmf(y, theta);
  lp.grad();

  double lp_val = lp.val();
  double alpha_adj = alpha.adj();
  Matrix<double, Dynamic, 1> beta_adj(2, 1);
  for (size_t i = 0; i < 2; i++) {
    beta_adj[i] = beta[i].adj();
  }

  stan::math::recover_memory();

  vector<int> y2{};
  Matrix<var, Dynamic, Dynamic> x2(0, 2);
  Matrix<var, Dynamic, 1> beta2(2, 1);
  beta2 << 0.3, 2000;
  var alpha2 = 0.3;

  var lp2 = stan::math::bernoulli_logit_glm_lpmf(y2, x2, alpha2, beta2);
  lp2.grad();

  EXPECT_FLOAT_EQ(lp_val, lp2.val());
  EXPECT_FLOAT_EQ(alpha_adj, alpha2.adj());
  for (size_t i = 0; i < 2; i++) {
    EXPECT_FLOAT_EQ(beta_adj[i], beta2[i].adj());
  }
}

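//  We check that the new regression matches one built from existing
//  primitives when there are zero attributes, i.e. an x with zero columns
//  and an empty beta.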
TEST(ProbDistributionsBernoulliLogitGLM,
     glm_matches_bernoulli_logit_vars_zero_attributes) {
  using Eigen::Dynamic;
  using Eigen::Matrix;
  using stan::math::var;
  using std::vector;
  vector<int> y{1, 0, 1};
  Matrix<var, Dynamic, Dynamic> x(3, 0);
  Matrix<var, Dynamic, 1> beta(0, 1);
  var alpha = 0.3;
  Matrix<var, Dynamic, 1> alphavec = alpha * Matrix<double, 3, 1>::Ones();
  Matrix<var, Dynamic, 1> theta(3, 1);
  theta = x * beta + alphavec;

  var lp = stan::math::bernoulli_logit_lpmf(y, theta);
  lp.grad();

  double lp_val = lp.val();
  double alpha_adj = alpha.adj();

  stan::math::recover_memory();

  vector<int> y2{1, 0, 1};
  Matrix<var, Dynamic, Dynamic> x2(3, 0);
  Matrix<var, Dynamic, 1> beta2(0, 1);
  var alpha2 = 0.3;

  var lp2 = stan::math::bernoulli_logit_glm_lpmf(y2, x2, alpha2, beta2);
  lp2.grad();

  EXPECT_FLOAT_EQ(lp_val, lp2.val());
  EXPECT_FLOAT_EQ(alpha_adj, alpha2.adj());
}

//  We check that the gradients of the new regression match those of one built
//  from existing primitives.
TEST(ProbDistributionsBernoulliLogitGLM,
     glm_matches_bernoulli_logit_vars_rand) {
  using Eigen::Dynamic;
  using Eigen::Matrix;
  using stan::math::var;
  using std::vector;
  for (size_t ii = 0; ii < 42; ii++) {
    vector<int> y(3);
    for (size_t i = 0; i < 3; i++) {
      y[i] = Matrix<unsigned int, 1, 1>::Random(1, 1)[0] % 2;
    }
    Matrix<double, Dynamic, Dynamic> xreal
        = Matrix<double, Dynamic, Dynamic>::Random(3, 2);
    Matrix<double, Dynamic, 1> betareal
        = Matrix<double, Dynamic, Dynamic>::Random(2, 1);
    Matrix<double, 1, 1> alphareal = Matrix<double, 1, 1>::Random(1, 1);

    Matrix<var, Dynamic, 1> beta = betareal;
    Matrix<var, Dynamic, 1> theta(3, 1);
    Matrix<var, Dynamic, Dynamic> x = xreal;
    var alpha = alphareal[0];
    Matrix<var, Dynamic, 1> alphavec = Matrix<double, 3, 1>::Ones() * alpha;

    theta = (x * beta) + alphavec;
    var lp = stan::math::bernoulli_logit_lpmf(y, theta);

    lp.grad();

    double lp_val = lp.val();
    double alpha_adj = alpha.adj();
    Matrix<double, Dynamic, Dynamic> x_adj(3, 2);
    Matrix<double, Dynamic, 1> beta_adj(2, 1);
    for (size_t i = 0; i < 2; i++) {
      beta_adj[i] = beta[i].adj();
      for (size_t j = 0; j < 3; j++) {
        x_adj(j, i) = x(j, i).adj();
      }
    }

    stan::math::recover_memory();

    Matrix<var, Dynamic, 1> beta2 = betareal;
    Matrix<var, Dynamic, Dynamic> x2 = xreal;
    var alpha2 = alphareal[0];

    var lp2 = stan::math::bernoulli_logit_glm_lpmf(y, x2, alpha2, beta2);
    lp2.grad();

    EXPECT_FLOAT_EQ(lp_val, lp2.val());
    EXPECT_FLOAT_EQ(alpha_adj, alpha2.adj());
    for (size_t i = 0; i < 2; i++) {
      EXPECT_FLOAT_EQ(beta_adj[i], beta2[i].adj());
      for (size_t j = 0; j < 3; j++) {
        EXPECT_FLOAT_EQ(x_adj(j, i), x2(j, i).adj());
      }
    }
  }
}

//  We check that the gradients of the new regression match those of one built
//  from existing primitives, when beta is a scalar.
TEST(ProbDistributionsBernoulliLogitGLM,
     glm_matches_bernoulli_logit_vars_rand_scal_beta) {
  using Eigen::Dynamic;
  using Eigen::Matrix;
  using stan::math::var;
  using std::vector;
  for (size_t ii = 0; ii < 42; ii++) {
    vector<int> y(3);
    for (size_t i = 0; i < 3; i++) {
      y[i] = Matrix<unsigned int, 1, 1>::Random(1, 1)[0] % 2;
    }
    Matrix<double, Dynamic, Dynamic> xreal
        = Matrix<double, Dynamic, Dynamic>::Random(3, 1);
    double betareal = Matrix<double, Dynamic, Dynamic>::Random(1, 1)(0, 0);
    Matrix<double, 1, 1> alphareal = Matrix<double, 1, 1>::Random(1, 1);
    var beta = betareal;
    Matrix<var, Dynamic, 1> theta(3, 1);
    Matrix<var, Dynamic, Dynamic> x = xreal;
    var alpha = alphareal[0];
    Matrix<var, Dynamic, 1> alphavec = Matrix<double, 3, 1>::Ones() * alpha;
    theta = (x * beta) + alphavec;
    var lp = stan::math::bernoulli_logit_lpmf(y, theta);
    lp.grad();

    double lp_val = lp.val();
    double alpha_adj = alpha.adj();
    Matrix<double, Dynamic, Dynamic> x_adj(3, 1);
    double beta_adj = beta.adj();
    for (size_t j = 0; j < 3; j++) {
      x_adj(j, 0) = x(j, 0).adj();
    }

    stan::math::recover_memory();

    var beta2 = betareal;
    Matrix<var, Dynamic, Dynamic> x2 = xreal;
    var alpha2 = alphareal[0];
    var lp2 = stan::math::bernoulli_logit_glm_lpmf(y, x2, alpha2, beta2);
    lp2.grad();
    EXPECT_FLOAT_EQ(lp_val, lp2.val());
    EXPECT_FLOAT_EQ(beta_adj, beta2.adj());
    EXPECT_FLOAT_EQ(alpha_adj, alpha2.adj());
    for (size_t j = 0; j < 3; j++) {
      EXPECT_FLOAT_EQ(x_adj(j, 0), x2(j, 0).adj());
    }
  }
}

//  We check that the gradients of the new regression match those of one built
//  from existing primitives, for the GLM with varying intercept.
TEST(ProbDistributionsBernoulliLogitGLM,
     glm_matches_bernoulli_varying_intercept) {
  using Eigen::Dynamic;
  using Eigen::Matrix;
  using stan::math::var;
  using std::vector;
  for (size_t ii = 0; ii < 42; ii++) {
    vector<int> y(3);
    for (size_t i = 0; i < 3; i++) {
      y[i] = Matrix<unsigned int, 1, 1>::Random(1, 1)[0] % 2;
    }
    Matrix<double, Dynamic, Dynamic> xreal
        = Matrix<double, Dynamic, Dynamic>::Random(3, 2);
    Matrix<double, Dynamic, 1> betareal
        = Matrix<double, Dynamic, Dynamic>::Random(2, 1);
    Matrix<double, Dynamic, 1> alphareal
        = Matrix<double, Dynamic, Dynamic>::Random(3, 1);

    Matrix<var, Dynamic, 1> beta = betareal;
    Matrix<var, Dynamic, 1> theta(3, 1);
    Matrix<var, Dynamic, Dynamic> x = xreal;
    Matrix<var, Dynamic, 1> alpha = alphareal;
    theta = (x * beta) + alpha;
    var lp = stan::math::bernoulli_logit_lpmf(y, theta);

    lp.grad();

    double lp_val = lp.val();
    Matrix<double, Dynamic, 1> alpha_adj(3, 1);
    Matrix<double, Dynamic, Dynamic> x_adj(3, 2);
    Matrix<double, Dynamic, 1> beta_adj(2, 1);
    for (size_t i = 0; i < 2; i++) {
      beta_adj[i] = beta[i].adj();
      for (size_t j = 0; j < 3; j++) {
        x_adj(j, i) = x(j, i).adj();
      }
    }
    for (size_t j = 0; j < 3; j++) {
      alpha_adj[j] = alpha[j].adj();
    }

    stan::math::recover_memory();

    Matrix<var, Dynamic, 1> beta2 = betareal;
    Matrix<var, Dynamic, Dynamic> x2 = xreal;
    Matrix<var, Dynamic, 1> alpha2 = alphareal;

    var lp2 = stan::math::bernoulli_logit_glm_lpmf(y, x2, alpha2, beta2);
    lp2.grad();

    EXPECT_FLOAT_EQ(lp_val, lp2.val());
    for (size_t i = 0; i < 2; i++) {
      EXPECT_FLOAT_EQ(beta_adj[i], beta2[i].adj());
      for (size_t j = 0; j < 3; j++) {
        EXPECT_FLOAT_EQ(x_adj(j, i), x2(j, i).adj());
      }
    }
    for (size_t j = 0; j < 3; j++) {
      EXPECT_FLOAT_EQ(alpha_adj[j], alpha2[j].adj());
    }
  }
}

//  We check that we can instantiate all different interface types.
TEST(ProbDistributionsBernoulliLogitGLM,
     glm_matches_bernoulli_logit_interface_types) {
  using Eigen::Dynamic;
  using Eigen::Matrix;
  using stan::math::var;
  using std::vector;
  double value = 0;
  double value2 = 0;

  int i = 1;
  std::vector<int> vi = {{1, 0}};
  double d = 1.0;
  std::vector<double> vd = {{1.0, 2.0}};
  Eigen::VectorXd ev(2);
  Eigen::RowVectorXd rv(2);
  Eigen::MatrixXd m1(1, 1);
  m1 << 1.0;
  Eigen::MatrixXd m(2, 2);
  ev << 1.0, 2.0;
  rv << 1.0, 2.0;
  m << 1.0, 2.0, 3.0, 4.0;

  value += stan::math::bernoulli_logit_glm_lpmf(i, m1, d, d);
  value += stan::math::bernoulli_logit_glm_lpmf(vi, m, vd, vd);
  value += stan::math::bernoulli_logit_glm_lpmf(vi, m, ev, ev);
  value += stan::math::bernoulli_logit_glm_lpmf(vi, m, rv, rv);

  var v = 1.0;
  std::vector<var> vv = {{1.0, 2.0}};
  Eigen::Matrix<var, -1, 1> evv(2);
  Eigen::Matrix<var, 1, -1> rvv(2);
  Eigen::Matrix<var, -1, -1> m1v(1, 1);
  m1v << 1.0;
  Eigen::Matrix<var, -1, -1> mv(2, 2);
  evv << 1.0, 2.0;
  rvv << 1.0, 2.0;
  mv << 1.0, 2.0, 3.0, 4.0;

  value2 += stan::math::bernoulli_logit_glm_lpmf(i, m1v, v, v).val();
  value2 += stan::math::bernoulli_logit_glm_lpmf(vi, mv, vv, vv).val();
  value2 += stan::math::bernoulli_logit_glm_lpmf(vi, mv, evv, evv).val();
  value2 += stan::math::bernoulli_logit_glm_lpmf(vi, mv, rvv, rvv).val();

  EXPECT_FLOAT_EQ(value, value2);
}

//  We check that the right errors are thrown.
TEST(ProbDistributionsBernoulliLogitGLM,
     glm_matches_bernoulli_logit_error_checking) {
  using Eigen::Dynamic;
  using Eigen::Matrix;
  using stan::math::var;
  using std::vector;
  int N = 3;
  int M = 2;
  int W = 4;

  // Valid outcomes in {0, 1}.
  Eigen::Matrix<int, -1, 1> y(N, 1);
  for (int n = 0; n < N; n++) {
    y[n] = Eigen::Matrix<unsigned int, -1, 1>::Random(1, 1)[0] % 2;
  }
  // Wrong length: W instead of N.
  Eigen::Matrix<int, -1, 1> yw1(W, 1);
  for (int n = 0; n < W; n++) {
    yw1[n] = Eigen::Matrix<unsigned int, -1, 1>::Random(1, 1)[0] % 2;
  }
  // Out-of-range outcomes: negative values.
  Eigen::Matrix<int, -1, 1> yw2(N, 1);
  for (int n = 0; n < N; n++) {
    yw2[n] = -(Eigen::Matrix<unsigned int, -1, 1>::Random(1, 1)[0] % 2);
  }
  // Out-of-range outcomes: values greater than 1.
  Eigen::Matrix<int, -1, 1> yw3(N, 1);
  for (int n = 0; n < N; n++) {
    yw3[n] = 42 + (Eigen::Matrix<unsigned int, -1, 1>::Random(1, 1)[0] % 2);
  }
  // Valid x, plus variants with the wrong number of rows, the wrong number
  // of columns, and non-finite entries.
  Eigen::Matrix<double, -1, -1> x = Eigen::Matrix<double, -1, -1>::Random(N, M);
  Eigen::Matrix<double, -1, -1> xw1
      = Eigen::Matrix<double, -1, -1>::Random(W, M);
  Eigen::Matrix<double, -1, -1> xw2
      = Eigen::Matrix<double, -1, -1>::Random(N, W);
  Eigen::Matrix<double, -1, -1> xw3
      = Eigen::Matrix<double, -1, -1>::Random(N, M) * NAN;
  // Valid alpha, plus variants with the wrong size and non-finite entries.
  Eigen::Matrix<double, -1, 1> alpha
      = Eigen::Matrix<double, -1, 1>::Random(N, 1);
  Eigen::Matrix<double, -1, 1> alphaw1
      = Eigen::Matrix<double, -1, 1>::Random(W, 1);
  Eigen::Matrix<double, -1, 1> alphaw2
      = Eigen::Matrix<double, -1, 1>::Random(N, 1) * NAN;
  // Valid beta, plus variants with the wrong size and non-finite entries.
  Eigen::Matrix<double, -1, 1> beta
      = Eigen::Matrix<double, -1, 1>::Random(M, 1);
  Eigen::Matrix<double, -1, 1> betaw1
      = Eigen::Matrix<double, -1, 1>::Random(W, 1);
  Eigen::Matrix<double, -1, 1> betaw2
      = Eigen::Matrix<double, -1, 1>::Random(M, 1) * NAN;

  // Size mismatches throw std::invalid_argument; invalid or non-finite
  // values throw std::domain_error.
  EXPECT_THROW(stan::math::bernoulli_logit_glm_lpmf(yw1, x, alpha, beta),
               std::invalid_argument);
  EXPECT_THROW(stan::math::bernoulli_logit_glm_lpmf(yw2, x, alpha, beta),
               std::domain_error);
  EXPECT_THROW(stan::math::bernoulli_logit_glm_lpmf(yw3, x, alpha, beta),
               std::domain_error);
  EXPECT_THROW(stan::math::bernoulli_logit_glm_lpmf(y, xw1, alpha, beta),
               std::invalid_argument);
  EXPECT_THROW(stan::math::bernoulli_logit_glm_lpmf(y, xw2, alpha, beta),
               std::invalid_argument);
  EXPECT_THROW(stan::math::bernoulli_logit_glm_lpmf(y, xw3, alpha, beta),
               std::domain_error);
  EXPECT_THROW(stan::math::bernoulli_logit_glm_lpmf(y, x, alphaw1, beta),
               std::invalid_argument);
  EXPECT_THROW(stan::math::bernoulli_logit_glm_lpmf(y, x, alphaw2, beta),
               std::domain_error);
  EXPECT_THROW(stan::math::bernoulli_logit_glm_lpmf(y, x, alpha, betaw1),
               std::invalid_argument);
  EXPECT_THROW(stan::math::bernoulli_logit_glm_lpmf(y, x, alpha, betaw2),
               std::domain_error);
}