1 #pragma once
2 /* MAT.h
3 *
4 * Copyright (C) 2017-2020 Paul Boersma
5 *
6 * This code is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 *
11 * This code is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
14 * See the GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this work. If not, see <http://www.gnu.org/licenses/>.
18 */
19
/*
	Generate, for each of the five tensor assignment operators
	(expanded by GENERATE_FIVE_TENSOR_FUNCTIONS, which is defined elsewhere;
	presumably <<=, +=, -=, *= and /= — confirm in the melder headers),
	a function that combines every element of `target` with the scalar `number`.
	The stride comparison chooses the loop nesting whose INNER loop walks the
	smaller stride, so that memory is accessed as sequentially as possible.
*/
#define GENERATE_ONE_TENSOR_FUNCTION(operator, op) \
	inline void operator (MATVU const& target, double number) noexcept { \
		integer mindim = ( target.rowStride < target.colStride ? 1 : 2 ); \
		if (mindim == 1) \
			for (integer icol = 1; icol <= target.ncol; icol ++) \
				for (integer irow = 1; irow <= target.nrow; irow ++) \
					target [irow] [icol] op number; \
		else \
			for (integer irow = 1; irow <= target.nrow; irow ++) \
				for (integer icol = 1; icol <= target.ncol; icol ++) \
					target [irow] [icol] op number; \
	}
GENERATE_FIVE_TENSOR_FUNCTIONS
#undef GENERATE_ONE_TENSOR_FUNCTION
34
/*
	Generate the five assignment functions that combine `target` elementwise
	with an equally-sized matrix `x` (e.g. target += x).
	Not noexcept, because Melder_assert may report a failed shape check.
	As above, the inner loop runs along the smaller of target's two strides.
*/
#define GENERATE_ONE_TENSOR_FUNCTION(operator, op) \
	inline void operator (MATVU const& target, constMATVU const& x) { \
		Melder_assert (target.nrow == x.nrow); \
		Melder_assert (target.ncol == x.ncol); \
		integer mindim = ( target.rowStride < target.colStride ? 1 : 2 ); \
		if (mindim == 1) \
			for (integer icol = 1; icol <= target.ncol; icol ++) \
				for (integer irow = 1; irow <= target.nrow; irow ++) \
					target [irow] [icol] op x [irow] [icol]; \
		else \
			for (integer irow = 1; irow <= target.nrow; irow ++) \
				for (integer icol = 1; icol <= target.ncol; icol ++) \
					target [irow] [icol] op x [irow] [icol]; \
	}
GENERATE_FIVE_TENSOR_FUNCTIONS
#undef GENERATE_ONE_TENSOR_FUNCTION
51
/*
	Generate the five assignment functions that combine every ROW of `target`
	with the row vector `x` (x is indexed by column, so it is broadcast down
	the rows). Only the column count is checked against the vector's size.
*/
#define GENERATE_ONE_TENSOR_FUNCTION(operator, op) \
	inline void operator (MATVU const& target, constVECVU const& x) { \
		Melder_assert (target.ncol == x.size); \
		integer mindim = ( target.rowStride < target.colStride ? 1 : 2 ); \
		if (mindim == 1) \
			for (integer icol = 1; icol <= target.ncol; icol ++) \
				for (integer irow = 1; irow <= target.nrow; irow ++) \
					target [irow] [icol] op x [icol]; \
		else \
			for (integer irow = 1; irow <= target.nrow; irow ++) \
				for (integer icol = 1; icol <= target.ncol; icol ++) \
					target [irow] [icol] op x [icol]; \
	}
GENERATE_FIVE_TENSOR_FUNCTIONS
#undef GENERATE_ONE_TENSOR_FUNCTION
67
/*
	Lazy expression type for `matrix + scalar` (and `scalar + matrix`).
	operator+ only captures a reference to x; the work happens when the
	expression is assigned to a target by one of the generated functions below.
	The constMATVU is held by reference, so the expression must be consumed
	within the same full expression (as in `target <<= x + 5.0`).
*/
struct TypeMATadd_MAT_NUM { constMATVU const& x; double number; };
inline TypeMATadd_MAT_NUM operator+ (constMATVU const& x, double number) { return { x, number }; }
inline TypeMATadd_MAT_NUM operator+ (double number, constMATVU const& x) { return { x, number }; }
#define GENERATE_ONE_TENSOR_FUNCTION(operator, op) \
	inline void operator (MATVU const& target, TypeMATadd_MAT_NUM const& expr) noexcept { \
		Melder_assert (expr.x.nrow == target.nrow); \
		Melder_assert (expr.x.ncol == target.ncol); \
		for (integer irow = 1; irow <= expr.x.nrow; irow ++) \
			for (integer icol = 1; icol <= expr.x.ncol; icol ++) \
				target [irow] [icol] op expr.x [irow] [icol] + expr.number; \
	}
GENERATE_FIVE_TENSOR_FUNCTIONS
#undef GENERATE_ONE_TENSOR_FUNCTION
add_MAT(constMATVU const & x,double number)81 inline autoMAT add_MAT (constMATVU const& x, double number) {
82 autoMAT result = raw_MAT (x.nrow, x.ncol);
83 result.all() <<= x + number;
84 return result;
85 }
86
/*
	Lazy expression type for `matrix * scalar` (and `scalar * matrix`);
	evaluated elementwise when assigned to a target.
*/
struct TypeMATmultiply_MAT_NUM { constMATVU const& x; double number; };
inline TypeMATmultiply_MAT_NUM operator* (constMATVU const& x, double number) { return { x, number }; }
inline TypeMATmultiply_MAT_NUM operator* (double number, constMATVU const& x) { return { x, number }; }
#define GENERATE_ONE_TENSOR_FUNCTION(operator, op) \
	inline void operator (MATVU const& target, TypeMATmultiply_MAT_NUM const& expr) noexcept { \
		Melder_assert (expr.x.nrow == target.nrow); \
		Melder_assert (expr.x.ncol == target.ncol); \
		for (integer irow = 1; irow <= expr.x.nrow; irow ++) \
			for (integer icol = 1; icol <= expr.x.ncol; icol ++) \
				target [irow] [icol] op expr.x [irow] [icol] * expr.number; \
	}
GENERATE_FIVE_TENSOR_FUNCTIONS
#undef GENERATE_ONE_TENSOR_FUNCTION
multiply_MAT(constMATVU const & x,double number)100 inline autoMAT multiply_MAT (constMATVU const& x, double number) {
101 autoMAT result = raw_MAT (x.nrow, x.ncol);
102 result.all() <<= x * number;
103 return result;
104 }
105
/*
	Lazy expression type for `matrix - scalar`; evaluated elementwise on
	assignment. The reversed form (scalar - matrix) has its own type below.
*/
struct TypeMATsubtract_MAT_NUM { constMATVU const& x; double number; };
inline TypeMATsubtract_MAT_NUM operator- (constMATVU const& x, double number) { return { x, number }; }
#define GENERATE_ONE_TENSOR_FUNCTION(operator, op) \
	inline void operator (MATVU const& target, TypeMATsubtract_MAT_NUM const& expr) noexcept { \
		Melder_assert (expr.x.nrow == target.nrow); \
		Melder_assert (expr.x.ncol == target.ncol); \
		for (integer irow = 1; irow <= expr.x.nrow; irow ++) \
			for (integer icol = 1; icol <= expr.x.ncol; icol ++) \
				target [irow] [icol] op expr.x [irow] [icol] - expr.number; \
	}
GENERATE_FIVE_TENSOR_FUNCTIONS
#undef GENERATE_ONE_TENSOR_FUNCTION
subtract_MAT(constMATVU const & x,double number)118 inline autoMAT subtract_MAT (constMATVU const& x, double number) {
119 autoMAT result = raw_MAT (x.nrow, x.ncol);
120 result.all() <<= x - number;
121 return result;
122 }
123
/*
	Lazy expression type for the reversed subtraction `scalar - matrix`.
*/
struct TypeMATsubtract_NUM_MAT { double number; constMATVU const& x; };
inline TypeMATsubtract_NUM_MAT operator- (double number, constMATVU const& x) { return { number, x }; }
#define GENERATE_ONE_TENSOR_FUNCTION(operator, op) \
	inline void operator (MATVU const& target, TypeMATsubtract_NUM_MAT const& expr) noexcept { \
		Melder_assert (expr.x.nrow == target.nrow); \
		Melder_assert (expr.x.ncol == target.ncol); \
		for (integer irow = 1; irow <= expr.x.nrow; irow ++) \
			for (integer icol = 1; icol <= expr.x.ncol; icol ++) \
				target [irow] [icol] op expr.number - expr.x [irow] [icol]; \
	}
GENERATE_FIVE_TENSOR_FUNCTIONS
#undef GENERATE_ONE_TENSOR_FUNCTION
subtract_MAT(double number,constMATVU const & x)136 inline autoMAT subtract_MAT (double number, constMATVU const& x) {
137 autoMAT result = raw_MAT (x.nrow, x.ncol);
138 result.all() <<= number - x;
139 return result;
140 }
141
/*
	Lazy expression type for `matrix + row vector`: y is indexed by column,
	i.e. the same vector is added to every row of x.
*/
struct TypeMATadd_MAT_VEC { constMATVU const& x; constVECVU const& y; };
inline TypeMATadd_MAT_VEC operator+ (constMATVU const& x, constVECVU const& y) { return { x, y }; }
#define GENERATE_ONE_TENSOR_FUNCTION(operator, op) \
	inline void operator (MATVU const& target, TypeMATadd_MAT_VEC const& expr) noexcept { \
		Melder_assert (expr.x.nrow == target.nrow); \
		Melder_assert (expr.x.ncol == target.ncol); \
		Melder_assert (expr.x.ncol == expr.y.size); \
		for (integer irow = 1; irow <= expr.x.nrow; irow ++) \
			for (integer icol = 1; icol <= expr.x.ncol; icol ++) \
				target [irow] [icol] op expr.x [irow] [icol] + expr.y [icol]; \
	}
GENERATE_FIVE_TENSOR_FUNCTIONS
#undef GENERATE_ONE_TENSOR_FUNCTION
add_MAT(constMATVU const & x,constVECVU const & y)155 inline autoMAT add_MAT (constMATVU const& x, constVECVU const& y) {
156 autoMAT result = raw_MAT (x.nrow, x.ncol);
157 result.all() <<= x + y;
158 return result;
159 }
160
/*
	Lazy expression type for `matrix * row vector` (elementwise per row;
	NOT matrix-vector multiplication — for that see mul_MAT_out below).
*/
struct TypeMATmultiply_MAT_VEC { constMATVU const& x; constVECVU const& y; };
inline TypeMATmultiply_MAT_VEC operator* (constMATVU const& x, constVECVU const& y) { return { x, y }; }
#define GENERATE_ONE_TENSOR_FUNCTION(operator, op) \
	inline void operator (MATVU const& target, TypeMATmultiply_MAT_VEC const& expr) noexcept { \
		Melder_assert (expr.x.nrow == target.nrow); \
		Melder_assert (expr.x.ncol == target.ncol); \
		Melder_assert (expr.x.ncol == expr.y.size); \
		for (integer irow = 1; irow <= expr.x.nrow; irow ++) \
			for (integer icol = 1; icol <= expr.x.ncol; icol ++) \
				target [irow] [icol] op expr.x [irow] [icol] * expr.y [icol]; \
	}
GENERATE_FIVE_TENSOR_FUNCTIONS
#undef GENERATE_ONE_TENSOR_FUNCTION
multiply_MAT(constMATVU const & x,constVECVU const & y)174 inline autoMAT multiply_MAT (constMATVU const& x, constVECVU const& y) {
175 autoMAT result = raw_MAT (x.nrow, x.ncol);
176 result.all() <<= x * y;
177 return result;
178 }
179
/*
	Lazy expression type for `matrix - row vector` (y subtracted from each row).
*/
struct TypeMATsubtract_MAT_VEC { constMATVU const& x; constVECVU const& y; };
inline TypeMATsubtract_MAT_VEC operator- (constMATVU const& x, constVECVU const& y) { return { x, y }; }
#define GENERATE_ONE_TENSOR_FUNCTION(operator, op) \
	inline void operator (MATVU const& target, TypeMATsubtract_MAT_VEC const& expr) noexcept { \
		Melder_assert (expr.x.nrow == target.nrow); \
		Melder_assert (expr.x.ncol == target.ncol); \
		Melder_assert (expr.x.ncol == expr.y.size); \
		for (integer irow = 1; irow <= expr.x.nrow; irow ++) \
			for (integer icol = 1; icol <= expr.x.ncol; icol ++) \
				target [irow] [icol] op expr.x [irow] [icol] - expr.y [icol]; \
	}
GENERATE_FIVE_TENSOR_FUNCTIONS
#undef GENERATE_ONE_TENSOR_FUNCTION
subtract_MAT(constMATVU const & x,constVECVU const & y)193 inline autoMAT subtract_MAT (constMATVU const& x, constVECVU const& y) {
194 autoMAT result = raw_MAT (x.nrow, x.ncol);
195 result.all() <<= x - y;
196 return result;
197 }
198
/*
	Lazy expression type for `column vector + matrix`: x is indexed by row,
	i.e. x [irow] is added to every element of row irow of y.
*/
struct TypeMATadd_VEC_MAT { constVECVU const& x; constMATVU const& y; };
inline TypeMATadd_VEC_MAT operator+ (constVECVU const& x, constMATVU const& y) { return { x, y }; }
#define GENERATE_ONE_TENSOR_FUNCTION(operator, op) \
	inline void operator (MATVU const& target, TypeMATadd_VEC_MAT const& expr) noexcept { \
		Melder_assert (expr.y.nrow == expr.x.size); \
		Melder_assert (expr.y.nrow == target.nrow); \
		Melder_assert (expr.y.ncol == target.ncol); \
		for (integer irow = 1; irow <= target.nrow; irow ++) \
			for (integer icol = 1; icol <= target.ncol; icol ++) \
				target [irow] [icol] op expr.x [irow] + expr.y [irow] [icol]; \
	}
GENERATE_FIVE_TENSOR_FUNCTIONS
#undef GENERATE_ONE_TENSOR_FUNCTION
add_MAT(constVECVU const & x,constMATVU const & y)212 inline autoMAT add_MAT (constVECVU const& x, constMATVU const& y) {
213 autoMAT result = raw_MAT (y.nrow, y.ncol);
214 result.all() <<= x + y;
215 return result;
216 }
217
/*
	Lazy expression type for `column vector * matrix` (elementwise:
	row irow of y is scaled by x [irow]).
*/
struct TypeMATmultiply_VEC_MAT { constVECVU const& x; constMATVU const& y; };
inline TypeMATmultiply_VEC_MAT operator* (constVECVU const& x, constMATVU const& y) { return { x, y }; }
#define GENERATE_ONE_TENSOR_FUNCTION(operator, op) \
	inline void operator (MATVU const& target, TypeMATmultiply_VEC_MAT const& expr) noexcept { \
		Melder_assert (expr.y.nrow == expr.x.size); \
		Melder_assert (expr.y.nrow == target.nrow); \
		Melder_assert (expr.y.ncol == target.ncol); \
		for (integer irow = 1; irow <= target.nrow; irow ++) \
			for (integer icol = 1; icol <= target.ncol; icol ++) \
				target [irow] [icol] op expr.x [irow] * expr.y [irow] [icol]; \
	}
GENERATE_FIVE_TENSOR_FUNCTIONS
#undef GENERATE_ONE_TENSOR_FUNCTION
multiply_MAT(constVECVU const & x,constMATVU const & y)231 inline autoMAT multiply_MAT (constVECVU const& x, constMATVU const& y) {
232 autoMAT result = raw_MAT (y.nrow, y.ncol);
233 result.all() <<= x * y;
234 return result;
235 }
236
/*
	Lazy expression type for `column vector - matrix`
	(x [irow] minus each element of row irow of y).
*/
struct TypeMATsubtract_VEC_MAT { constVECVU const& x; constMATVU const& y; };
inline TypeMATsubtract_VEC_MAT operator- (constVECVU const& x, constMATVU const& y) { return { x, y }; }
#define GENERATE_ONE_TENSOR_FUNCTION(operator, op) \
	inline void operator (MATVU const& target, TypeMATsubtract_VEC_MAT const& expr) noexcept { \
		Melder_assert (expr.y.nrow == expr.x.size); \
		Melder_assert (expr.y.nrow == target.nrow); \
		Melder_assert (expr.y.ncol == target.ncol); \
		for (integer irow = 1; irow <= target.nrow; irow ++) \
			for (integer icol = 1; icol <= target.ncol; icol ++) \
				target [irow] [icol] op expr.x [irow] - expr.y [irow] [icol]; \
	}
GENERATE_FIVE_TENSOR_FUNCTIONS
#undef GENERATE_ONE_TENSOR_FUNCTION
subtract_MAT(constVECVU const & x,constMATVU const & y)250 inline autoMAT subtract_MAT (constVECVU const& x, constMATVU const& y) {
251 autoMAT result = raw_MAT (y.nrow, y.ncol);
252 result.all() <<= x - y;
253 return result;
254 }
255
/*
	Lazy expression type for elementwise `matrix + matrix`.
	All three shapes (target, x, y) must agree.
*/
struct TypeMATadd_MAT_MAT { constMATVU const& x; constMATVU const& y; };
inline TypeMATadd_MAT_MAT operator+ (constMATVU const& x, constMATVU const& y) { return { x, y }; }
#define GENERATE_ONE_TENSOR_FUNCTION(operator, op) \
	inline void operator (MATVU const& target, TypeMATadd_MAT_MAT const& expr) noexcept { \
		Melder_assert (expr.x.nrow == target.nrow); \
		Melder_assert (expr.x.ncol == target.ncol); \
		Melder_assert (expr.x.nrow == expr.y.nrow); \
		Melder_assert (expr.x.ncol == expr.y.ncol); \
		for (integer irow = 1; irow <= expr.x.nrow; irow ++) \
			for (integer icol = 1; icol <= expr.x.ncol; icol ++) \
				target [irow] [icol] op expr.x [irow] [icol] + expr.y [irow] [icol]; \
	}
GENERATE_FIVE_TENSOR_FUNCTIONS
#undef GENERATE_ONE_TENSOR_FUNCTION
add_MAT(constMATVU const & x,constMATVU const & y)270 inline autoMAT add_MAT (constMATVU const& x, constMATVU const& y) {
271 autoMAT result = raw_MAT (x.nrow, x.ncol);
272 result.all() <<= x + y;
273 return result;
274 }
275
/*
	Lazy expression type for ELEMENTWISE `matrix * matrix`
	(Hadamard product; for true matrix multiplication see mul_MAT_out below).
*/
struct TypeMATmultiply_MAT_MAT { constMATVU const& x; constMATVU const& y; };
inline TypeMATmultiply_MAT_MAT operator* (constMATVU const& x, constMATVU const& y) { return { x, y }; }
#define GENERATE_ONE_TENSOR_FUNCTION(operator, op) \
	inline void operator (MATVU const& target, TypeMATmultiply_MAT_MAT const& expr) noexcept { \
		Melder_assert (expr.x.nrow == target.nrow); \
		Melder_assert (expr.x.ncol == target.ncol); \
		Melder_assert (expr.x.nrow == expr.y.nrow); \
		Melder_assert (expr.x.ncol == expr.y.ncol); \
		for (integer irow = 1; irow <= expr.x.nrow; irow ++) \
			for (integer icol = 1; icol <= expr.x.ncol; icol ++) \
				target [irow] [icol] op expr.x [irow] [icol] * expr.y [irow] [icol]; \
	}
GENERATE_FIVE_TENSOR_FUNCTIONS
#undef GENERATE_ONE_TENSOR_FUNCTION
multiply_MAT(constMATVU const & x,constMATVU const & y)290 inline autoMAT multiply_MAT (constMATVU const& x, constMATVU const& y) {
291 autoMAT result = raw_MAT (x.nrow, x.ncol);
292 result.all() <<= x * y;
293 return result;
294 }
295
/*
	Lazy expression type for elementwise `matrix - matrix`.
*/
struct TypeMATsubtract_MAT_MAT { constMATVU const& x; constMATVU const& y; };
inline TypeMATsubtract_MAT_MAT operator- (constMATVU const& x, constMATVU const& y) { return { x, y }; }
#define GENERATE_ONE_TENSOR_FUNCTION(operator, op) \
	inline void operator (MATVU const& target, TypeMATsubtract_MAT_MAT const& expr) noexcept { \
		Melder_assert (expr.x.nrow == target.nrow); \
		Melder_assert (expr.x.ncol == target.ncol); \
		Melder_assert (expr.x.nrow == expr.y.nrow); \
		Melder_assert (expr.x.ncol == expr.y.ncol); \
		for (integer irow = 1; irow <= expr.x.nrow; irow ++) \
			for (integer icol = 1; icol <= expr.x.ncol; icol ++) \
				target [irow] [icol] op expr.x [irow] [icol] - expr.y [irow] [icol]; \
	}
GENERATE_FIVE_TENSOR_FUNCTIONS
#undef GENERATE_ONE_TENSOR_FUNCTION
subtract_MAT(constMATVU const & x,constMATVU const & y)310 inline autoMAT subtract_MAT (constMATVU const& x, constMATVU const& y) {
311 autoMAT result = raw_MAT (x.nrow, x.ncol);
312 result.all() <<= x - y;
313 return result;
314 }
315
/*
	Make the average of each column zero.
		a [i] [j] -= a [.] [j]
	(in-place; implementation in MAT.cpp)
*/
extern void centreEachColumn_MAT_inout (MATVU const& x) noexcept;

/*
	Make the average of each row zero.
		a [i] [j] -= a [i] [.]
	(in-place; implementation in MAT.cpp)
*/
extern void centreEachRow_MAT_inout (MATVU const& x) noexcept;

/*
	Make the average of every column and every row zero (double centring).
		a [i] [j] += - a [i] [.] - a [.] [j] + a [.] [.]
	(in-place; implementation in MAT.cpp)
*/
extern void doubleCentre_MAT_inout (MATVU const& x) noexcept;
333
334 extern void mtm_MAT_out (MATVU const& target, constMATVU const& x) noexcept;
mtm_MAT(constMATVU const & x)335 inline autoMAT mtm_MAT (constMATVU const& x) {
336 autoMAT result = raw_MAT (x.ncol, x.ncol);
337 mtm_MAT_out (result.get(), x);
338 return result;
339 }
340
/*
	Precise matrix multiplication, using pairwise summation.
*/
extern void _mul_MAT_out (MATVU const& target, constMATVU const& x, constMATVU const& y) noexcept;
/*
	Checked wrapper: verifies that the shapes are conformable
	(target is x.nrow × y.ncol and x.ncol == y.nrow) before delegating.
*/
inline void mul_MAT_out (MATVU const& target, constMATVU const& x, constMATVU const& y) noexcept {
	Melder_assert (target.nrow == x.nrow);
	Melder_assert (target.ncol == y.ncol);
	Melder_assert (x.ncol == y.nrow);
	_mul_MAT_out (target, x, y);
}
/*
	Allocating version: returns the newly allocated product x · y.
*/
inline autoMAT mul_MAT (constMATVU const& x, constMATVU const& y) {
	autoMAT result = raw_MAT (x.nrow, y.ncol);
	mul_MAT_out (result.all(), x, y);
	return result;
}
/*
	Faster multiplication of large matrices,
	which allocates new matrices to make x.colStride and y.rowStride 1
	(unless they are already 1).
	Because of the use of malloc, this function may not be thread-safe.
*/
extern void _mul_forceAllocation_MAT_out (MATVU const& target, constMATVU x, constMATVU y);
/*
	Checked wrapper: verifies conformable shapes before delegating.
*/
inline void mul_forceAllocation_MAT_out (MATVU const& target, constMATVU x, constMATVU y) {
	Melder_assert (target.nrow == x.nrow);
	Melder_assert (target.ncol == y.ncol);
	Melder_assert (x.ncol == y.nrow);
	_mul_forceAllocation_MAT_out (target, x, y);
}
newMATmul_forceAllocation(constMATVU const & x,constMATVU const & y)369 inline autoMAT newMATmul_forceAllocation (constMATVU const& x, constMATVU const& y) {
370 autoMAT result = raw_MAT (x.nrow, y.ncol);
371 mul_forceAllocation_MAT_out (result.all(), x, y);
372 return result;
373 }
/*
	The faster of mul_forceAllocation_MAT_out and mul_MAT_out.
	Because of the use of malloc, this function may not be thread-safe.
*/
extern void _mul_allowAllocation_MAT_out (MATVU const& target, constMATVU x, constMATVU y);
/*
	Checked wrapper: verifies conformable shapes before delegating.
*/
inline void mul_allowAllocation_MAT_out (MATVU const& target, constMATVU x, constMATVU y) {
	Melder_assert (target.nrow == x.nrow);
	Melder_assert (target.ncol == y.ncol);
	Melder_assert (x.ncol == y.nrow);
	_mul_allowAllocation_MAT_out (target, x, y);
}
/*
	Allocating version: returns the newly allocated product x · y.
*/
inline autoMAT mul_allowAllocation_MAT (constMATVU const& x, constMATVU const& y) {
	autoMAT result = raw_MAT (x.nrow, y.ncol);
	mul_allowAllocation_MAT_out (result.all(), x, y);
	return result;
}
/*
	Rough matrix multiplication, using an in-cache inner loop if that is faster.
*/
extern void _mul_fast_MAT_out (MATVU const& target, constMATVU const& x, constMATVU const& y) noexcept;
/*
	Checked wrapper: verifies conformable shapes before delegating.
*/
inline void mul_fast_MAT_out (MATVU const& target, constMATVU const& x, constMATVU const& y) noexcept {
	Melder_assert (target.nrow == x.nrow);
	Melder_assert (target.ncol == y.ncol);
	Melder_assert (x.ncol == y.nrow);
	_mul_fast_MAT_out (target, x, y);
}
/*
	Allocating version: returns the newly allocated product x · y.
*/
inline autoMAT mul_fast_MAT (constMATVU const& x, constMATVU const& y) {
	autoMAT result = raw_MAT (x.nrow, y.ncol);
	mul_fast_MAT_out (result.all(), x, y);
	return result;
}
/*
	GPU back ends for matrix multiplication (implementations elsewhere).
*/
void MATmul_forceMetal_ (MATVU const& target, constMATVU const& x, constMATVU const& y);
void MATmul_forceOpenCL_ (MATVU const& target, constMATVU const& x, constMATVU const& y);
407
/*
	Outer product of two vectors into `target`; the allocating version returns
	an x.size × y.size matrix. Implementations elsewhere.
*/
void outer_MAT_out (MATVU const& target, constVECVU const& x, constVECVU const& y);
extern autoMAT outer_MAT (constVECVU const& x, constVECVU const& y);

/*
	Peak picking on a vector; semantics of the flags are defined at the
	implementation (elsewhere) — see peaks_MAT in MAT.cpp.
*/
extern autoMAT peaks_MAT (constVECVU const& x, bool includeEdges, int interpolate, bool sortByHeight);

/*
	Raise every element of `mat` to the given power, into `target`.
*/
void power_MAT_out (MATVU const& target, constMATVU const& mat, double power);
power_MAT(constMATVU const & mat,double power)414 inline autoMAT power_MAT (constMATVU const& mat, double power) {
415 autoMAT result = raw_MAT (mat.nrow, mat.ncol);
416 power_MAT_out (result.all(), mat, power);
417 return result;
418 }
419
randomGauss_MAT_out(MATVU const & target,double mu,double sigma)420 inline void randomGauss_MAT_out (MATVU const& target, double mu, double sigma) noexcept {
421 for (integer irow = 1; irow <= target.nrow; irow ++)
422 for (integer icol = 1; icol <= target.ncol; icol ++)
423 target [irow] [icol] = NUMrandomGauss (mu, sigma);
424 }
randomGauss_MAT(integer nrow,integer ncol,double mu,double sigma)425 inline autoMAT randomGauss_MAT (integer nrow, integer ncol, double mu, double sigma) {
426 autoMAT result = raw_MAT (nrow, ncol);
427 randomGauss_MAT_out (result.all(), mu, sigma);
428 return result;
429 }
randomGauss_MAT(constMATVU const & model,double mu,double sigma)430 inline autoMAT randomGauss_MAT (constMATVU const& model, double mu, double sigma) {
431 autoMAT result = raw_MAT (model.nrow, model.ncol);
432 randomGauss_MAT_out (result.all(), mu, sigma);
433 return result;
434 }
435
randomUniform_MAT_out(MATVU const & target,double lowest,double highest)436 inline void randomUniform_MAT_out (MATVU const& target, double lowest, double highest) noexcept {
437 for (integer irow = 1; irow <= target.nrow; irow ++)
438 for (integer icol = 1; icol <= target.ncol; icol ++)
439 target [irow] [icol] = NUMrandomUniform (lowest, highest);
440 }
randomUniform_MAT(integer nrow,integer ncol,double lowest,double highest)441 inline autoMAT randomUniform_MAT (integer nrow, integer ncol, double lowest, double highest) {
442 autoMAT result = raw_MAT (nrow, ncol);
443 randomUniform_MAT_out (result.all(), lowest, highest);
444 return result;
445 }
randomUniform_MAT(constMATVU const & model,double lowest,double highest)446 inline autoMAT randomUniform_MAT (constMATVU const& model, double lowest, double highest) {
447 autoMAT result = raw_MAT (model.nrow, model.ncol);
448 randomUniform_MAT_out (result.all(), lowest, highest);
449 return result;
450 }
451
sin_MAT_inout(MATVU const & mat)452 inline void sin_MAT_inout (MATVU const& mat) noexcept {
453 for (integer irow = 1; irow <= mat.nrow; irow ++)
454 for (integer icol = 1; icol <= mat.ncol; icol ++)
455 mat [irow] [icol] = sin (mat [irow] [icol]);
456 }
457
subtractReversed_MAT_inout(MATVU const & x,double number)458 inline void subtractReversed_MAT_inout (MATVU const& x, double number) noexcept {
459 for (integer irow = 1; irow <= x.nrow; irow ++)
460 for (integer icol = 1; icol <= x.ncol; icol ++)
461 x [irow] [icol] = number - x [irow] [icol];
462 }
subtractReversed_MAT_inout(MATVU const & x,constMATVU const & y)463 inline void subtractReversed_MAT_inout (MATVU const& x, constMATVU const& y) noexcept {
464 Melder_assert (y.nrow == x.nrow && y.ncol == x.ncol);
465 for (integer irow = 1; irow <= x.nrow; irow ++)
466 for (integer icol = 1; icol <= x.ncol; icol ++)
467 x [irow] [icol] = y [irow] [icol] - x [irow] [icol];
468 }
469
transpose_mustBeSquare_MAT_inout(MATVU const & x)470 inline void transpose_mustBeSquare_MAT_inout (MATVU const& x) noexcept {
471 Melder_assert (x.nrow == x.ncol);
472 integer n = x.nrow;
473 for (integer i = 1; i < n; i ++)
474 for (integer j = i + 1; j <= n; j ++)
475 std::swap (x [i] [j], x [j] [i]);
476 }
transpose_MAT_out(MATVU const & target,constMATVU const & x)477 inline void transpose_MAT_out (MATVU const& target, constMATVU const& x) noexcept {
478 Melder_assert (x.nrow == target.ncol && x.ncol == target.nrow);
479 for (integer irow = 1; irow <= target.nrow; irow ++)
480 for (integer icol = 1; icol <= target.ncol; icol ++)
481 target [irow] [icol] = x [icol] [irow];
482 }
transpose_MAT(constMATVU const & x)483 inline autoMAT transpose_MAT (constMATVU const& x) {
484 autoMAT result = raw_MAT (x.ncol, x.nrow);
485 transpose_MAT_out (result.get(), x);
486 return result;
487 }
488
489 /* End of file MAT.h */
490