/* NMF.cpp
 *
 * Copyright (C) 2019 David Weenink
 *
 * This code is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This code is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this work. If not, see <http://www.gnu.org/licenses/>.
 */

#include "Graphics.h"
#include "NMF.h"
#include "NUMmachar.h"
#include "NUM2.h"
#include "SVD.h"

#include "oo_DESTROY.h"
#include "NMF_def.h"
#include "oo_COPY.h"
#include "NMF_def.h"
#include "oo_EQUAL.h"
#include "NMF_def.h"
#include "oo_CAN_WRITE_AS_ENCODING.h"
#include "NMF_def.h"
#include "oo_WRITE_TEXT.h"
#include "NMF_def.h"
#include "oo_WRITE_BINARY.h"
#include "NMF_def.h"
#include "oo_READ_TEXT.h"
#include "NMF_def.h"
#include "oo_READ_BINARY.h"
#include "NMF_def.h"
#include "oo_DESCRIPTION.h"
#include "NMF_def.h"

#include "enums_getText.h"
#include "NMF_enums.h"
#include "enums_getValue.h"
#include "NMF_enums.h"

void structNMF :: v_info () {
	MelderInfo_writeLine (U"Number of rows: ", numberOfRows);
	MelderInfo_writeLine (U"Number of columns: ", numberOfColumns);
	MelderInfo_writeLine (U"Number of features: ", numberOfFeatures);
}

Thing_implement (NMF, Daata, 0);

double MATgetDivergence_ItakuraSaito (constMATVU const& ref, constMATVU const& x) {
	Melder_assert (ref.nrow == x.nrow);
	Melder_assert (ref.ncol == x.ncol);
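	/*
		Accumulate the Itakura-Saito divergence of x relative to ref, summed over all cells:
			D_IS (x | ref) = sum (x / ref - log (x / ref) - 1).
		The divergence is undefined whenever a reference cell is zero.
	*/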
	double divergence = 0.0;
	for (integer irow = 1; irow <= ref.nrow; irow ++)
		for (integer icol = 1; icol <= ref.ncol; icol ++) {
			const double refval = ref [irow] [icol];
			if (refval == 0.0)
				return undefined;
			divergence += x [irow] [icol] / refval - log (x [irow] [icol] / refval) - 1.0;
		}
	return divergence;
}

void NMF_paintFeatures (NMF me, Graphics g, integer fromFeature, integer toFeature, integer fromRow, integer toRow, double minimum, double maximum, int amplitudeScale, int scaling, bool garnish) {
	fixUnspecifiedColumnRange (& fromFeature, & toFeature, my features.get());
	fixUnspecifiedRowRange (& fromRow, & toRow, my features.get());

	autoMAT part = copy_MAT (my features.part (fromRow, toRow, fromFeature, toFeature));

	if (minimum == 0.0 && maximum == 0.0) {
		minimum = NUMmin (part.get());
		maximum = NUMmax (part.get());
	}

	Graphics_setInner (g);
	Graphics_setWindow (g, fromFeature, toFeature, fromRow, toRow);

	Graphics_cellArray (g, my features.part (fromRow, toRow, fromFeature, toFeature),
			fromFeature, toFeature, fromRow, toRow, minimum, maximum);
	Graphics_unsetInner (g);
	if (garnish)
		Graphics_drawInnerBox (g);
}

void NMF_paintWeights (NMF me, Graphics g, integer fromWeight, integer toWeight, integer fromRow, integer toRow, double minimum, double maximum, int amplitudeScale, int scaling, bool garnish) {
	fixUnspecifiedColumnRange (& fromWeight, & toWeight, my weights.get());
	fixUnspecifiedRowRange (& fromRow, & toRow, my weights.get());

	autoMAT part = copy_MAT (my weights.part (fromRow, toRow, fromWeight, toWeight));

	if (minimum == 0.0 && maximum == 0.0) {
		minimum = NUMmin (part.get());
		maximum = NUMmax (part.get());
	}

	Graphics_setInner (g);
	Graphics_setWindow (g, fromWeight, toWeight, fromRow, toRow);

	Graphics_cellArray (g, my weights.part (fromRow, toRow, fromWeight, toWeight),
			fromWeight, toWeight, fromRow, toRow, minimum, maximum);
	Graphics_unsetInner (g);
	if (garnish)
		Graphics_drawInnerBox (g);
}

autoNMF NMF_create (integer numberOfRows, integer numberOfColumns, integer numberOfFeatures) {
	try {
		autoNMF me = Thing_new (NMF);
		my numberOfRows = numberOfRows;
		my numberOfColumns = numberOfColumns;
		my numberOfFeatures = numberOfFeatures;
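		/*
			The data matrix (numberOfRows x numberOfColumns) will be approximated by the product
			features * weights, where features is numberOfRows x numberOfFeatures and
			weights is numberOfFeatures x numberOfColumns.
		*/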
		my features = zero_MAT (numberOfRows, numberOfFeatures);
		my weights = zero_MAT (numberOfFeatures, numberOfColumns);
		return me;
	} catch (MelderError) {
		Melder_throw (U"NMF not created.");
	}
}

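/*
	Make all elements of m non-negative: strategy 0 clips negative elements to zero,
	any other strategy replaces them by their absolute value.
*/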
static void MATmakeElementsNonNegative (MATVU const& m, int strategy) {
	for (integer irow = 1; irow <= m.nrow; irow ++)
		for (integer icol = 1; icol <= m.ncol; icol ++)
			if (m [irow] [icol] < 0.0)
				m [irow] [icol] = strategy == 0 ? 0.0 : fabs (m [irow] [icol]);
}

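/*
	SVD-based initialization: the first numberOfFeatures left singular vectors of the data
	(made non-negative) become the feature columns; each weight row is the corresponding
	row of the SVD's v matrix, made non-negative and scaled by its singular value.
*/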
static void NMF_initializeFactorization_svd (NMF me, constMATVU const& data, kNMF_Initialization initializationMethod) {
	try {
		autoSVD thee = SVD_createFromGeneralMatrix (data);
		MATmakeElementsNonNegative (thy u.get(), 1);
		MATmakeElementsNonNegative (thy v.get(), 1);
		my features.all()  <<=  thy u.verticalBand (1, my numberOfFeatures);
		for (integer irow = 1; irow <= my numberOfFeatures; irow ++)
			my weights.row (irow)  <<=  thy d [irow]  *  thy v.row (irow);
	} catch (MelderError) {
		Melder_throw (me, U": could not initialize by the SVD method.");
	}
}

void NMF_initializeFactorization (NMF me, constMATVU const& data, kNMF_Initialization initializationMethod) {
	if (initializationMethod == kNMF_Initialization::RANDOM_UNIFORM) {
		const double rmin = 0.0, rmax = 1.0;
		randomUniform_MAT_out (my features.all(), rmin, rmax);
		randomUniform_MAT_out (my weights.all(), rmin, rmax);
	} else {
		NMF_initializeFactorization_svd (me, data, initializationMethod);
	}
}

autoNMF NMF_createFromGeneralMatrix (constMATVU const& m, integer numberOfFeatures) {
	try {
		Melder_require (NUMisNonNegative (m),
			U"No matrix elements should be negative.");
		Melder_require (numberOfFeatures <= m.ncol,
			U"The number of features should not exceed the number of columns.");
		autoNMF me = NMF_create (m.nrow, m.ncol, numberOfFeatures);
		return me;
	} catch (MelderError) {
		Melder_throw (U"NMF cannot be created.");
	}
}

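/*
	A minimal usage sketch (assuming `data` is a non-negative constMATVU; the number of features,
	the iteration limit and the tolerances below are only illustrative values):

		autoNMF nmf = NMF_createFromGeneralMatrix (data, 10);
		NMF_initializeFactorization (nmf.get(), data, kNMF_Initialization::RANDOM_UNIFORM);
		NMF_improveFactorization_mu (nmf.get(), data, 100, 1e-9, 1e-9, false);
		autoMAT approximation = NMF_synthesize (nmf.get());
*/
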
double NMF_getEuclideanDistance (NMF me, constMATVU const& data) {
	Melder_require (data.nrow == my numberOfRows && data.ncol == my numberOfColumns,
		U"Dimensions should match.");
	autoMAT synthesis = NMF_synthesize (me);
	synthesis.get()  -=  data;
	return NUMnorm (synthesis.get(), 2.0);
}

double NMF_getItakuraSaitoDivergence (NMF me, constMATVU const& data) {
	Melder_require (data.nrow == my numberOfRows && data.ncol == my numberOfColumns,
		U"Dimensions should match.");
	autoMAT synthesis = NMF_synthesize (me);
	return MATgetDivergence_ItakuraSaito (data, synthesis.get());
}

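/*
	Returns the largest absolute elementwise change between m and m0, relative to the largest
	absolute value in m0 (sqrteps guards against division by zero when m0 is all zeros).
	Note that m0 is overwritten with the difference m0 - m.
*/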
static double getMaximumChange (constMATVU const& m, MATVU const& m0, const double sqrteps) {
	double min = NUMmin (m0);
	double max = NUMmax (m0);
	const double extremum1 = std::max (fabs (min), fabs (max));
	m0  -=  m;
	min = NUMmin (m0);
	max = NUMmax (m0);
	const double extremum2 = std::max (fabs (min), fabs (max));
	const double dmat = extremum2 / (sqrteps + extremum1);
	return dmat;
}

/*
	Elementwise multiplicative update m = m0 .* (numer ./ (denom + eps)).
	Elements smaller than zeroThreshold are set to zero.
*/
static void update (MATVU const& m, constMATVU const& m0, constMATVU const& numer, constMATVU const& denom, double zeroThreshold, double maximum) {
	Melder_assert (m.nrow == m0.nrow && m.ncol == m0.ncol);
	Melder_assert (m.nrow == numer.nrow && m.ncol == numer.ncol);
	Melder_assert (m.nrow == denom.nrow && m.ncol == denom.ncol);
	/*
		The value 1e-9 is OK for matrices with values that are larger than 1.
		For matrices with very small values we have to scale the divByZeroAvoidance value
		otherwise the precision would suffer.
		A scaling with the maximum value seems reasonable.
	*/
	const double divByZeroAvoidance = 1e-09 * ( maximum < 1.0 ? maximum : 1.0 );
	for (integer irow = 1; irow <= m.nrow; irow ++)
		for (integer icol = 1; icol <= m.ncol; icol ++) {
			if (m0 [irow] [icol] == 0.0 || numer [irow] [icol] == 0.0) {
				m [irow] [icol] = 0.0;
			} else {
				const double update = m0 [irow] [icol] * (numer [irow] [icol] / (denom [irow] [icol] + divByZeroAvoidance));
				m [irow] [icol] = ( update < zeroThreshold ? 0.0 : update );
			}
		}
}

/*
	Algorithm for Non-negative Matrix Factorization by multiplicative updates.
	The algorithm is inspired by the nmf_mu.c algorithm in libNMF by
	A. Janecek, S. Schulze Grotthoff & W.N. Gangsterer (2011):
		"LIBNMF - A library for nonnegative matrix factorization."
		Computing and Informatics 30: 205--224.
*/
void NMF_improveFactorization_mu (NMF me, constMATVU const& data, integer maximumNumberOfIterations, double changeTolerance, double approximationTolerance, bool info) {
	try {
		Melder_require (my numberOfColumns == data.ncol,
			U"The number of columns should be equal.");
		Melder_require (my numberOfRows == data.nrow,
			U"The number of rows should be equal.");

		autoMAT productFtD = zero_MAT (my numberOfFeatures, my numberOfColumns); // calculations of F'D
		autoMAT productFtFW = zero_MAT (my numberOfFeatures, my numberOfColumns); // calculations of F'F W
		autoMAT weights0 = zero_MAT (my numberOfFeatures, my numberOfColumns);

		autoMAT productDWt = zero_MAT (my numberOfRows, my numberOfFeatures); // calculations of DW'
		autoMAT productFWWt = zero_MAT (my numberOfRows, my numberOfFeatures); // calculations of FWW'
		autoMAT features0 = zero_MAT (my numberOfRows, my numberOfFeatures);

		autoMAT productWWt = zero_MAT (my numberOfFeatures, my numberOfFeatures); // calculations of WW'
		autoMAT productFtF = zero_MAT (my numberOfFeatures, my numberOfFeatures); // calculations of F'F

		const double traceDtD = NUMtrace2 (data.transpose(), data); // for distance calculation
		features0.all()  <<=  my features.all();
		weights0.all()  <<=  my weights.all();

		if (! NUMfpp)
			NUMmachar ();

		const double eps = NUMfpp -> eps;
		const double sqrteps = sqrt (eps);
		const double maximum = NUMmax (data);
		double dnorm0 = 0.0;
		integer iter = 1;
		bool convergence = false;

		while (iter <= maximumNumberOfIterations && not convergence) {
			/*
				while iter < maxiter and not convergence
					(1) W = W .* (F'*D) ./ (F'*F*W + 10^-9)
					(2) F = F .* (D*W') ./ (F*W*W' + 10^-9)
					(3) test for convergence
				endwhile
			*/

			// 1. Update W matrix
			features0.all()  <<=  my features.all();
			weights0.all()  <<=  my weights.all();
			mul_MAT_out (productFtD.get(), features0.transpose(), data);
			mul_MAT_out (productFtF.get(), features0.transpose(), features0.get());
			mul_MAT_out (productFtFW.get(), productFtF.get(), weights0.get());
			update (my weights.get(), weights0.get(), productFtD.get(), productFtFW.get(), eps, maximum);

			// 2. Update F matrix
			mul_MAT_out (productDWt.get(), data, my weights.transpose()); // productDWt = data*weights'
			mul_MAT_out (productWWt.get(), my weights.get(), my weights.transpose()); // productWWt = weights*weights'
			mul_MAT_out (productFWWt.get(), features0.get(), productWWt.get()); // productFWWt = features0*productWWt
			update (my features.get(), features0.get(), productDWt.get(), productFWWt.get(), eps, maximum);

			/* 3. Convergence test:
				The squared Frobenius norm ||D-FW||^2 can be written as
					||D-FW||^2 = trace(D'D) - 2*trace(W'F'D) + trace(W'F'FW)
					           = trace(D'D) - 2*trace(W'(F'D)) + trace((F'F)(WW'))
				This saves us from explicitly calculating the reconstruction FW, because most of
				the needed matrix multiplications have already been performed in the update step.
			*/

			const double traceWtFtD  = NUMtrace2 (my weights.transpose(), productFtD.get());
			const double traceWtFtFW = NUMtrace2 (productFtF.get(), productWWt.get());
			const double distance = sqrt (std::max (traceDtD - 2.0 * traceWtFtD + traceWtFtFW, 0.0)); // just in case
			const double dnorm = distance / (my numberOfRows * my numberOfColumns);
			const double df = getMaximumChange (my features.get(), features0.get(), sqrteps);
			const double dw = getMaximumChange (my weights.get(), weights0.get(), sqrteps);
			const double delta = std::max (df, dw);
			convergence = ( iter > 1 && (delta < changeTolerance || dnorm < dnorm0 * approximationTolerance) );
			if (info)
				MelderInfo_writeLine (U"Iteration: ", iter, U", dnorm: ", dnorm, U", delta: ", delta);

			dnorm0 = dnorm;
			++ iter;
		}
		if (info)
			MelderInfo_drain ();
	} catch (MelderError) {
		Melder_throw (me, U": factorization cannot be improved.");
	}
}

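/*
	Alternating least squares: with F fixed, the least-squares optimal W solves the normal
	equations (F'F)*W = F'*D; with W fixed, F' solves (WW')*F' = W*D'. Both systems are
	solved via the SVD, after which negative elements are clipped to zero to restore
	non-negativity.
*/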
void NMF_improveFactorization_als (NMF me, constMATVU const& data, integer maximumNumberOfIterations, double changeTolerance, double approximationTolerance, bool info) {
	try {
		Melder_require (my numberOfColumns == data.ncol, U"The number of columns should be equal.");
		Melder_require (my numberOfRows == data.nrow, U"The number of rows should be equal.");

		autoMAT productFtD = zero_MAT (my numberOfFeatures, my numberOfColumns); // calculations of F'D
		autoMAT productWDt = zero_MAT (my numberOfFeatures, my numberOfRows); // calculations of WD'

		autoMAT weights0 = zero_MAT (my numberOfFeatures, my numberOfColumns);
		autoMAT features0 = zero_MAT (my numberOfRows, my numberOfFeatures);

		autoMAT productFtF = zero_MAT (my numberOfFeatures, my numberOfFeatures); // calculations of F'F
		autoMAT productWWt = zero_MAT (my numberOfFeatures, my numberOfFeatures); // calculations of WW'

		autoSVD svd_WWt = SVD_create (my numberOfFeatures, my numberOfFeatures); // for solving W*W'*F' = W*D'
		autoSVD svd_FtF = SVD_create (my numberOfFeatures, my numberOfFeatures); // for solving F'*F*W = F'*D

		const double traceDtD = NUMtrace2 (data.transpose(), data); // for distance calculation

		if (! NUMfpp)
			NUMmachar ();
		const double eps = NUMfpp -> eps;
		const double sqrteps = sqrt (eps);
		double dnorm0 = 0.0;
		integer iter = 1;
		bool convergence = false;

		while (iter <= maximumNumberOfIterations && not convergence) {
			/*
				for iter to maxiter
					(1) W is the solution of F'*F*W = F'*D.
					    Set all negative elements in W to 0.
					(2) F is the solution of W*W'*F' = W*D'.
					    Set all negative elements in F to 0.
					(3) test for convergence
				endfor
			*/

			/*
				1. Solve the equations for the new W:  F'*F*W = F'*D
			*/
			weights0.all()  <<=  my weights.all();   // save previous weights for convergence test
			mul_MAT_out (productFtD.get(), my features.transpose(), data);
			mul_MAT_out (productFtF.get(), my features.transpose(), my features.get());

			svd_FtF -> u.all()  <<=  productFtF.all();
			SVD_compute (svd_FtF.get());
			SVD_solve_preallocated (svd_FtF.get(), productFtD.get(), my weights.get());
			MATmakeElementsNonNegative (my weights.get(), 0);

			/*
				2. Solve the equations for the new F:  W*W'*F' = W*D'
			*/
			features0.all()  <<=  my features.all();   // save previous features for convergence test
			mul_MAT_out (productWDt.get(), my weights.get(), data.transpose());
			mul_MAT_out (productWWt.get(), my weights.get(), my weights.transpose());

			svd_WWt -> u.all()  <<=  productWWt.all();
			SVD_compute (svd_WWt.get());
			SVD_solve_preallocated (svd_WWt.get(), productWDt.get(), my features.transpose());
			MATmakeElementsNonNegative (my features.get(), 0);   // step (2): clip negative features to zero

			/*
				3. Convergence test.
			*/
			const double traceWtFtD  = NUMtrace2 (my weights.transpose(), productFtD.get());
			const double traceWtFtFW = NUMtrace2 (productFtF.get(), productWWt.get());
			const double distance = sqrt (std::max (traceDtD - 2.0 * traceWtFtD + traceWtFtFW, 0.0));   // just in case
			const double dnorm = distance / (my numberOfRows * my numberOfColumns);
			const double df = getMaximumChange (my features.get(), features0.get(), sqrteps);
			const double dw = getMaximumChange (my weights.get(), weights0.get(), sqrteps);
			const double delta = std::max (df, dw);

			convergence = ( iter > 1 && (delta < changeTolerance || dnorm < dnorm0 * approximationTolerance) );
			if (info)
				MelderInfo_writeLine (U"Iteration: ", iter, U", dnorm: ", dnorm, U", delta: ", delta);
			dnorm0 = dnorm;
			++ iter;
		}
		if (info)
			MelderInfo_drain ();
	} catch (MelderError) {
		Melder_throw (me, U": ALS factorization cannot be improved.");
	}
}

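/*
	target [i] = scaleFactor / source [i], i.e. the elementwise reciprocal of source, scaled.
*/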
static void VECinvertAndScale (VECVU const& target, constVECVU const& source, double scaleFactor) {
	Melder_assert (target.size == source.size);
	for (integer i = 1; i <= target.size; i ++)
		target [i] = scaleFactor / source [i];
}

void NMF_improveFactorization_is (NMF me, constMATVU const& data, integer maximumNumberOfIterations, double changeTolerance, double approximationTolerance, bool info) {
	try {
		Melder_require (my numberOfColumns == data.ncol, U"The number of columns should be equal.");
		Melder_require (my numberOfRows == data.nrow, U"The number of rows should be equal.");
		Melder_require (! NUMhasZeroElement (data),
			U"The data matrix should not have cells that are zero.");
		autoMAT vk = raw_MAT (data.nrow, data.ncol);
		autoMAT fw = raw_MAT (data.nrow, data.ncol);
		autoMAT fcol_x_wrow = raw_MAT (data.nrow, data.ncol);
		autoVEC fcolumn_inv = raw_VEC (data.nrow); // feature column
		autoVEC wrow_inv = raw_VEC (data.ncol); // weight row
		mul_MAT_out (fw.get(), my features.get(), my weights.get());
		double divergence = MATgetDivergence_ItakuraSaito (data, fw.get());
		const double divergence0 = divergence;
		if (info)
			MelderInfo_writeLine (U"Iteration: 0", U" divergence: ", divergence, U" delta: ", divergence);
		integer iter = 1;
		bool convergence = false;
		while (iter <= maximumNumberOfIterations && not convergence) {
			/*
				C. Févotte, N. Bertin & J.-L. Durrieu (2009): "Nonnegative matrix factorization with the Itakura-Saito
					divergence: with application to music analysis", Neural Computation 21, 793--830.
				Algorithm 2, page 806:
				until convergence {
					for k to numberOfFeatures {
						G(k) = fcol(k) x wrow(k) / F.H                                    (1)
						V(k) = G(k)^2 . V + (1 - G(k)) . (fcol(k) x wrow(k))              (2)
						wrow(k) <-- (1/fcol(k))' . V(k) / numberOfRows                    (3)
						fcol(k) <-- V(k) . (1/wrow(k))' / numberOfColumns                 (4)
						Normalize fcol(k) and wrow(k)                                     (5)
						F.H <-- F.H - old(fcol(k) x wrow(k)) + new(fcol(k) x wrow(k))     (6)
					}
				}
				There is no need to calculate G(k) explicitly as in (1):
				we can calculate the elements of G(k) while we are doing (2).
			*/
			for (integer kf = 1; kf <= my numberOfFeatures; kf ++) {
				// (1) and (2)
				outer_MAT_out (fcol_x_wrow.get(), my features.column (kf), my weights.row (kf));
				for (integer irow = 1; irow <= data.nrow; irow ++)
					for (integer icol = 1; icol <= data.ncol; icol ++) {
						const double gk = fcol_x_wrow [irow] [icol] / fw [irow] [icol];
						vk [irow] [icol] = gk * gk * data [irow] [icol] + (1.0 - gk) * fcol_x_wrow [irow] [icol];
					}
				// (3)
				VECinvertAndScale (fcolumn_inv.get(), my features.column (kf), 1.0 / my numberOfRows);
				mul_VEC_out (my weights.row (kf), fcolumn_inv.get(), vk.get());
				// (4)
				VECinvertAndScale (wrow_inv.get(), my weights.row (kf), 1.0 / my numberOfColumns);
				mul_VEC_out (my features.column (kf), vk.get(), wrow_inv.get());
				// (5)
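				/*
					Dividing fcol(k) by its norm and multiplying wrow(k) by the same factor leaves
					their outer product, and hence F.H, unchanged; it only removes the scaling
					ambiguity between features and weights.
				*/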
				const double fcolumn_norm = NUMnorm (my features.column (kf), 2.0);
				my features.column (kf)  /=  fcolumn_norm;
				my weights.row (kf)  *=  fcolumn_norm;
				// (6)
				fw.get()  -=  fcol_x_wrow.get();
				outer_MAT_out (fcol_x_wrow.get(), my features.column (kf), my weights.row (kf));
				fw.get()  +=  fcol_x_wrow.get();
			}
			const double divergence_update = MATgetDivergence_ItakuraSaito (data, fw.get());
			const double delta = divergence - divergence_update;
			convergence = ( iter > 1 && (fabs (delta) < changeTolerance || divergence_update < divergence0 * approximationTolerance) );
			if (info)
				MelderInfo_writeLine (U"Iteration: ", iter, U" divergence: ", divergence_update, U" delta: ", delta);
			++ iter;
			divergence = divergence_update;
		}
		if (info)
			MelderInfo_drain ();

	} catch (MelderError) {
		Melder_throw (me, U": IS factorization cannot be improved.");
	}
}

autoMAT NMF_synthesize (NMF me) {
	try {
		autoMAT result = mul_MAT (my features.get(), my weights.get());
		return result;
	} catch (MelderError) {
		Melder_throw (me, U": No matrix created.");
	}
}

/* End of file NMF.cpp */