## Copyright (C) 2006 Michel D. Schmid <email: michaelschmid@users.sourceforge.net>
##
##
## This program is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; see the file COPYING.  If not, see
## <http://www.gnu.org/licenses/>.

## -*- texinfo -*-
## @deftypefn {Function File} {}[@var{perf}, @var{Ee}, @var{Aa}, @var{Nn}] = __calcperf (@var{net},@var{xx},@var{Im},@var{Tt})
## @code{__calcperf} calculates the performance of a multi-layer neural network.
## PLEASE DON'T USE IT ELSEWHERE, it probably won't work.
## @end deftypefn

## Author: Michel D. Schmid


function [perf,Ee,Aa,Nn] = __calcperf(net,xx,Im,Tt)

  ## Forward-propagate the input matrix Im (one sample per column) through
  ## all layers of a feedforward (NEWFF-type) network and compute its
  ## performance against the target cell array Tt.
  ##
  ## Inputs:
  ##   net - network structure (uses numLayers, b, IW, LW, layers{i}.transferFcn,
  ##         performFcn)
  ##   xx  - unused here; kept for interface compatibility with callers
  ##   Im  - input matrix, one column per sample
  ##   Tt  - target cell array; only Tt{net.numLayers,1} is read
  ##
  ## Outputs:
  ##   perf - scalar network performance (currently mse only)
  ##   Ee   - error cell array; only the output-layer entry is filled
  ##   Aa   - layer outputs (after the transfer function), one cell per layer
  ##   Nn   - net inputs (before the transfer function), one cell per layer

  ## replicate each bias vector so it has one column per input sample
  nColumns = columns(Im);
  Btemp = cell(net.numLayers,1); # Btemp: bias matrix, expanded to Q columns
  for i = 1:net.numLayers
    Btemp{i} = net.b{i}(:,ones(1,nColumns));
  endfor

  Aa = cell(net.numLayers,1); # outputs of hidden and output layers
  Nn = cell(net.numLayers,1); # layer inputs before the transfer function

  ## propagate through the whole network until the output layer is reached
  for iLayers = 1:net.numLayers

    ## weighted input ("dotprod"): the first layer is driven by the input
    ## weights and Im, every later layer by the layer weights applied to
    ## the previous layer's output (pure feedforward topology)
    if (iLayers==1)
      weightedInput = net.IW{1,1} * Im;
    else
      weightedInput = net.LW{iLayers,iLayers-1} * Aa{iLayers-1,1};
    endif
    Nn{iLayers,1} = weightedInput + Btemp{iLayers};

    ## apply the layer's transfer function to get the layer output
    switch net.layers{iLayers}.transferFcn
      case "purelin"
        Aa{iLayers,1} = purelin(Nn{iLayers,1});
      case "tansig"
        Aa{iLayers,1} = tansig(Nn{iLayers,1});
      case "logsig"
        Aa{iLayers,1} = logsig(Nn{iLayers,1});
      otherwise
        error(["Transfer function: " net.layers{iLayers}.transferFcn " doesn't exist!"])
    endswitch

  endfor # iLayers = 1:net.numLayers

  ## network error: only the output-layer entry of the cell array is filled
  Ee = cell(net.numLayers,1);
  Ee{net.numLayers,1} = Tt{net.numLayers,1} - Aa{net.numLayers,1}; # Tt: target

  ## network performance
  switch(net.performFcn)
    case "mse"
      perf = __mse(Ee);
    otherwise
      error("for performance functions, only mse is currently valid!")
  endswitch

endfunction