MethodCFMlpANN.cxx

00001 // @(#)root/tmva $Id: MethodCFMlpANN.cxx 37399 2010-12-08 15:22:07Z evt $
00002 // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss
00003 
00004 /**********************************************************************************
00005  * Project: TMVA - a Root-integrated toolkit for multivariate Data analysis       *
00006  * Package: TMVA                                                                  *
00007  * Class  : TMVA::MethodCFMlpANN                                                  *
00008  * Web    : http://tmva.sourceforge.net                                           *
00009  *                                                                                *
00010  * Description:                                                                   *
00011  *      Implementation (see header for description)                               *
00012  *                                                                                *
00013  * Authors (alphabetical):                                                        *
00014  *      Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland              *
00015  *      Xavier Prudent  <prudent@lapp.in2p3.fr>  - LAPP, France                   *
00016  *      Helge Voss      <Helge.Voss@cern.ch>     - MPI-K Heidelberg, Germany      *
00017  *      Kai Voss        <Kai.Voss@cern.ch>       - U. of Victoria, Canada         *
00018  *                                                                                *
00019  * Copyright (c) 2005:                                                            *
00020  *      CERN, Switzerland                                                         *
00021  *      U. of Victoria, Canada                                                    *
00022  *      MPI-K Heidelberg, Germany                                                 *
00023  *      LAPP, Annecy, France                                                      *
00024  *                                                                                *
00025  * Redistribution and use in source and binary forms, with or without             *
00026  * modification, are permitted according to the terms listed in LICENSE           *
00027  * (http://tmva.sourceforge.net/LICENSE)                                          *
00028  **********************************************************************************/
00029 
00030 //_______________________________________________________________________
00031 //
00032 // Begin_Html
00033 /*
00034   Interface to the Clermont-Ferrand artificial neural network
00035 
00036   <p>
00037   The CFMlpANN belongs to the class of Multilayer Perceptrons (MLP), which are
00038   feed-forward networks according to the following propagation schema:<br>
00039   <center>
00040   <img vspace=10 src="gif/tmva_mlp.gif" align="bottom" alt="Schema for artificial neural network">
00041   </center>
00042   The input layer contains as many neurons as input variables used in the MVA.
00043   The output layer contains two neurons for the signal and background
00044   event classes. In between the input and output layers are a variable number
00045   of <i>k</i> hidden layers with arbitrary numbers of neurons. (While the
00046   structure of the input and output layers is determined by the problem, the
00047   hidden layers can be configured by the user through the option string
00048   of the method booking.) <br>
00049 
00050   As indicated in the sketch, all neuron inputs to a layer are linear
00051   combinations of the neuron outputs of the previous layer. The transfer
00052   from input to output within a neuron is performed by means of an "activation
00053   function". In general, the activation function of a neuron can be
00054   zero (deactivated), one (linear), or non-linear. The above example uses
00055   a sigmoid activation function. The transfer function of the output layer
00056   is usually linear. As a consequence, an ANN without hidden layers should
00057   provide the same discrimination power as a linear discriminant analysis (Fisher).
00058   With one hidden layer, the ANN computes a linear combination of
00059   sigmoid functions.  <br>
00060 
00061   The only learning method supported by the CFMlpANN is stochastic learning.
00062 */
00063 // End_Html
00064 //_______________________________________________________________________
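
// Example booking of this classifier from user code (an illustrative sketch, not
// part of the original file; the "factory" object and the chosen option values
// are assumptions):
//
//    factory->BookMethod( TMVA::Types::kCFMlpANN, "CFMlpANN",
//                         "H:!V:NCycles=2000:HiddenLayers=N+1,N" );
//
// Each comma-separated token of HiddenLayers defines one hidden layer, with "N"
// standing for the number of input variables (see DeclareOptions/ProcessOptions).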
00065 
00066 #include <string>
00067 #include <cstdlib>
00068 #include <iostream>
00069 
00070 #include "TMatrix.h"
00071 #include "TObjString.h"
00072 #include "Riostream.h"
00073 #include "TMath.h"
00074 
00075 #include "TMVA/ClassifierFactory.h"
00076 #include "TMVA/MethodCFMlpANN.h"
00077 #include "TMVA/MethodCFMlpANN_def.h"
00078 #include "TMVA/Tools.h"
00079 
00080 REGISTER_METHOD(CFMlpANN)
00081 
00082 ClassImp(TMVA::MethodCFMlpANN)
00083 
00084 // initialization of global variable
00085 namespace TMVA {
00086    Int_t MethodCFMlpANN_nsel = 0;
00087 }
00088 
00089 TMVA::MethodCFMlpANN* TMVA::MethodCFMlpANN::fgThis = 0;
00090 
00091 //_______________________________________________________________________
00092 TMVA::MethodCFMlpANN::MethodCFMlpANN( const TString& jobName,
00093                                       const TString& methodTitle,
00094                                       DataSetInfo& theData,
00095                                       const TString& theOption,
00096                                       TDirectory* theTargetDir  ) :
00097    TMVA::MethodBase( jobName, Types::kCFMlpANN, methodTitle, theData, theOption, theTargetDir  ),
00098    fData(0),
00099    fClass(0),
00100    fNlayers(0),
00101    fNcycles(0),
00102    fNodes(0),
00103    fYNN(0)
00104 {
00105    // standard constructor
00106    // option string (see DeclareOptions): "NCycles=<n>:HiddenLayers=<spec>"
00107    // defaults are: NCycles = 3000, HiddenLayers = "N,N-1"
00108    //
00109    // * the network always contains one input and one output layer, so the
00110    //   total number of layers is n_hidden_layers + 2
00111    //
00112    // * the number of nodes (neurons) per layer is:
00113    //   - input layer  : nvars, the number of variables used in the NN
00114    //   - hidden layers: as given by the HiddenLayers specification, where
00115    //     "N" stands for nvars (e.g. "N,N-1" requests nvars and nvars-1 neurons)
00116    //   - output layer : 2 (signal and background)
00117    //
00118    // Hence, the default case is: n_neurons(layer 1 (input)) : nvars
00119    //                             n_neurons(layer 2 (hidden)): nvars
00120    //                             n_neurons(layer 3 (hidden)): nvars-1
00121    //                             n_neurons(layer 4 (out))   : 2
00122    //
00123    // This artificial neural network usually needs a relatively large
00124    // number of cycles to converge (8000 and more). Overtraining can be
00125    // efficiently tested by comparing the signal and background output of
00126    // the NN for the events used for training with that obtained from an
00127    // independent data sample with equal properties. If the separation
00128    // performance is significantly better for the training sample, the NN
00129    // is fitting statistical fluctuations and is hence overtrained. In
00130    // this case, the number of cycles should be reduced, or the size
00131    // of the training sample increased.
00132 
00133    MethodCFMlpANN_Utils::SetLogger(&Log());
00134 
00135 }
00136 
00137 //_______________________________________________________________________
00138 TMVA::MethodCFMlpANN::MethodCFMlpANN( DataSetInfo& theData,
00139                                       const TString& theWeightFile,
00140                                       TDirectory* theTargetDir ):
00141    TMVA::MethodBase( Types::kCFMlpANN, theData, theWeightFile, theTargetDir ),
00142    fData(0),
00143    fClass(0),
00144    fNlayers(0),
00145    fNcycles(0),
00146    fNodes(0),
00147    fYNN(0)
00148 {
00149    // constructor from weight file
00150 }
00151 
00152 //_______________________________________________________________________
00153 Bool_t TMVA::MethodCFMlpANN::HasAnalysisType( Types::EAnalysisType type, UInt_t numberClasses, UInt_t /*numberTargets*/ )
00154 {
00155    // CFMlpANN can handle classification with 2 classes
00156    if (type == Types::kClassification && numberClasses == 2) return kTRUE;
00157    return kFALSE;
00158 }
00159 
00160 //_______________________________________________________________________
00161 void TMVA::MethodCFMlpANN::DeclareOptions()
00162 {
00163    // define the options (their key words) that can be set in the option string
00164    // known options: NCycles=xx              :the number of training cycles
00165    //                HiddenLayers="N-1,N-2"  :the specification of the hidden layers
00166 
00167    DeclareOptionRef( fNcycles  =3000,      "NCycles",      "Number of training cycles" );
00168    DeclareOptionRef( fLayerSpec="N,N-1",   "HiddenLayers", "Specification of hidden layer architecture" );
00169 }
00170 
00171 //_______________________________________________________________________
00172 void TMVA::MethodCFMlpANN::ProcessOptions()
00173 {
00174    // decode the options in the option string
00175    fNodes = new Int_t[20]; // number of nodes per layer (maximum 20 layers)
00176    fNlayers = 2;
00177    Int_t currentHiddenLayer = 1;
00178    TString layerSpec(fLayerSpec);
00179    while(layerSpec.Length()>0) {
00180       TString sToAdd = "";
00181       if (layerSpec.First(',')<0) {
00182          sToAdd = layerSpec;
00183          layerSpec = "";
00184       } 
00185       else {
00186          sToAdd = layerSpec(0,layerSpec.First(','));
00187          layerSpec = layerSpec(layerSpec.First(',')+1,layerSpec.Length());
00188       }
00189       Int_t nNodes = 0;
00190       if (sToAdd.BeginsWith("N") || sToAdd.BeginsWith("n")) { sToAdd.Remove(0,1); nNodes = GetNvar(); }
00191       nNodes += atoi(sToAdd);
00192       fNodes[currentHiddenLayer++] = nNodes;
00193       fNlayers++;
00194    }
00195    fNodes[0]          = GetNvar(); // number of input nodes
00196    fNodes[fNlayers-1] = 2;         // number of output nodes
00197 
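   // Illustrative worked example (not part of the original code): with five input
   // variables (GetNvar() == 5) and the default specification HiddenLayers="N,N-1",
   // the parsing above yields
   //    fNodes[0] = 5   (input layer)
   //    fNodes[1] = 5   (hidden layer "N")
   //    fNodes[2] = 4   (hidden layer "N-1")
   //    fNodes[3] = 2   (output layer)
   // and fNlayers = 4.
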
00198    if (IgnoreEventsWithNegWeightsInTraining()) {
00199       Log() << kFATAL << "Mechanism to ignore events with negative weights in training not yet available for method: "
00200             << GetMethodTypeName() 
00201             << " --> please remove \"IgnoreNegWeightsInTraining\" option from booking string."
00202             << Endl;
00203    }
00204 
00205    Log() << kINFO << "Use configuration (nodes per layer): in=";
00206    for (Int_t i=0; i<fNlayers-1; i++) Log() << kINFO << fNodes[i] << ":";
00207    Log() << kINFO << fNodes[fNlayers-1] << "=out" << Endl;   
00208 
00209    // some info
00210    Log() << "Use " << fNcycles << " training cycles" << Endl;
00211 
00212    Int_t nEvtTrain = Data()->GetNTrainingEvents();
00213 
00214    // build the data and class look-up tables from the training events
00215    if (nEvtTrain>0) {
00216     
00217       // Data LUT
00218       fData  = new TMatrix( nEvtTrain, GetNvar() );
00219       fClass = new vector<Int_t>( nEvtTrain );
00220 
00221       // ---- fill LUTs
00222 
00223       UInt_t ivar;
00224       for (Int_t ievt=0; ievt<nEvtTrain; ievt++) {
00225          const Event * ev = GetEvent(ievt);
00226 
00227          // identify signal and background events  
00228          (*fClass)[ievt] = DataInfo().IsSignal(ev) ? 1 : 2;
00229       
00230          // use normalized input Data
00231          for (ivar=0; ivar<GetNvar(); ivar++) {
00232             (*fData)( ievt, ivar ) = ev->GetValue(ivar);
00233          }
00234       }
00235 
00236       //Log() << kVERBOSE << Data()->GetNEvtSigTrain() << " Signal and " 
00237       //        << Data()->GetNEvtBkgdTrain() << " background" << " events in trainingTree" << Endl;
00238    }
00239 
00240 }
00241 
00242 //_______________________________________________________________________
00243 void TMVA::MethodCFMlpANN::Init( void )
00244 {
00245    // default initialisation called by all constructors
00246 
00247    // CFMlpANN prefers normalised input variables
00248    SetNormalised( kTRUE );
00249 
00250    // initialize all pointers
00251    fgThis = this;  
00252 
00253    // initialize dimensions
00254    TMVA::MethodCFMlpANN_nsel = 0;  
00255 }
00256 
00257 //_______________________________________________________________________
00258 TMVA::MethodCFMlpANN::~MethodCFMlpANN( void )
00259 {
00260    // destructor
00261    delete fData;
00262    delete fClass;
00263    delete[] fNodes;
00264 
00265    if (fYNN!=0) {
00266       for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];
00267       delete[] fYNN;
00268       fYNN=0;
00269    }
00270 }
00271 
00272 //_______________________________________________________________________
00273 void TMVA::MethodCFMlpANN::Train( void )
00274 {
00275    // training of the Clermont-Ferrand NN classifier
00276 
00277    Double_t dumDat(0);
00278    Int_t ntrain(Data()->GetNTrainingEvents());
00279    Int_t ntest(0);
00280    Int_t nvar(GetNvar());
00281    Int_t nlayers(fNlayers);
00282    Int_t *nodes = new Int_t[nlayers];
00283    Int_t ncycles(fNcycles);
00284 
00285    for (Int_t i=0; i<nlayers; i++) nodes[i] = fNodes[i]; // full copy of class member
00286 
00287    if (fYNN != 0) {
00288       for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];
00289       delete[] fYNN;
00290       fYNN = 0;
00291    }
00292    fYNN = new Double_t*[nlayers];
00293    for (Int_t layer=0; layer<nlayers; layer++)
00294       fYNN[layer] = new Double_t[fNodes[layer]];
00295    
00296    // the training routine (translated from Fortran via f2c) is not available on Windows
00297 #ifndef R__WIN32
00298    Train_nn( &dumDat, &dumDat, &ntrain, &ntest, &nvar, &nlayers, nodes, &ncycles );
00299 #else
00300    Log() << kWARNING << "<Train> sorry CFMlpANN does not run on Windows" << Endl;
00301 #endif  
00302 
00303    delete [] nodes;
00304 }
00305 
00306 //_______________________________________________________________________
00307 Double_t TMVA::MethodCFMlpANN::GetMvaValue( Double_t* err, Double_t* errUpper )
00308 {
00309    // returns CFMlpANN output (normalised within [0,1])
00310    Bool_t isOK = kTRUE;
00311 
00312    const Event* ev = GetEvent();
00313 
00314    // copy of input variables
00315    vector<Double_t> inputVec( GetNvar() );
00316    for (UInt_t ivar=0; ivar<GetNvar(); ivar++) inputVec[ivar] = ev->GetValue(ivar);
00317 
00318    Double_t myMVA = EvalANN( inputVec, isOK );
00319    if (!isOK) Log() << kFATAL << "EvalANN returns (!isOK) for event " << Endl;
00320 
00321    // cannot determine error
00322    NoErrorCalc(err, errUpper);
00323 
00324    return myMVA;
00325 }
00326 
00327 //_______________________________________________________________________
00328 Double_t TMVA::MethodCFMlpANN::EvalANN( vector<Double_t>& inVar, Bool_t& isOK )
00329 {
00330    // evaluates NN value as function of input variables
00331 
00332    // hard copy of the input variables (necessary because they are modified below)
00333    Double_t* xeev = new Double_t[GetNvar()];
00334    for (UInt_t ivar=0; ivar<GetNvar(); ivar++) xeev[ivar] = inVar[ivar];
00335   
00336    // ---- now apply the weights: get NN output
00337    isOK = kTRUE;
00338    for (UInt_t jvar=0; jvar<GetNvar(); jvar++) {
00339 
00340       if (fVarn_1.xmax[jvar] < xeev[jvar]) xeev[jvar] = fVarn_1.xmax[jvar];
00341       if (fVarn_1.xmin[jvar] > xeev[jvar]) xeev[jvar] = fVarn_1.xmin[jvar];
00342       if (fVarn_1.xmax[jvar] == fVarn_1.xmin[jvar]) {
00343          isOK = kFALSE;
00344          xeev[jvar] = 0;
00345       }
00346       else {
00347          xeev[jvar] = xeev[jvar] - ((fVarn_1.xmax[jvar] + fVarn_1.xmin[jvar])/2);    
00348          xeev[jvar] = xeev[jvar] / ((fVarn_1.xmax[jvar] - fVarn_1.xmin[jvar])/2);    
00349       }
00350    }
00351     
00352    NN_ava( xeev );
00353 
00354    Double_t retval = 0.5*(1.0 + fYNN[fParam_1.layerm-1][0]);
00355 
00356    delete [] xeev;
00357 
00358    return retval;
00359 }
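
//_______________________________________________________________________
// Minimal standalone sketch (illustrative only; not part of the original class and
// not used by it) of the per-variable normalisation performed in EvalANN above:
// the value is clipped to the training range [xmin, xmax] and mapped linearly
// onto [-1, +1].
namespace {
   inline Double_t NormaliseToRangeSketch( Double_t x, Double_t xmin, Double_t xmax )
   {
      if (x > xmax) x = xmax;            // clip to the training range
      if (x < xmin) x = xmin;
      if (xmax == xmin) return 0;        // degenerate range carries no information
      return (x - 0.5*(xmax + xmin)) / (0.5*(xmax - xmin));
   }
}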
00360 
00361 //_______________________________________________________________________
00362 void  TMVA::MethodCFMlpANN::NN_ava( Double_t* xeev )
00363 {  
00364    // forward propagation of the network response through all layers
00365    for (Int_t ivar=0; ivar<fNeur_1.neuron[0]; ivar++) fYNN[0][ivar] = xeev[ivar];
00366 
00367    for (Int_t layer=1; layer<fParam_1.layerm; layer++) {
00368       for (Int_t j=1; j<=fNeur_1.neuron[layer]; j++) {
00369 
00370          Double_t x = Ww_ref(fNeur_1.ww, layer+1,j); // init with the bias layer
00371 
00372          for (Int_t k=1; k<=fNeur_1.neuron[layer-1]; k++) { // neurons of originating layer
00373             x += fYNN[layer-1][k-1]*W_ref(fNeur_1.w, layer+1, j, k);
00374          }
00375          fYNN[layer][j-1] = NN_fonc( layer, x );
00376       }
00377    }  
00378 }
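
// Added summary (not in the original file): in formulas, the propagation above
// computes, for each layer l >= 1 and neuron j,
//    y_j^(l) = f_l( ww(l,j) + sum_k w(l,j,k) * y_k^(l-1) )
// where ww is the bias term, w the weight matrix, and f_l the layer's activation
// function NN_fonc; the input layer (l = 0) simply holds the normalised inputs.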
00379 
00380 //_______________________________________________________________________
00381 Double_t TMVA::MethodCFMlpANN::NN_fonc( Int_t i, Double_t u ) const
00382 {
00383    // activation function
00384    Double_t f(0);
00385   
00386    if      (u/fDel_1.temp[i] >  170) f = +1;
00387    else if (u/fDel_1.temp[i] < -170) f = -1;
00388    else {
00389       Double_t yy = TMath::Exp(-u/fDel_1.temp[i]);
00390       f  = (1 - yy)/(1 + yy);
00391    }
00392 
00393    return f;
00394 }
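
// Added note: with T = fDel_1.temp[i], the expression above is
//    f(u) = (1 - exp(-u/T)) / (1 + exp(-u/T)) = tanh( u / (2T) ),
// a temperature-scaled sigmoid with values in (-1, +1); the cut at |u/T| > 170
// only protects the exponential against floating-point overflow.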
00395 
00396 //_______________________________________________________________________
00397 void TMVA::MethodCFMlpANN::ReadWeightsFromStream( istream & istr )
00398 {
00399    // read back the training weights from file (stream)
00400    TString var;
00401 
00402    // read number of variables and classes
00403    UInt_t nva(0), lclass(0);
00404    istr >> nva >> lclass;
00405 
00406    if (GetNvar() != nva) // wrong file
00407       Log() << kFATAL << "<ReadWeightsFromFile> mismatch in number of variables" << Endl;
00408 
00409    // number of output classes must be 2
00410    if (lclass != 2) // wrong file
00411       Log() << kFATAL << "<ReadWeightsFromFile> mismatch in number of classes" << Endl;
00412 
00413    // check that we are not at the end of the file
00414    if (istr.eof( ))
00415       Log() << kFATAL << "<ReadWeightsFromStream> reached EOF prematurely " << Endl;
00416 
00417    // read extrema of input variables
00418    for (UInt_t ivar=0; ivar<GetNvar(); ivar++)
00419       istr >> fVarn_1.xmax[ivar] >> fVarn_1.xmin[ivar];
00420 
00421    // read number of layers (sum of: input + output + hidden)
00422    istr >> fParam_1.layerm;
00423 
00424    if (fYNN != 0) {
00425       for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];
00426       delete[] fYNN;
00427       fYNN = 0;
00428    }
00429    fYNN = new Double_t*[fParam_1.layerm];
00430    for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
00431       // read number of neurons for each layer
00432       // coverity[tainted_data_argument]
00433       istr >> fNeur_1.neuron[layer];
00434       fYNN[layer] = new Double_t[fNeur_1.neuron[layer]];
00435    }
00436 
00437    // to read dummy lines
00438    const Int_t nchar( 100 );
00439    char* dumchar = new char[nchar];
00440 
00441    // read weights
00442    for (Int_t layer=1; layer<=fParam_1.layerm-1; layer++) {
00443 
00444       Int_t nq = fNeur_1.neuron[layer]/10;
00445       Int_t nr = fNeur_1.neuron[layer] - nq*10;
00446 
00447       Int_t kk(0);
00448       if (nr==0) kk = nq;
00449       else       kk = nq+1;
00450 
00451       for (Int_t k=1; k<=kk; k++) {
00452          Int_t jmin = 10*k - 9;
00453          Int_t jmax = 10*k;
00454          if (fNeur_1.neuron[layer]<jmax) jmax = fNeur_1.neuron[layer];
00455          for (Int_t j=jmin; j<=jmax; j++) {
00456             istr >> Ww_ref(fNeur_1.ww, layer+1, j);
00457          }
00458          for (Int_t i=1; i<=fNeur_1.neuron[layer-1]; i++) {
00459             for (Int_t j=jmin; j<=jmax; j++) {
00460                istr >> W_ref(fNeur_1.w, layer+1, j, i);
00461             }
00462          }
00463          // skip two empty lines
00464          istr.getline( dumchar, nchar );
00465       }
00466    }
00467 
00468    for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
00469 
00470       // skip 2 empty lines
00471       istr.getline( dumchar, nchar );
00472       istr.getline( dumchar, nchar );
00473 
00474       istr >> fDel_1.temp[layer];
00475    }
00476 
00477    // sanity check
00478    if ((Int_t)GetNvar() != fNeur_1.neuron[0]) {
00479       Log() << kFATAL << "<ReadWeightsFromFile> mismatch in zeroth layer:"
00480               << GetNvar() << " " << fNeur_1.neuron[0] << Endl;
00481    }
00482 
00483    fNlayers = fParam_1.layerm;
00484    delete[] dumchar;
00485 }
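
// Layout of the text weight file read above (summary added for orientation):
// a header line with the number of variables and classes, one (xmax, xmin) pair
// per input variable, the total number of layers, the number of neurons per
// layer, then for each layer transition the bias and weight values written in
// blocks of at most 10 neurons, and finally one "temperature" parameter per layer.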
00486 
00487 //_______________________________________________________________________
00488 Int_t TMVA::MethodCFMlpANN::DataInterface( Double_t* /*tout2*/, Double_t*  /*tin2*/, 
00489                                            Int_t* /* icode*/, Int_t*  /*flag*/, 
00490                                            Int_t*  /*nalire*/, Int_t* nvar, 
00491                                            Double_t* xpg, Int_t* iclass, Int_t* ikend )
00492 {
00493    // data interface function 
00494    
00495    // icode and ikend are dummies needed to match f2c mlpl3 functions
00496    *ikend = 0; 
00497 
00498    // retrieve pointer to current object (CFMlpANN must be a singleton class!)
00499    TMVA::MethodCFMlpANN* opt = TMVA::MethodCFMlpANN::This();
00500 
00501    // sanity checks
00502    if (0 == xpg) {
00503       Log() << kFATAL << "ERROR in MethodCFMlpANN_DataInterface zero pointer xpg" << Endl;
00504    }
00505    if (*nvar != (Int_t)opt->GetNvar()) {
00506       Log() << kFATAL << "ERROR in MethodCFMlpANN_DataInterface mismatch in num of variables: "
00507               << *nvar << " " << opt->GetNvar() << Endl;
00508    }
00509 
00510    // fill variables
00511    *iclass = (int)opt->GetClass( TMVA::MethodCFMlpANN_nsel );
00512    for (UInt_t ivar=0; ivar<opt->GetNvar(); ivar++) 
00513       xpg[ivar] = (double)opt->GetData( TMVA::MethodCFMlpANN_nsel, ivar );
00514 
00515    ++TMVA::MethodCFMlpANN_nsel;
00516 
00517    return 0;
00518 }
00519 
00520 //_______________________________________________________________________
00521 void TMVA::MethodCFMlpANN::AddWeightsXMLTo( void* parent ) const 
00522 {
00523    // write weights to xml file
00524 
00525    void *wght = gTools().AddChild(parent, "Weights");
00526    gTools().AddAttr(wght,"NVars",fParam_1.nvar);
00527    gTools().AddAttr(wght,"NClasses",fParam_1.lclass);
00528    gTools().AddAttr(wght,"NLayers",fParam_1.layerm);
00529    void* minmaxnode = gTools().AddChild(wght, "VarMinMax");
00530    stringstream s;
00531    s.precision( 16 );
00532    for (Int_t ivar=0; ivar<fParam_1.nvar; ivar++) 
00533       s << std::scientific << fVarn_1.xmin[ivar] <<  " " << fVarn_1.xmax[ivar] <<  " ";
00534    gTools().AddRawLine( minmaxnode, s.str().c_str() );
00535    void* neurons = gTools().AddChild(wght, "NNeurons");
00536    stringstream n;
00537    n.precision( 16 );
00538    for (Int_t layer=0; layer<fParam_1.layerm; layer++)
00539       n << std::scientific << fNeur_1.neuron[layer] << " ";
00540    gTools().AddRawLine( neurons, n.str().c_str() );
00541    for (Int_t layer=1; layer<fParam_1.layerm; layer++) {
00542       void* layernode = gTools().AddChild(wght, "Layer"+gTools().StringFromInt(layer));
00543       gTools().AddAttr(layernode,"NNeurons",fNeur_1.neuron[layer]);
00544       void* neuronnode=NULL;
00545       for (Int_t neuron=0; neuron<fNeur_1.neuron[layer]; neuron++) {
00546          neuronnode = gTools().AddChild(layernode,"Neuron"+gTools().StringFromInt(neuron));
00547          stringstream weights;
00548          weights.precision( 16 );         
00549          weights << std::scientific << Ww_ref(fNeur_1.ww, layer+1, neuron+1);
00550          for (Int_t i=0; i<fNeur_1.neuron[layer-1]; i++) {
00551             weights << " " << std::scientific << W_ref(fNeur_1.w, layer+1, neuron+1, i+1);
00552          }
00553          gTools().AddRawLine( neuronnode, weights.str().c_str() );
00554       }
00555    }
00556    void* tempnode = gTools().AddChild(wght, "LayerTemp");
00557    stringstream temp;
00558    temp.precision( 16 );
00559    for (Int_t layer=0; layer<fParam_1.layerm; layer++) {         
00560        temp << std::scientific << fDel_1.temp[layer] << " ";
00561    }   
00562    gTools().AddRawLine(tempnode, temp.str().c_str() );
00563 }
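
// Sketch of the XML structure written above (illustrative; attribute and tag
// names are taken from the code):
//
//    <Weights NVars="..." NClasses="..." NLayers="...">
//      <VarMinMax>  xmin xmax  xmin xmax ...            </VarMinMax>
//      <NNeurons>   n_0 n_1 ... n_{NLayers-1}           </NNeurons>
//      <Layer1 NNeurons="...">
//        <Neuron0> bias w_1 w_2 ... </Neuron0>
//        ...
//      </Layer1>
//      ...
//      <LayerTemp>  T_0 T_1 ... T_{NLayers-1}           </LayerTemp>
//    </Weights>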
00564 //_______________________________________________________________________
00565 void TMVA::MethodCFMlpANN::ReadWeightsFromXML( void* wghtnode )
00566 {
00567    // read weights from xml file
00568    gTools().ReadAttr( wghtnode, "NLayers",fParam_1.layerm );
00569    void* minmaxnode = gTools().GetChild(wghtnode);
00570    const char* minmaxcontent = gTools().GetContent(minmaxnode);
00571    std::stringstream content(minmaxcontent);
00572    for (UInt_t ivar=0; ivar<GetNvar(); ivar++) 
00573       content >> fVarn_1.xmin[ivar] >> fVarn_1.xmax[ivar];
00574    if (fYNN != 0) {
00575       for (Int_t i=0; i<fNlayers; i++) delete[] fYNN[i];
00576       delete[] fYNN;
00577       fYNN = 0;
00578    }
00579    fYNN = new Double_t*[fParam_1.layerm];
00580    void *layernode=gTools().GetNextChild(minmaxnode);
00581    const char* neuronscontent = gTools().GetContent(layernode);
00582    stringstream ncontent(neuronscontent);
00583    for (Int_t layer=0; layer<fParam_1.layerm; layer++) {              
00584       // read number of neurons for each layer;
00585       // coverity[tainted_data_argument]
00586       ncontent >> fNeur_1.neuron[layer];
00587       fYNN[layer] = new Double_t[fNeur_1.neuron[layer]];
00588    }
00589    for (Int_t layer=1; layer<fParam_1.layerm; layer++) {
00590       layernode=gTools().GetNextChild(layernode);
00591       void* neuronnode=NULL;
00592       neuronnode = gTools().GetChild(layernode);
00593       for (Int_t neuron=0; neuron<fNeur_1.neuron[layer]; neuron++) {
00594          const char* neuronweights = gTools().GetContent(neuronnode);
00595          stringstream weights(neuronweights);
00596          weights >> Ww_ref(fNeur_1.ww, layer+1, neuron+1);
00597          for (Int_t i=0; i<fNeur_1.neuron[layer-1]; i++) {
00598             weights >> W_ref(fNeur_1.w, layer+1, neuron+1, i+1);
00599          }
00600          neuronnode=gTools().GetNextChild(neuronnode);
00601       }
00602    } 
00603    void* tempnode=gTools().GetNextChild(layernode);
00604    const char* temp = gTools().GetContent(tempnode);
00605    stringstream t(temp);
00606    for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
00607       t >> fDel_1.temp[layer];
00608    }
00609    fNlayers = fParam_1.layerm;
00610 }
00611 
00612 //_______________________________________________________________________
00613 void TMVA::MethodCFMlpANN::PrintWeights( std::ostream & o ) const
00614 {
00615    // write the weights of the neural net
00616 
00617    // write number of variables and classes
00618    o << "Number of vars " << fParam_1.nvar << endl;
00619    o << "Output nodes   " << fParam_1.lclass << endl;
00620    
00621    // write extrema of input variables
00622    for (Int_t ivar=0; ivar<fParam_1.nvar; ivar++) 
00623       o << "Var " << ivar << " [" << fVarn_1.xmin[ivar] << " - " << fVarn_1.xmax[ivar] << "]" << endl;
00624         
00625    // write number of layers (sum of: input + output + hidden)
00626    o << "Number of layers " << fParam_1.layerm << endl;
00627    
00628    o << "Nodes per layer ";
00629    for (Int_t layer=0; layer<fParam_1.layerm; layer++)
00630       // write number of neurons for each layer
00631       o << fNeur_1.neuron[layer] << "     ";   
00632    o << endl;
00633         
00634    // write weights
00635    for (Int_t layer=1; layer<=fParam_1.layerm-1; layer++) { 
00636           
00637       Int_t nq = fNeur_1.neuron[layer]/10;
00638       Int_t nr = fNeur_1.neuron[layer] - nq*10;
00639           
00640       Int_t kk(0);
00641       if (nr==0) kk = nq;
00642       else       kk = nq+1;
00643           
00644       for (Int_t k=1; k<=kk; k++) {
00645          Int_t jmin = 10*k - 9;
00646          Int_t jmax = 10*k;
00647          Int_t i, j;
00648          if (fNeur_1.neuron[layer]<jmax) jmax = fNeur_1.neuron[layer];
00649          for (j=jmin; j<=jmax; j++) {
00650 
00651             //o << fNeur_1.ww[j*max_nLayers_ + layer - 6] << "   ";
00652             o << Ww_ref(fNeur_1.ww, layer+1, j) << "   ";
00653 
00654          }
00655          o << endl;
00656          //for (i=1; i<=fNeur_1.neuron[layer-1]; i++) {
00657          for (i=1; i<=fNeur_1.neuron[layer-1]; i++) {
00658             for (j=jmin; j<=jmax; j++) {
00659                //               o << fNeur_1.w[(i*max_nNodes_ + j)*max_nLayers_ + layer - 186] << "   ";
00660                o << W_ref(fNeur_1.w, layer+1, j, i) << "   ";
00661             }
00662             o << endl;
00663          }
00664             
00665          // skip two empty lines
00666          o << endl;
00667       }
00668    }
00669    for (Int_t layer=0; layer<fParam_1.layerm; layer++) {
00670       o << "Del.temp in layer " << layer << " :  " << fDel_1.temp[layer] << endl;
00671    }      
00672 }
00673 //_______________________________________________________________________
00674 TMVA::MethodCFMlpANN* TMVA::MethodCFMlpANN::This( void ) 
00675 { 
00676    // static pointer to this object (required by the external Fortran-derived functions)
00677    return fgThis; 
00678 }  
00679 void TMVA::MethodCFMlpANN::MakeClassSpecific( std::ostream& fout, const TString& className ) const
00680 {
00681    // write specific classifier response
00682    fout << "   // not implemented for class: \"" << className << "\"" << endl;
00683    fout << "};" << endl;
00684 }
00685 
00686 //_______________________________________________________________________
00687 void TMVA::MethodCFMlpANN::MakeClassSpecificHeader( std::ostream& , const TString&  ) const
00688 {
00689    // write specific classifier response for header
00690 }
00691 
00692 //_______________________________________________________________________
00693 void TMVA::MethodCFMlpANN::GetHelpMessage() const
00694 {
00695    // get help message text
00696    //
00697    // typical length of text line: 
00698    //         "|--------------------------------------------------------------|"
00699    Log() << Endl;
00700    Log() << gTools().Color("bold") << "--- Short description:" << gTools().Color("reset") << Endl;
00701    Log() << Endl;
00702    Log() << "<None>" << Endl;
00703    Log() << Endl;
00704    Log() << gTools().Color("bold") << "--- Performance optimisation:" << gTools().Color("reset") << Endl;
00705    Log() << Endl;
00706    Log() << "<None>" << Endl;
00707    Log() << Endl;
00708    Log() << gTools().Color("bold") << "--- Performance tuning via configuration options:" << gTools().Color("reset") << Endl;
00709    Log() << Endl;
00710    Log() << "<None>" << Endl;
00711 }
