MethodCFMlpANN_Utils.cxx

// @(#)root/tmva $Id: MethodCFMlpANN_Utils.cxx 37181 2010-12-02 13:45:02Z evt $
// Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss

/**********************************************************************************
 * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
 * Package: TMVA                                                                  *
 * Class  : TMVA::MethodCFMlpANN_Utils                                            *
 * Web    : http://tmva.sourceforge.net                                           *
 *                                                                                *
 * Reference for the original FORTRAN version "mlpl3.F":                          *
 *      Authors  : J. Proriol and contributions from ALEPH-Clermont-Ferrand       *
 *                 team members                                                   *
 *      Copyright: Laboratoire de Physique Corpusculaire                          *
 *                 Universite Blaise Pascal, IN2P3/CNRS                           *
 *                                                                                *
 * Modifications by present authors:                                              *
 *      use dynamic data tables (not for all of them, but for the big ones)       *
 *                                                                                *
 * Description:                                                                   *
 *      Utility routines translated from the original mlpl3.F FORTRAN routine     *
 *                                                                                *
 *      MultiLayerPerceptron : training code                                      *
 *                                                                                *
 *        NTRAIN: number of events used for training                              *
 *        NTEST:  number of events used for testing                               *
 *        TIN:    input variables                                                 *
 *        TOUT:   type (class) of the event                                       *
 *                                                                                *
 *  ----------------------------------------------------------------------------  *
 *                                                                                *
 * Authors (alphabetical):                                                        *
 *      Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland              *
 *      Xavier Prudent  <prudent@lapp.in2p3.fr>  - LAPP, France                   *
 *      Helge Voss      <Helge.Voss@cern.ch>     - MPI-K Heidelberg, Germany      *
 *      Kai Voss        <Kai.Voss@cern.ch>       - U. of Victoria, Canada         *
 *                                                                                *
 * Copyright (c) 2005:                                                            *
 *      CERN, Switzerland                                                         *
 *      U. of Victoria, Canada                                                    *
 *      MPI-K Heidelberg, Germany                                                 *
 *      LAPP, Annecy, France                                                      *
 *                                                                                *
 * Redistribution and use in source and binary forms, with or without             *
 * modification, are permitted according to the terms listed in LICENSE           *
 * (http://tmva.sourceforge.net/LICENSE)                                          *
 *                                                                                *
 **********************************************************************************/

//_______________________________________________________________________
//
// Implementation of the Clermont-Ferrand artificial neural network
//
// Reference for the original FORTRAN version "mlpl3.F":
//      Authors  : J. Proriol and contributions from ALEPH-Clermont-Ferrand
//                 team members
//      Copyright: Laboratoire de Physique Corpusculaire
//                 Universite Blaise Pascal, IN2P3/CNRS
//_______________________________________________________________________

#include <string>
#include <iostream>
#include <cstdlib>

#include "TMath.h"
#include "TString.h"

#include "TMVA/MethodCFMlpANN_Utils.h"
#include "TMVA/Timer.h"

using std::cout;
using std::endl;

ClassImp(TMVA::MethodCFMlpANN_Utils)

Int_t       TMVA::MethodCFMlpANN_Utils::fg_100         = 100;
Int_t       TMVA::MethodCFMlpANN_Utils::fg_0           = 0;
Int_t       TMVA::MethodCFMlpANN_Utils::fg_max_nVar_   = max_nVar_;
Int_t       TMVA::MethodCFMlpANN_Utils::fg_max_nNodes_ = max_nNodes_;
Int_t       TMVA::MethodCFMlpANN_Utils::fg_999         = 999;
const char* TMVA::MethodCFMlpANN_Utils::fg_MethodName  = "--- CFMlpANN                 ";
TMVA::MethodCFMlpANN_Utils::MethodCFMlpANN_Utils()
{
   // default constructor
   Int_t i(0);
   for(i=0; i<max_nVar_;++i) fVarn_1.xmin[i] = 0;
   fCost_1.ancout = 0;
   fCost_1.ieps = 0;
   fCost_1.tolcou = 0;

   for(i=0; i<max_nNodes_;++i) fDel_1.coef[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fDel_1.del[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_*max_nNodes_;++i) fDel_1.delta[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_*max_nNodes_;++i) fDel_1.delw[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fDel_1.delww[i] = 0;
   fDel_1.demin = 0;
   fDel_1.demax = 0;
   fDel_1.idde = 0;
   for(i=0; i<max_nLayers_;++i) fDel_1.temp[i] = 0;

   for(i=0; i<max_nNodes_;++i) fNeur_1.cut[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fNeur_1.deltaww[i] = 0;
   for(i=0; i<max_nLayers_;++i) fNeur_1.neuron[i] = 0;
   for(i=0; i<max_nNodes_;++i) fNeur_1.o[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_*max_nNodes_;++i) fNeur_1.w[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fNeur_1.ww[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fNeur_1.x[i] = 0;
   for(i=0; i<max_nLayers_*max_nNodes_;++i) fNeur_1.y[i] = 0;

   fParam_1.eeps = 0;
   fParam_1.epsmin = 0;
   fParam_1.epsmax = 0;
   fParam_1.eta = 0;
   fParam_1.ichoi = 0;
   fParam_1.itest = 0;
   fParam_1.layerm = 0;
   fParam_1.lclass = 0;
   fParam_1.nblearn = 0;
   fParam_1.ndiv = 0;
   fParam_1.ndivis = 0;
   fParam_1.nevl = 0;
   fParam_1.nevt = 0;
   fParam_1.nunap = 0;
   fParam_1.nunilec = 0;
   fParam_1.nunishort = 0;
   fParam_1.nunisor = 0;
   fParam_1.nvar = 0;

   fVarn_1.iclass = 0;
   for(i=0; i<max_Events_;++i) fVarn_1.mclass[i] = 0;
   for(i=0; i<max_Events_;++i) fVarn_1.nclass[i] = 0;
   for(i=0; i<max_nVar_;++i) fVarn_1.xmax[i] = 0;

   fLogger = 0;
}

TMVA::MethodCFMlpANN_Utils::~MethodCFMlpANN_Utils()
{
   // destructor
}

void TMVA::MethodCFMlpANN_Utils::Train_nn( Double_t *tin2, Double_t *tout2, Int_t *ntrain,
                                           Int_t *ntest, Int_t *nvar2, Int_t *nlayer,
                                           Int_t *nodes, Int_t *ncycle )
{
   // training interface - called from the MethodCFMlpANN class object

   // sanity checks
   if (*ntrain + *ntest > max_Events_) {
      printf( "*** CFMlpANN_f2c: Warning in Train_nn: number of training + testing" \
              " events exceeds hardcoded maximum - reset to maximum allowed number\n");
      // rescale proportionally; the ratio must be computed in floating point:
      // the integer ratio max_Events_/(*ntrain + *ntest) is zero in this branch
      Double_t ratio = (Double_t)max_Events_/(*ntrain + *ntest);
      *ntrain = (Int_t)(*ntrain*ratio);
      *ntest  = (Int_t)(*ntest *ratio);
   }
   if (*nvar2 > max_nVar_) {
      printf( "*** CFMlpANN_f2c: ERROR in Train_nn: number of variables" \
              " exceeds hardcoded maximum ==> abort\n");
      std::exit(1);
   }
   if (*nlayer > max_nLayers_) {
      printf( "*** CFMlpANN_f2c: Warning in Train_nn: number of layers" \
              " exceeds hardcoded maximum - reset to maximum allowed number\n");
      *nlayer = max_nLayers_;
   }
   if (*nodes > max_nNodes_) {
      printf( "*** CFMlpANN_f2c: Warning in Train_nn: number of nodes" \
              " exceeds hardcoded maximum - reset to maximum allowed number\n");
      *nodes = max_nNodes_;
   }

   // create dynamic data tables (AH)
   fVarn2_1.Create( *ntrain + *ntest, *nvar2 );
   fVarn3_1.Create( *ntrain + *ntest, *nvar2 );

   Int_t imax;
   char det[20];

   Entree_new(nvar2, det, ntrain, ntest, nlayer, nodes, ncycle, (Int_t)20);
   if (fNeur_1.neuron[fParam_1.layerm - 1] == 1) {
      imax = 2;
      fParam_1.lclass = 2;
   }
   else {
      imax = fNeur_1.neuron[fParam_1.layerm - 1] << 1;
      fParam_1.lclass = fNeur_1.neuron[fParam_1.layerm - 1];
   }
   fParam_1.nvar = fNeur_1.neuron[0];
   TestNN();
   Innit(det, tout2, tin2, (Int_t)20);

   // delete data tables
   fVarn2_1.Delete();
   fVarn3_1.Delete();
}
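
// Illustrative sketch (not part of this file's code) of how Train_nn is driven.
// The names and the flat array layout below are assumptions for illustration
// only; in TMVA the arrays are prepared by MethodCFMlpANN, which implements
// the DataInterface() callback used internally to fetch one event at a time:
//
//    Int_t nTrain = 1000, nTest = 500, nVar = 4;
//    Int_t nLayer = 3;                        // input + hidden + output
//    Int_t nodes[3] = { 4, 10, 2 };           // nodes per layer; the output
//                                             // layer is forced to 2 nodes
//    Int_t nCycle = 100;                      // number of training cycles
//    std::vector<Double_t> tin((nTrain + nTest)*nVar); // input variables
//    std::vector<Double_t> tout(nTrain + nTest);       // event class labels
//    // "method" stands for a MethodCFMlpANN object providing DataInterface():
//    method.Train_nn( &tin[0], &tout[0], &nTrain, &nTest, &nVar,
//                     &nLayer, nodes, &nCycle );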

void TMVA::MethodCFMlpANN_Utils::Entree_new( Int_t *, char *, Int_t *ntrain,
                                             Int_t *ntest, Int_t *numlayer, Int_t *nodes,
                                             Int_t *numcycle, Int_t /*det_len*/)
{
   // first initialisation of the ANN
   Int_t i__1;

   Int_t rewrite, i__, j, ncoef;
   Int_t ntemp, num, retrain;

   /* NTRAIN: number of events used for training */
   /* NTEST:  number of events used for testing  */
   /* TIN:    input variables                    */
   /* TOUT:   type (class) of the event          */

   fCost_1.ancout = 1e30;

   /* .............. HardCoded Values .................... */
   retrain  = 0;
   rewrite  = 1000;
   for (i__ = 1; i__ <= max_nNodes_; ++i__) {
      fDel_1.coef[i__ - 1] = (Float_t)0.;
   }
   for (i__ = 1; i__ <= max_nLayers_; ++i__) {
      fDel_1.temp[i__ - 1] = (Float_t)0.;
   }
   fParam_1.layerm = *numlayer;
   if (fParam_1.layerm > max_nLayers_) {
      printf("Error: number of layers exceeds maximum: %i, %i ==> abort\n",
             fParam_1.layerm, max_nLayers_ );
      Arret("modification of mlpl3_param_lim.inc is needed");
   }
   fParam_1.nevl = *ntrain;
   fParam_1.nevt = *ntest;
   fParam_1.nblearn = *numcycle;
   fVarn_1.iclass = 2;
   fParam_1.nunilec = 10;
   fParam_1.epsmin = 1e-10;
   fParam_1.epsmax = 1e-4;
   fParam_1.eta = .5;
   fCost_1.tolcou = 1e-6;
   fCost_1.ieps = 2;
   fParam_1.nunisor = 30;
   fParam_1.nunishort = 48;
   fParam_1.nunap = 40;

   ULog() << kINFO << "Total number of events for training: " << fParam_1.nevl << Endl;
   ULog() << kINFO << "Total number of training cycles    : " << fParam_1.nblearn << Endl;
   if (fParam_1.nevl > max_Events_) {
      printf("Error: number of learning events exceeds maximum: %i, %i ==> abort\n",
             fParam_1.nevl, max_Events_ );
      Arret("modification of mlpl3_param_lim.inc is needed");
   }
   if (fParam_1.nevt > max_Events_) {
      printf("Error: number of testing events exceeds maximum: %i, %i ==> abort\n",
             fParam_1.nevt, max_Events_ );
      Arret("modification of mlpl3_param_lim.inc is needed");
   }
   i__1 = fParam_1.layerm;
   for (j = 1; j <= i__1; ++j) {
      num = nodes[j-1];
      if (num < 2) {
         num = 2;
      }
      if (j == fParam_1.layerm && num != 2) {
         num = 2;
      }
      fNeur_1.neuron[j - 1] = num;
   }
   i__1 = fParam_1.layerm;
   for (j = 1; j <= i__1; ++j) {
      ULog() << kINFO << "Number of neurons in layer " << j << ": " << fNeur_1.neuron[j - 1] << Endl;
   }
   if (fNeur_1.neuron[fParam_1.layerm - 1] != 2) {
      printf("Error: wrong number of classes at output layer: %i != 2 ==> abort\n",
             fNeur_1.neuron[fParam_1.layerm - 1]);
      Arret("stop");
   }
   i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
   for (j = 1; j <= i__1; ++j) {
      fDel_1.coef[j - 1] = 1.;
   }
   i__1 = fParam_1.layerm;
   for (j = 1; j <= i__1; ++j) {
      fDel_1.temp[j - 1] = 1.;
   }
   fParam_1.ichoi = retrain;
   fParam_1.ndivis = rewrite;
   fDel_1.idde = 1;
   if (! (fParam_1.ichoi == 0 || fParam_1.ichoi == 1)) {
      printf( "Big trouble !!!\n" );
      Arret("new training or continued one !");
   }
   if (fParam_1.ichoi == 0) {
      ULog() << kINFO << "New training will be performed" << Endl;
   }
   else {
      printf("%s: New training will be continued from a weight file\n", fg_MethodName);
   }
   ncoef = 0;
   ntemp = 0;
   for (i__ = 1; i__ <= max_nNodes_; ++i__) {
      if (fDel_1.coef[i__ - 1] != (Float_t)0.) {
         ++ncoef;
      }
   }
   for (i__ = 1; i__ <= max_nLayers_; ++i__) {
      if (fDel_1.temp[i__ - 1] != (Float_t)0.) {
         ++ntemp;
      }
   }
   if (ncoef != fNeur_1.neuron[fParam_1.layerm - 1]) {
      Arret("entree error code 1: needs to be reported");
   }
   if (ntemp != fParam_1.layerm) {
      Arret("entree error code 2: needs to be reported");
   }
}

#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::Wini()
{
   // initialise the weights and bias weights with random values in [-0.2, 0.2]
   Int_t i__1, i__2, i__3;
   Int_t i__, j;
   Int_t layer;

   i__1 = fParam_1.layerm;
   for (layer = 2; layer <= i__1; ++layer) {
      i__2 = fNeur_1.neuron[layer - 2];
      for (i__ = 1; i__ <= i__2; ++i__) {
         i__3 = fNeur_1.neuron[layer - 1];
         for (j = 1; j <= i__3; ++j) {
            w_ref(layer, j, i__) = (Sen3a() * 2. - 1.) * .2;
            ww_ref(layer, j) = (Sen3a() * 2. - 1.) * .2;
         }
      }
   }
}

#undef ww_ref
#undef w_ref

#define xeev_ref(a_1,a_2) fVarn2_1(a_1,a_2)
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define x_ref(a_1,a_2) fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7]
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::En_avant(Int_t *ievent)
{
   // forward propagation: compute the network response for training event "ievent"
   Int_t i__1, i__2, i__3;

   Double_t f;
   Int_t i__, j;
   Int_t layer;

   // feed the (normalised) input variables into the first layer
   i__1 = fNeur_1.neuron[0];
   for (i__ = 1; i__ <= i__1; ++i__) {
      y_ref(1, i__) = xeev_ref(*ievent, i__);
   }
   // propagate layer by layer: weighted sum plus bias weight, then activation
   i__1 = fParam_1.layerm - 1;
   for (layer = 1; layer <= i__1; ++layer) {
      i__2 = fNeur_1.neuron[layer];
      for (j = 1; j <= i__2; ++j) {
         x_ref(layer + 1, j) = 0.;
         i__3 = fNeur_1.neuron[layer - 1];
         for (i__ = 1; i__ <= i__3; ++i__) {
            x_ref(layer + 1, j) = ( x_ref(layer + 1, j) + y_ref(layer, i__)
                                    * w_ref(layer + 1, j, i__) );
         }
         x_ref(layer + 1, j) = x_ref(layer + 1, j) + ww_ref(layer + 1, j);
         i__3 = layer + 1;
         Foncf(&i__3, &x_ref(layer + 1, j), &f);
         y_ref(layer + 1, j) = f;
      }
   }
}

#undef ww_ref
#undef y_ref
#undef x_ref
#undef w_ref
#undef xeev_ref
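
// Note: written out explicitly, the forward pass in En_avant computes, with
// y(1,i) the normalised inputs and for each subsequent layer l,
//
//    x(l+1,j) = sum_i w(l+1,j,i) * y(l,i) + ww(l+1,j)
//    y(l+1,j) = f_{l+1}( x(l+1,j) )
//
// where f_l is the sigmoid-like activation implemented in Foncf below. The
// responses of the last layer, y(layerm,j), are the classifier outputs.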

#define xeev_ref(a_1,a_2) fVarn2_1(a_1,a_2)

void TMVA::MethodCFMlpANN_Utils::Leclearn( Int_t *ktest, Double_t *tout2, Double_t *tin2 )
{
   // read the training events, determine the per-variable minima/maxima,
   // and normalise all inputs to the interval [-1, 1]
   Int_t i__1, i__2;

   Int_t i__, j, k, l;
   Int_t nocla[max_nNodes_], ikend;
   Double_t xpg[max_nVar_];

   *ktest = 0;
   i__1 = fParam_1.lclass;
   for (k = 1; k <= i__1; ++k) {
      nocla[k - 1] = 0;
   }
   i__1 = fParam_1.nvar;
   for (i__ = 1; i__ <= i__1; ++i__) {
      fVarn_1.xmin[i__ - 1] = 1e30;
      fVarn_1.xmax[i__ - 1] = -fVarn_1.xmin[i__ - 1];
   }
   i__1 = fParam_1.nevl;
   for (i__ = 1; i__ <= i__1; ++i__) {
      DataInterface(tout2, tin2, &fg_100, &fg_0, &fParam_1.nevl, &fParam_1.nvar,
                    xpg, &fVarn_1.nclass[i__ - 1], &ikend);
      if (ikend == -1) {
         break;
      }

      CollectVar(&fParam_1.nvar, &fVarn_1.nclass[i__ - 1], xpg);

      i__2 = fParam_1.nvar;
      for (j = 1; j <= i__2; ++j) {
         xeev_ref(i__, j) = xpg[j - 1];
      }
      if (fVarn_1.iclass == 1) {
         i__2 = fParam_1.lclass;
         for (k = 1; k <= i__2; ++k) {
            if (fVarn_1.nclass[i__ - 1] == k) {
               ++nocla[k - 1];
            }
         }
      }
      i__2 = fParam_1.nvar;
      for (k = 1; k <= i__2; ++k) {
         if (xeev_ref(i__, k) < fVarn_1.xmin[k - 1]) {
            fVarn_1.xmin[k - 1] = xeev_ref(i__, k);
         }
         if (xeev_ref(i__, k) > fVarn_1.xmax[k - 1]) {
            fVarn_1.xmax[k - 1] = xeev_ref(i__, k);
         }
      }
   }

   if (fVarn_1.iclass == 1) {
      i__2 = fParam_1.lclass;
      for (k = 1; k <= i__2; ++k) {
         i__1 = fParam_1.lclass;
         for (l = 1; l <= i__1; ++l) {
            if (nocla[k - 1] != nocla[l - 1]) {
               *ktest = 1;
            }
         }
      }
   }
   i__1 = fParam_1.nevl;
   for (i__ = 1; i__ <= i__1; ++i__) {
      i__2 = fParam_1.nvar;
      for (l = 1; l <= i__2; ++l) {
         if (fVarn_1.xmax[l - 1] == (Float_t)0. && fVarn_1.xmin[l - 1] == (Float_t)0.) {
            xeev_ref(i__, l) = (Float_t)0.;
         }
         else {
            xeev_ref(i__, l) = xeev_ref(i__, l) - (fVarn_1.xmax[l - 1] +
                                                   fVarn_1.xmin[l - 1]) / 2.;
            xeev_ref(i__, l) = xeev_ref(i__, l) / ((fVarn_1.xmax[l - 1] -
                                                    fVarn_1.xmin[l - 1]) / 2.);
         }
      }
   }
}

#undef xeev_ref
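
// Note on the normalisation at the end of Leclearn: each variable is mapped
// from its observed range [xmin, xmax] onto [-1, 1] via
//
//    x' = ( x - (xmax + xmin)/2 ) / ( (xmax - xmin)/2 )
//
// Variables with xmin = xmax = 0 are set to zero to avoid division by zero.
// The same transformation, with the min/max found here on the training
// sample, is applied to the test sample in Lecev2.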

#define delw_ref(a_1,a_2,a_3) fDel_1.delw[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define x_ref(a_1,a_2) fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7]
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
#define delta_ref(a_1,a_2,a_3) fDel_1.delta[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define delww_ref(a_1,a_2) fDel_1.delww[(a_2)*max_nLayers_ + a_1 - 7]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
#define del_ref(a_1,a_2) fDel_1.del[(a_2)*max_nLayers_ + a_1 - 7]
#define deltaww_ref(a_1,a_2) fNeur_1.deltaww[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::En_arriere( Int_t *ievent )
{
   // backward propagation: compute the error at the output layer, propagate
   // it backwards, and update all weights (learning rate eeps, momentum eta)
   Int_t i__1, i__2, i__3;

   Double_t f;
   Int_t i__, j, k, l;
   Double_t df, uu;

   // target vector: +1 for the true class of the event, -1 otherwise
   i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
   for (i__ = 1; i__ <= i__1; ++i__) {
      if (fVarn_1.nclass[*ievent - 1] == i__) {
         fNeur_1.o[i__ - 1] = 1.;
      }
      else {
         fNeur_1.o[i__ - 1] = -1.;
      }
   }
   // error terms and weight increments for the output layer
   l = fParam_1.layerm;
   i__1 = fNeur_1.neuron[l - 1];
   for (i__ = 1; i__ <= i__1; ++i__) {
      f = y_ref(l, i__);
      df = (f + 1.) * (1. - f) / (fDel_1.temp[l - 1] * 2.);
      del_ref(l, i__) = df * (fNeur_1.o[i__ - 1] - y_ref(l, i__)) *
         fDel_1.coef[i__ - 1];
      delww_ref(l, i__) = fParam_1.eeps * del_ref(l, i__);
      i__2 = fNeur_1.neuron[l - 2];
      for (j = 1; j <= i__2; ++j) {
         delw_ref(l, i__, j) = fParam_1.eeps * del_ref(l, i__) * y_ref(l - 1, j);
      }
   }
   // propagate the error terms backwards through the hidden layers
   for (l = fParam_1.layerm - 1; l >= 2; --l) {
      i__2 = fNeur_1.neuron[l - 1];
      for (i__ = 1; i__ <= i__2; ++i__) {
         uu = 0.;
         i__1 = fNeur_1.neuron[l];
         for (k = 1; k <= i__1; ++k) {
            uu += w_ref(l + 1, k, i__) * del_ref(l + 1, k);
         }
         Foncf(&l, &x_ref(l, i__), &f);
         df = (f + 1.) * (1. - f) / (fDel_1.temp[l - 1] * 2.);
         del_ref(l, i__) = df * uu;
         delww_ref(l, i__) = fParam_1.eeps * del_ref(l, i__);
         i__1 = fNeur_1.neuron[l - 2];
         for (j = 1; j <= i__1; ++j) {
            delw_ref(l, i__, j) = fParam_1.eeps * del_ref(l, i__) * y_ref(l - 1, j);
         }
      }
   }
   // apply the updates, adding the momentum term from the previous step
   i__1 = fParam_1.layerm;
   for (l = 2; l <= i__1; ++l) {
      i__2 = fNeur_1.neuron[l - 1];
      for (i__ = 1; i__ <= i__2; ++i__) {
         deltaww_ref(l, i__) = delww_ref(l, i__) + fParam_1.eta *
            deltaww_ref(l, i__);
         ww_ref(l, i__) = ww_ref(l, i__) + deltaww_ref(l, i__);
         i__3 = fNeur_1.neuron[l - 2];
         for (j = 1; j <= i__3; ++j) {
            delta_ref(l, i__, j) = delw_ref(l, i__, j) + fParam_1.eta *
               delta_ref(l, i__, j);
            w_ref(l, i__, j) = w_ref(l, i__, j) + delta_ref(l, i__, j);
         }
      }
   }
}

#undef deltaww_ref
#undef del_ref
#undef ww_ref
#undef delww_ref
#undef delta_ref
#undef y_ref
#undef x_ref
#undef w_ref
#undef delw_ref
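
// Note: En_arriere is standard backpropagation with a momentum term. With
// f = tanh(u/(2T)) (see Foncf), the derivative used above is
//
//    df/du = (1 - f^2) / (2T) = (1 + f)(1 - f) / (2T)
//
// The output-layer error is del = df * (o - y) * coef, hidden-layer errors
// are propagated backwards via del(l,i) = df * sum_k w(l+1,k,i) * del(l+1,k),
// and each weight (with y = 1 for the bias weights ww) receives
//
//    delta <- eeps * del * y  +  eta * delta_previous
//    w     <- w + delta
//
// i.e. gradient descent with learning rate eeps and momentum eta.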

#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::Out( Int_t *iii, Int_t *maxcycle )
{
   // write weights to file - the actual writing is now done in MethodCFMlpANN.cxx

   if (*iii == *maxcycle) {
      // now in MethodCFMlpANN.cxx
   }
}

#undef ww_ref
#undef w_ref

#define delta_ref(a_1,a_2,a_3) fDel_1.delta[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define deltaww_ref(a_1,a_2) fNeur_1.deltaww[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::Innit( char *det, Double_t *tout2, Double_t *tin2, Int_t )
{
   // initialisation and main training loop
   Int_t i__1, i__2, i__3;

   Int_t i__, j;
   Int_t nevod, layer, ktest, i1, nrest;
   Int_t ievent(0);
   Int_t kkk;
   Double_t xxx = 0., yyy = 0.;

   Leclearn(&ktest, tout2, tin2);
   Lecev2(&ktest, tout2, tin2);
   if (ktest == 1) {
      printf( " .... strange to be here (1) ... \n");
      std::exit(1);
   }
   // reset the momentum terms
   i__1 = fParam_1.layerm - 1;
   for (layer = 1; layer <= i__1; ++layer) {
      i__2 = fNeur_1.neuron[layer];
      for (j = 1; j <= i__2; ++j) {
         deltaww_ref(layer + 1, j) = 0.;
         i__3 = fNeur_1.neuron[layer - 1];
         for (i__ = 1; i__ <= i__3; ++i__) {
            delta_ref(layer + 1, j, i__) = 0.;
         }
      }
   }
   // continued training reads stored weights, new training starts from random ones
   if (fParam_1.ichoi == 1) {
      Inl();
   }
   else {
      Wini();
   }
   kkk = 0;
   i__3 = fParam_1.nblearn;
   Timer timer( i__3, "CFMlpANN" );
   Int_t num = i__3/100;

   for (i1 = 1; i1 <= i__3; ++i1) {

      if ( ( num>0 && (i1-1)%num == 0) || (i1 == i__3) ) timer.DrawProgressBar( i1-1 );

      i__2 = fParam_1.nevl;
      for (i__ = 1; i__ <= i__2; ++i__) {
         ++kkk;
         if (fCost_1.ieps == 2) {
            fParam_1.eeps = Fdecroi(&kkk);
         }
         if (fCost_1.ieps == 1) {
            fParam_1.eeps = fParam_1.epsmin;
         }
         Bool_t doCont = kTRUE;
         if (fVarn_1.iclass == 2) {
            // draw a random training event
            ievent = (Int_t) ((Double_t) fParam_1.nevl * Sen3a());
            if (ievent == 0) {
               doCont = kFALSE;
            }
         }
         if (doCont) {
            if (fVarn_1.iclass == 1) {
               nevod = fParam_1.nevl / fParam_1.lclass;
               nrest = i__ % fParam_1.lclass;
               fParam_1.ndiv = i__ / fParam_1.lclass;
               if (nrest != 0) {
                  ievent = fParam_1.ndiv + 1 + (fParam_1.lclass - nrest) * nevod;
               }
               else {
                  ievent = fParam_1.ndiv;
               }
            }
            En_avant(&ievent);
            En_arriere(&ievent);
         }
      }
      yyy = 0.;
      if (i1 % fParam_1.ndivis == 0 || i1 == 1 || i1 == fParam_1.nblearn) {
         Cout(&i1, &xxx);
         Cout2(&i1, &yyy);
         GraphNN(&i1, &xxx, &yyy, det, (Int_t)20);
         Out(&i1, &fParam_1.nblearn);
      }
      // stop early once the training cost has converged
      if (xxx < fCost_1.tolcou) {
         GraphNN(&fParam_1.nblearn, &xxx, &yyy, det, (Int_t)20);
         Out(&fParam_1.nblearn, &fParam_1.nblearn);
         break;
      }
   }
}

#undef deltaww_ref
#undef delta_ref
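
// Note: one learning cycle in Innit processes nevl events. For each event the
// learning rate eeps is either fixed at epsmin (ieps == 1) or decays linearly
// with the global step counter (ieps == 2, see Fdecroi below); an event is
// drawn (at random via Sen3a() for iclass == 2), and one forward (En_avant)
// plus one backward (En_arriere) pass is performed. Every ndivis cycles, and
// on the first and last cycle, the training and test costs are evaluated
// (Cout, Cout2); training stops early once the cost falls below tolcou.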

void TMVA::MethodCFMlpANN_Utils::TestNN()
{
   // sanity checks of the network configuration against the hardcoded limits
   Int_t i__1;

   Int_t i__;
   Int_t ktest;

   ktest = 0;
   if (fParam_1.layerm > max_nLayers_) {
      ktest = 1;
      printf("Error: number of layers exceeds maximum: %i, %i ==> abort\n",
             fParam_1.layerm, max_nLayers_ );
      Arret("modification of mlpl3_param_lim.inc is needed");
   }
   if (fParam_1.nevl > max_Events_) {
      ktest = 1;
      printf("Error: number of training events exceeds maximum: %i, %i ==> abort\n",
             fParam_1.nevl, max_Events_ );
      Arret("modification of mlpl3_param_lim.inc is needed");
   }
   if (fParam_1.nevt > max_Events_) {
      printf("Error: number of testing events exceeds maximum: %i, %i ==> abort\n",
             fParam_1.nevt, max_Events_ );
      Arret("modification of mlpl3_param_lim.inc is needed");
   }
   if (fParam_1.lclass < fNeur_1.neuron[fParam_1.layerm - 1]) {
      ktest = 1;
      printf("Error: wrong number of classes at output layer: %i != %i ==> abort\n",
             fNeur_1.neuron[fParam_1.layerm - 1], fParam_1.lclass);
      Arret("problem needs to be reported");
   }
   if (fParam_1.nvar > max_nVar_) {
      ktest = 1;
      printf("Error: number of variables exceeds maximum: %i, %i ==> abort\n",
             fParam_1.nvar, fg_max_nVar_ );
      Arret("modification of mlpl3_param_lim.inc is needed");
   }
   i__1 = fParam_1.layerm;
   for (i__ = 1; i__ <= i__1; ++i__) {
      if (fNeur_1.neuron[i__ - 1] > max_nNodes_) {
         ktest = 1;
         printf("Error: number of neurons in layer %i exceeds maximum %i ==> abort\n",
                i__, fg_max_nNodes_ );
      }
   }
   if (ktest == 1) {
      printf( " .... strange to be here (2) ... \n");
      std::exit(1);
   }
}

#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::Cout( Int_t * /*i1*/, Double_t *xxx )
{
   // compute the mean squared deviation of the network response from the
   // target (+-1) over the training sample
   Int_t i__1, i__2;
   Double_t d__1;

   Double_t c__;
   Int_t i__, j;

   c__ = 0.;
   i__1 = fParam_1.nevl;
   for (i__ = 1; i__ <= i__1; ++i__) {
      En_avant(&i__);
      i__2 = fNeur_1.neuron[fParam_1.layerm - 1];
      for (j = 1; j <= i__2; ++j) {
         if (fVarn_1.nclass[i__ - 1] == j) {
            fNeur_1.o[j - 1] = 1.;
         }
         else {
            fNeur_1.o[j - 1] = -1.;
         }
         // computing 2nd power
         d__1 = y_ref(fParam_1.layerm, j) - fNeur_1.o[j - 1];
         c__ += fDel_1.coef[j - 1] * (d__1 * d__1);
      }
   }
   c__ /= (Double_t) (fParam_1.nevl * fParam_1.lclass) * 2.;
   *xxx = c__;
   fCost_1.ancout = c__;
}

#undef y_ref
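
// Note: the quantity computed in Cout is the weighted least-squares cost
//
//    E = 1/(2 * nevl * lclass) * sum_{events} sum_j coef_j * ( y(layerm,j) - o_j )^2
//
// with target o_j = +1 if the event belongs to class j and -1 otherwise;
// Cout2 below evaluates the same expression on the test sample.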

#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::Inl()
{
   // remnant of the FORTRAN weight-file reading: the loop structure and block
   // bounds (10 weights per record) are kept, but no weights are read here;
   // reading of stored weights is handled elsewhere (cf. the note in Out(),
   // which points to MethodCFMlpANN.cxx)
   Int_t i__1, i__2, i__3;

   Int_t jmin, jmax, k, layer, kk, nq, nr;

   i__1 = fParam_1.nvar;
   i__1 = fParam_1.layerm;
   i__1 = fParam_1.layerm - 1;
   for (layer = 1; layer <= i__1; ++layer) {
      nq = fNeur_1.neuron[layer] / 10;
      nr = fNeur_1.neuron[layer] - nq * 10;
      if (nr == 0) {
         kk = nq;
      }
      else {
         kk = nq + 1;
      }
      i__2 = kk;
      for (k = 1; k <= i__2; ++k) {
         jmin = k * 10 - 9;
         jmax = k * 10;
         if (fNeur_1.neuron[layer] < jmax) {
            jmax = fNeur_1.neuron[layer];
         }
         i__3 = fNeur_1.neuron[layer - 1];
      }
   }
}

#undef ww_ref
#undef w_ref

Double_t TMVA::MethodCFMlpANN_Utils::Fdecroi( Int_t *i__ )
{
   // linearly decreasing learning rate: interpolates between epsmax (first
   // step) and epsmin (last step) as a function of the global step counter
   Double_t ret_val;

   Double_t aaa, bbb;

   aaa = (fParam_1.epsmin - fParam_1.epsmax) / (Double_t) (fParam_1.nblearn *
                                                           fParam_1.nevl - 1);
   bbb = fParam_1.epsmax - aaa;
   ret_val = aaa * (Double_t) (*i__) + bbb;
   return ret_val;
}
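
// Note: for the global step counter k = 1 ... nblearn*nevl, Fdecroi returns
//
//    eps(k) = epsmax + (epsmin - epsmax) * (k - 1) / (nblearn*nevl - 1)
//
// so the very first update uses epsmax and the last one epsmin.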

#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::GraphNN( Int_t *ilearn, Double_t * /*xxx*/,
                                          Double_t * /*yyy*/, char * /*det*/, Int_t  /*det_len*/ )
{
   // compute, for each output node, the mean response for events of the
   // matching class (xmok) and of the other classes (xmko), and place the
   // discrimination cut halfway between the two means
   Int_t i__1, i__2;

   Double_t xmok[max_nNodes_];
   Float_t xpaw;
   Double_t xmko[max_nNodes_];
   Int_t i__, j;
   Int_t ix;
   Int_t jjj;
   Float_t vbn[10];
   Int_t nko[max_nNodes_], nok[max_nNodes_];

   for (i__ = 1; i__ <= 10; ++i__) {
      vbn[i__ - 1] = (Float_t)0.;
   }
   if (*ilearn == 1) {
      // AH: removed output
   }
   i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
   for (i__ = 1; i__ <= i__1; ++i__) {
      nok[i__ - 1] = 0;
      nko[i__ - 1] = 0;
      xmok[i__ - 1] = 0.;
      xmko[i__ - 1] = 0.;
   }
   i__1 = fParam_1.nevl;
   for (i__ = 1; i__ <= i__1; ++i__) {
      En_avant(&i__);
      i__2 = fNeur_1.neuron[fParam_1.layerm - 1];
      for (j = 1; j <= i__2; ++j) {
         xpaw = (Float_t) y_ref(fParam_1.layerm, j);
         if (fVarn_1.nclass[i__ - 1] == j) {
            ++nok[j - 1];
            xmok[j - 1] += y_ref(fParam_1.layerm, j);
         }
         else {
            ++nko[j - 1];
            xmko[j - 1] += y_ref(fParam_1.layerm, j);
            jjj = j + fNeur_1.neuron[fParam_1.layerm - 1];
         }
         if (j <= 9) {
            vbn[j - 1] = xpaw;
         }
      }
      vbn[9] = (Float_t) fVarn_1.nclass[i__ - 1];
   }
   i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
   for (j = 1; j <= i__1; ++j) {
      xmok[j - 1] /= (Double_t) nok[j - 1];
      xmko[j - 1] /= (Double_t) nko[j - 1];
      fNeur_1.cut[j - 1] = (xmok[j - 1] + xmko[j - 1]) / 2.;
   }
   ix = fNeur_1.neuron[fParam_1.layerm - 1];
   i__1 = ix;
}

#undef y_ref

Double_t TMVA::MethodCFMlpANN_Utils::Sen3a( void )
{
   // pseudo-random number generator: returns a uniform deviate in [0, 1);
   // multiplicative congruential generator modulo 2^36, carried out in
   // three 12-bit limbs (see the reference below)

   // initialized data
   Int_t    m12 = 4096;
   Double_t f1  = 2.44140625e-4;   // 4096^-1
   Double_t f2  = 5.96046448e-8;   // 4096^-2
   Double_t f3  = 1.45519152e-11;  // 4096^-3
   Int_t    j1  = 3823;
   Int_t    j2  = 4006;
   Int_t    j3  = 2903;
   static Int_t fg_i1 = 3823;
   static Int_t fg_i2 = 4006;
   static Int_t fg_i3 = 2903;

   Double_t ret_val;
   Int_t    k3, l3, k2, l2, k1, l1;

   // reference: K. D. Senne, J. Stochastics, Vol. 1, No. 3 (1974), pp. 215-38
   k3 = fg_i3 * j3;
   l3 = k3 / m12;
   k2 = fg_i2 * j3 + fg_i3 * j2 + l3;
   l2 = k2 / m12;
   k1 = fg_i1 * j3 + fg_i2 * j2 + fg_i3 * j1 + l2;
   l1 = k1 / m12;
   fg_i1 = k1 - l1 * m12;
   fg_i2 = k2 - l2 * m12;
   fg_i3 = k3 - l3 * m12;
   ret_val = f1 * (Double_t) fg_i1 + f2 * (Double_t) fg_i2 + f3 * (Double_t) fg_i3;

   return ret_val;
}

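// Illustrative sketch (not part of this class): the limb arithmetic above is
// equivalent to a multiplicative congruential generator modulo 2^36, where
// seed and multiplier are both 3823*4096^2 + 4006*4096 + 2903 = 64155708247,
// and the f1, f2, f3 constants reassemble the limbs into s / 2^36. Assuming a
// compiler with the (non-standard) __int128 extension, an equivalent form is:
//
//    #include <cstdint>
//    double sen3a_sketch() {
//       static uint64_t s = 64155708247ULL;   // seed (equals the multiplier)
//       s = (uint64_t)((unsigned __int128)s * 64155708247ULL % (1ULL << 36));
//       return (double)s / (double)(1ULL << 36);
//    }
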
void TMVA::MethodCFMlpANN_Utils::Foncf( Int_t *i__, Double_t *u, Double_t *f )
{
   // activation function: f(u) = (1 - exp(-u/T)) / (1 + exp(-u/T)), with
   // "temperature" T = fDel_1.temp[layer]; clamped for large |u/T|
   Double_t yy;

   if (*u / fDel_1.temp[*i__ - 1] > 170.) {
      *f = .99999999989999999;
   }
   else if (*u / fDel_1.temp[*i__ - 1] < -170.) {
      *f = -.99999999989999999;
   }
   else {
      yy = TMath::Exp(-(*u) / fDel_1.temp[*i__ - 1]);
      *f = (1. - yy) / (yy + 1.);
   }
}

#undef w_ref

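// Note: algebraically Foncf computes f(u) = tanh( u / (2*T) ), since
// (1 - e^{-x}) / (1 + e^{-x}) = tanh(x/2). A minimal standalone check,
// assuming nothing beyond the standard library:
//
//    #include <cmath>
//    #include <cstdio>
//    int main() {
//       const double T = 1.0;                   // plays the role of fDel_1.temp
//       for (double u = -5.; u <= 5.; u += 2.5) {
//          double yy = std::exp(-u/T);
//          double f  = (1. - yy)/(yy + 1.);     // Foncf's expression
//          std::printf("%+.2f  %+.8f  %+.8f\n", u, f, std::tanh(u/(2.*T)));
//       }
//       return 0;
//    }
//
// The clamping at |u/T| > 170 guards against overflow in the exponential.
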
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]

void TMVA::MethodCFMlpANN_Utils::Cout2( Int_t * /*i1*/, Double_t *yyy )
{
   // same cost as in Cout, but evaluated on the test sample
   Int_t i__1, i__2;
   Double_t d__1;

   Double_t c__;
   Int_t i__, j;

   c__ = 0.;
   i__1 = fParam_1.nevt;
   for (i__ = 1; i__ <= i__1; ++i__) {
      En_avant2(&i__);
      i__2 = fNeur_1.neuron[fParam_1.layerm - 1];
      for (j = 1; j <= i__2; ++j) {
         if (fVarn_1.mclass[i__ - 1] == j) {
            fNeur_1.o[j - 1] = 1.;
         }
         else {
            fNeur_1.o[j - 1] = -1.;
         }
         // computing 2nd power
         d__1 = y_ref(fParam_1.layerm, j) - fNeur_1.o[j - 1];
         c__ += fDel_1.coef[j - 1] * (d__1 * d__1);
      }
   }
   c__ /= (Double_t) (fParam_1.nevt * fParam_1.lclass) * 2.;
   *yyy = c__;
}

#undef y_ref

#define xx_ref(a_1,a_2) fVarn3_1(a_1,a_2)

void TMVA::MethodCFMlpANN_Utils::Lecev2( Int_t *ktest, Double_t *tout2, Double_t *tin2 )
{
   // read the test events and normalise them to [-1, 1] using the
   // minima/maxima determined on the training sample in Leclearn
   Int_t i__1, i__2;

   Int_t i__, j, k, l, mocla[max_nNodes_], ikend;
   Double_t xpg[max_nVar_];

   /* NTRAIN: number of events used for training */
   /* NTEST:  number of events used for testing  */
   /* TIN:    input variables                    */
   /* TOUT:   type (class) of the event          */

   *ktest = 0;
   i__1 = fParam_1.lclass;
   for (k = 1; k <= i__1; ++k) {
      mocla[k - 1] = 0;
   }
   i__1 = fParam_1.nevt;
   for (i__ = 1; i__ <= i__1; ++i__) {
      DataInterface(tout2, tin2, &fg_999, &fg_0, &fParam_1.nevt, &fParam_1.nvar,
                    xpg, &fVarn_1.mclass[i__ - 1], &ikend);

      if (ikend == -1) {
         break;
      }

      i__2 = fParam_1.nvar;
      for (j = 1; j <= i__2; ++j) {
         xx_ref(i__, j) = xpg[j - 1];
      }
   }

   i__1 = fParam_1.nevt;
   for (i__ = 1; i__ <= i__1; ++i__) {
      i__2 = fParam_1.nvar;
      for (l = 1; l <= i__2; ++l) {
         if (fVarn_1.xmax[l - 1] == (Float_t)0. && fVarn_1.xmin[l - 1] == (Float_t)0.) {
            xx_ref(i__, l) = (Float_t)0.;
         }
         else {
            xx_ref(i__, l) = xx_ref(i__, l) - (fVarn_1.xmax[l - 1] +
                                               fVarn_1.xmin[l - 1]) / 2.;
            xx_ref(i__, l) = xx_ref(i__, l) / ((fVarn_1.xmax[l - 1] -
                                                fVarn_1.xmin[l - 1]) / 2.);
         }
      }
   }
}

#undef xx_ref

#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187]
#define x_ref(a_1,a_2) fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7]
#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7]
#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7]
#define xx_ref(a_1,a_2) fVarn3_1(a_1,a_2)

void TMVA::MethodCFMlpANN_Utils::En_avant2( Int_t *ievent )
{
   // forward propagation for test event "ievent" (same as En_avant, but
   // reading from the test data table fVarn3_1)
   Int_t i__1, i__2, i__3;

   Double_t f;
   Int_t i__, j;
   Int_t layer;

   i__1 = fNeur_1.neuron[0];
   for (i__ = 1; i__ <= i__1; ++i__) {
      y_ref(1, i__) = xx_ref(*ievent, i__);
   }
   i__1 = fParam_1.layerm - 1;
   for (layer = 1; layer <= i__1; ++layer) {
      i__2 = fNeur_1.neuron[layer];
      for (j = 1; j <= i__2; ++j) {
         x_ref(layer + 1, j) = 0.;
         i__3 = fNeur_1.neuron[layer - 1];
         for (i__ = 1; i__ <= i__3; ++i__) {
            x_ref(layer + 1, j) = x_ref(layer + 1, j) + y_ref(layer, i__)
               * w_ref(layer + 1, j, i__);
         }
         x_ref(layer + 1, j) = x_ref(layer + 1, j) + ww_ref(layer + 1, j);
         i__3 = layer + 1;
         Foncf(&i__3, &x_ref(layer + 1, j), &f);
         y_ref(layer + 1, j) = f;
      }
   }
}

#undef xx_ref
#undef ww_ref
#undef y_ref
#undef x_ref
#undef w_ref

void TMVA::MethodCFMlpANN_Utils::Arret( const char* mot )
{
   // a fatal error occurred: print the message and stop execution
   printf("%s: %s\n", fg_MethodName, mot);
   std::exit(1);
}

void TMVA::MethodCFMlpANN_Utils::CollectVar( Int_t *nvar, Int_t *class__, Double_t *xpg )
{
   // apparently a remnant of the original FORTRAN ntuple filling: copies the
   // class label and the input variables into a local buffer that is not
   // used any further
   Int_t i__1;

   Int_t i__;
   Float_t x[201];

   // parameter adjustments
   --xpg;

   for (i__ = 1; i__ <= 201; ++i__) {
      x[i__ - 1] = 0.0;
   }
   x[0] = (Float_t) (*class__);
   i__1 = *nvar;
   for (i__ = 1; i__ <= i__1; ++i__) {
      x[i__] = (Float_t) xpg[i__];
   }
}
