Fitter.cxx

Go to the documentation of this file.
00001 // @(#)root/mathcore:$Id: Fitter.cxx 36590 2010-11-11 10:17:52Z moneta $
00002 // Author: L. Moneta Mon Sep  4 17:00:10 2006
00003 
00004 /**********************************************************************
00005  *                                                                    *
00006  * Copyright (c) 2006  LCG ROOT Math Team, CERN/PH-SFT                *
00007  *                                                                    *
00008  *                                                                    *
00009  **********************************************************************/
00010 
00011 // Implementation file for class Fitter
00012 
00013 
00014 #include "Fit/Fitter.h"
00015 #include "Fit/Chi2FCN.h"
00016 #include "Fit/PoissonLikelihoodFCN.h"
00017 #include "Fit/LogLikelihoodFCN.h"
00018 #include "Math/Minimizer.h"
00019 #include "Math/MinimizerOptions.h"
00020 #include "Fit/BinData.h"
00021 #include "Fit/UnBinData.h"
00022 #include "Fit/FcnAdapter.h"
00023 #include "Math/Error.h"
00024 
00025 #include <memory> 
00026 
00027 #include "Math/IParamFunction.h" 
00028 
00029 #include "Math/MultiDimParamFunctionAdapter.h"
00030 
00031 namespace ROOT { 
00032 
00033    namespace Fit { 
00034 
00035 
00036 
00037 Fitter::Fitter() : 
00038    fUseGradient(false),
00039    fBinFit(false),
00040    fFunc(0)
00041 {
00042    // Default constructor implementation.
00043    fResult = std::auto_ptr<ROOT::Fit::FitResult>(new ROOT::Fit::FitResult() );
00044 }
00045 
00046 Fitter::~Fitter() 
00047 {
00048    // Destructor implementation.
00049    // delete function if not empty
00050    if (fFunc) delete fFunc; 
00051 }
00052 
00053 Fitter::Fitter(const Fitter & rhs) 
00054 {
00055    // Implementation of copy constructor.
00056    // copy FitResult, FitConfig and clone fit function
00057    (*this) = rhs; 
00058 }
00059 
Fitter & Fitter::operator = (const Fitter &rhs)
{
   // Assignment operator.
   // Dummy implementation: the operator is private, so nothing is copied -
   // only a self-assignment check is performed. The intended full
   // implementation is kept below, commented out, for reference.
   if (this == &rhs) return *this;  // time saving self-test
//    fUseGradient = rhs.fUseGradient; 
//    fBinFit = rhs.fBinFit; 
//    fResult = rhs.fResult;
//    fConfig = rhs.fConfig; 
//    // function is copied and managed by FitResult (maybe should use an auto_ptr)
//    fFunc = fResult.ModelFunction(); 
//    if (rhs.fFunc != 0 && fResult.ModelFunction() == 0) { // case no fit has been done yet - then clone 
//       if (fFunc) delete fFunc; 
//       fFunc = dynamic_cast<IModelFunction *>( (rhs.fFunc)->Clone() ); 
//       assert(fFunc != 0); 
//    }
   return *this; 
}
00078 
00079 void Fitter::SetFunction(const IModelFunction & func) 
00080 {
00081    fUseGradient = false;
00082  
00083    //  set the fit model function (clone the given one and keep a copy ) 
00084    //std::cout << "set a non-grad function" << std::endl; 
00085 
00086    fFunc = dynamic_cast<IModelFunction *>(func.Clone() ); 
00087    assert(fFunc != 0);
00088    
00089    // creates the parameter  settings 
00090    fConfig.CreateParamsSettings(*fFunc); 
00091 }
00092 
00093 
00094 void Fitter::SetFunction(const IModel1DFunction & func) 
00095 { 
00096    fUseGradient = false;
00097    //std::cout << "set a 1d function" << std::endl; 
00098 
00099    // function is cloned when creating the adapter
00100    fFunc = new ROOT::Math::MultiDimParamFunctionAdapter(func);
00101 
00102    // creates the parameter  settings 
00103    fConfig.CreateParamsSettings(*fFunc); 
00104 }
00105 
00106 void Fitter::SetFunction(const IGradModelFunction & func) 
00107 { 
00108    fUseGradient = true;
00109    //std::cout << "set a grad function" << std::endl; 
00110    //  set the fit model function (clone the given one and keep a copy ) 
00111    fFunc = dynamic_cast<IGradModelFunction *> ( func.Clone() ); 
00112    assert(fFunc != 0);
00113 
00114    // creates the parameter  settings 
00115    fConfig.CreateParamsSettings(*fFunc); 
00116 }
00117 
00118 
00119 void Fitter::SetFunction(const IGradModel1DFunction & func) 
00120 { 
00121    //std::cout << "set a 1d grad function" << std::endl; 
00122    fUseGradient = true;
00123    // function is cloned when creating the adapter
00124    fFunc = new ROOT::Math::MultiDimParamGradFunctionAdapter(func);
00125 
00126    // creates the parameter  settings 
00127    fConfig.CreateParamsSettings(*fFunc); 
00128 }
00129 
00130 
bool Fitter::FitFCN(const BaseFunc & fcn, const double * params, unsigned int dataSize, bool chi2fit) { 
   // Fit a user-provided FCN (objective) function.
   // params   : optional starting parameter values; if null the existing
   //            parameter settings are used and must match the FCN dimension
   // dataSize : number of data points, forwarded to the FitResult
   // chi2fit  : mark the fit as a chi2 (binned) fit
   // Returns false on invalid input or when the minimization fails.
   unsigned int npar  = fcn.NDim(); 
   if (npar == 0) { 
      MATH_ERROR_MSG("Fitter::FitFCN","FCN function has zero parameters ");
      return false;
   }
   // create the fit parameter settings from params, or validate existing ones
   if (params != 0 ) 
      fConfig.SetParamsSettings(npar, params);
   else {
      if ( fConfig.ParamsSettings().size() != npar) { 
         MATH_ERROR_MSG("Fitter::FitFCN","wrong fit parameter settings");
         return false;
      }
   }
   fBinFit = chi2fit; 

   // create Minimizer from the fit configuration
   fMinimizer = std::auto_ptr<ROOT::Math::Minimizer> ( fConfig.CreateMinimizer() );
   if (fMinimizer.get() == 0) return false; 

   // drop any previously set model function, unless the FitResult already
   // references it (i.e. a fit has been performed with it)
   if (fFunc && fResult->FittedFunction() == 0) delete fFunc; 
   fFunc = 0;

   return DoMinimization<BaseFunc> (fcn, dataSize); 
}
00158 
bool Fitter::FitFCN(const BaseGradFunc & fcn, const double * params, unsigned int dataSize, bool chi2fit) { 
   // Fit a user-provided FCN function that also supplies its gradient.
   // params   : optional starting parameter values; if null the existing
   //            parameter settings are used and must match the FCN dimension
   // dataSize : number of data points, forwarded to the FitResult
   // chi2fit  : mark the fit as a chi2 (binned) fit
   // Returns false on invalid input or when the minimization fails.
   unsigned int npar  = fcn.NDim(); 
   if (npar == 0) { 
      MATH_ERROR_MSG("Fitter::FitFCN","FCN function has zero parameters ");
      return false;
   }
   // create the fit parameter settings from params, or validate existing ones
   if (params != 0  ) 
      fConfig.SetParamsSettings(npar, params);
   else {
      if ( fConfig.ParamsSettings().size() != npar) { 
         MATH_ERROR_MSG("Fitter::FitFCN","wrong fit parameter settings");
         return false;
      }
   }
   fBinFit = chi2fit; 

   // create Minimizer (needs to be done after the parameter settings exist)
   fMinimizer = std::auto_ptr<ROOT::Math::Minimizer> ( fConfig.CreateMinimizer() );
   if (fMinimizer.get() == 0) return false; 

   // drop any previously set model function, unless the FitResult already
   // references it (i.e. a fit has been performed with it)
   if (fFunc && fResult->FittedFunction() == 0) delete fFunc; 
   fFunc = 0;

   // minimize using the gradient-aware objective function type
   return DoMinimization<BaseGradFunc> (fcn, dataSize); 
}
00186 
00187 bool Fitter::FitFCN(MinuitFCN_t fcn, int npar, const double * params , unsigned int dataSize , bool chi2fit ) { 
00188    // fit using Minuit style FCN type (global function pointer)  
00189    // create corresponfing objective function from that function
00190    if (npar == 0) {
00191       npar = fConfig.ParamsSettings().size(); 
00192       if (npar == 0) { 
00193          MATH_ERROR_MSG("Fitter::FitFCN","Fit Parameter settings have not been created ");
00194          return false;
00195       }
00196    }
00197 
00198    ROOT::Fit::FcnAdapter  newFcn(fcn,npar); 
00199    return FitFCN(newFcn,params,dataSize,chi2fit); 
00200 }
00201 
bool Fitter::DoLeastSquareFit(const BinData & data) { 
   // Perform a chi2 (least-square) fit on a set of binned data.
   // The model function must have been set before via SetFunction.
   // Returns false if the minimizer or the function is missing, if the
   // function cannot provide a required gradient, or if the fit fails.

   // create Minimizer from the fit configuration
   fMinimizer = std::auto_ptr<ROOT::Math::Minimizer> ( fConfig.CreateMinimizer() );

   if (fMinimizer.get() == 0) return false; 
   
   if (fFunc == 0) return false; 

#ifdef DEBUG
   std::cout << "Fitter ParamSettings " << Config().ParamsSettings()[3].IsBound() << " lower limit " <<  Config().ParamsSettings()[3].LowerLimit() << " upper limit " <<  Config().ParamsSettings()[3].UpperLimit() << std::endl;
#endif

   fBinFit = true; 

   // build the chi2 objective and minimize it; use the gradient-aware FCN
   // type when the gradient has been requested (fUseGradient)
   if (!fUseGradient) { 
      // do minimization without using the gradient
      Chi2FCN<BaseFunc> chi2(data,*fFunc); 
      return DoMinimization<Chi2FCN<BaseFunc>::BaseObjFunction > (chi2, data.Size()); 
   } 
   else { 
      // use gradient: the stored function must actually provide one
      IGradModelFunction * gradFun = dynamic_cast<IGradModelFunction *>(fFunc); 
      if (gradFun != 0) { 
         Chi2FCN<BaseGradFunc> chi2(data,*gradFun); 
         return DoMinimization<Chi2FCN<BaseGradFunc>::BaseObjFunction > (chi2, data.Size()); 
      }
      MATH_ERROR_MSG("Fitter::DoLeastSquareFit","wrong type of function - it does not provide gradient");
   }
   return false; 
}
00236 
bool Fitter::DoLikelihoodFit(const BinData & data) { 

   // Perform a binned (Poisson) likelihood fit on a set of binned data.
   // The model function must have been set before via SetFunction.
   // An equivalent chi2 is also computed and stored in the fit result.

   // create Minimizer from the fit configuration
   fMinimizer = std::auto_ptr<ROOT::Math::Minimizer> ( fConfig.CreateMinimizer() );
   if (fMinimizer.get() == 0) return false; 
   if (fFunc == 0) return false; 

   // logl fit (error should be 0.5) set if different than default values (of 1)
   if (fConfig.MinimizerOptions().ErrorDef() == ROOT::Math::MinimizerOptions::DefaultErrorDef() ) { 
      fConfig.MinimizerOptions().SetErrorDef(0.5);
         fMinimizer->SetErrorDef(0.5);
   }

   fBinFit = true; 

   // create a chi2 function to be used for the equivalent chi-square
   Chi2FCN<BaseFunc> chi2(data,*fFunc); 

   if (!fUseGradient) { 
      // do minimization without using the gradient
      PoissonLikelihoodFCN<BaseFunc> logl(data,*fFunc); 
      return DoMinimization<PoissonLikelihoodFCN<BaseFunc>::BaseObjFunction > (logl, data.Size(), &chi2); 
   } 
   else { 
      // use gradient: the stored function must actually provide one
      IGradModelFunction * gradFun = dynamic_cast<IGradModelFunction *>(fFunc); 
      if (gradFun != 0) { 
         PoissonLikelihoodFCN<BaseGradFunc> logl(data,*gradFun); 
         return DoMinimization<PoissonLikelihoodFCN<BaseGradFunc>::BaseObjFunction > (logl, data.Size(), &chi2); 
      }
      MATH_ERROR_MSG("Fitter::DoLikelihoodFit","wrong type of function - it does not provide gradient");

   }
   return false; 
}
00275 
bool Fitter::DoLikelihoodFit(const UnBinData & data) { 
   // Perform an unbinned likelihood fit on a set of unbinned data.
   // The model function must have been set before via SetFunction.

   // create Minimizer from the fit configuration
   fMinimizer = std::auto_ptr<ROOT::Math::Minimizer> ( fConfig.CreateMinimizer() );

   if (fMinimizer.get() == 0) return false; 
   
   if (fFunc == 0) return false; 

   fBinFit = false; 

#ifdef DEBUG
   int ipar = 0;
   std::cout << "Fitter ParamSettings " << Config().ParamsSettings()[ipar].IsBound() << " lower limit " <<  Config().ParamsSettings()[ipar].LowerLimit() << " upper limit " <<  Config().ParamsSettings()[ipar].UpperLimit() << std::endl;
#endif

   // logl fit (error should be 0.5) set if different than default values (of 1)
   if (fConfig.MinimizerOptions().ErrorDef() == ROOT::Math::MinimizerOptions::DefaultErrorDef() ) {
      fConfig.MinimizerOptions().SetErrorDef(0.5);
      fMinimizer->SetErrorDef(0.5);
   }

   if (!fUseGradient) { 
      // do minimization without using the gradient
      LogLikelihoodFCN<BaseFunc> logl(data,*fFunc); 
      return DoMinimization<LogLikelihoodFCN<BaseFunc>::BaseObjFunction > (logl, data.Size()); 
   } 
   else { 
      // use gradient: the stored function must actually provide one
      IGradModelFunction * gradFun = dynamic_cast<IGradModelFunction *>(fFunc); 
      if (gradFun != 0) { 
         LogLikelihoodFCN<BaseGradFunc> logl(data,*gradFun); 
         return DoMinimization<LogLikelihoodFCN<BaseGradFunc>::BaseObjFunction > (logl, data.Size()); 
      }
      MATH_ERROR_MSG("Fitter::DoLikelihoodFit","wrong type of function - it does not provide gradient");
   }      
   return false; 
}
00315 
00316 bool Fitter::DoLinearFit(const BinData & data ) { 
00317 
00318    // perform a linear fit on a set of binned data 
00319    std::string  prevminimizer = fConfig.MinimizerType();  
00320    fConfig.SetMinimizer("Linear"); 
00321 
00322    fBinFit = true; 
00323 
00324    bool ret =  DoLeastSquareFit(data); 
00325    fConfig.SetMinimizer(prevminimizer.c_str());
00326    return ret; 
00327 }
00328 
// traits for distinguishing fit method functions from generic objective
// functions: for a generic function the number of calls and the fit type are
// not available (NCalls returns 0 and Type returns -1), and no gradient is
// assumed. Used by DoMinimization to fill the FitResult.
template<class Func> 
struct ObjFuncTrait { 
   static unsigned int NCalls(const Func &  ) { return 0; }
   static int Type(const Func & ) { return -1; }
   static bool IsGrad() { return false; }
};
// specialization for fit method functions (no gradient provided)
template<>
struct ObjFuncTrait<ROOT::Math::FitMethodFunction> { 
   static unsigned int NCalls(const ROOT::Math::FitMethodFunction & f ) { return f.NCalls(); }
   static int Type(const ROOT::Math::FitMethodFunction & f) { return f.Type(); }
   static bool IsGrad() { return false; }
};
// specialization for fit method functions providing a gradient
template<>
struct ObjFuncTrait<ROOT::Math::FitMethodGradFunction> { 
   static unsigned int NCalls(const ROOT::Math::FitMethodGradFunction & f ) { return f.NCalls(); }
   static int Type(const ROOT::Math::FitMethodGradFunction & f) { return f.Type(); }
   static bool IsGrad() { return true; }
};
00348 
00349 
template<class ObjFunc> 
bool Fitter::DoMinimization(const ObjFunc & objFunc, unsigned int dataSize, const ROOT::Math::IMultiGenFunction * chi2func) { 
   // Common minimization routine used by all the fit methods.
   // objFunc  : objective function to minimize (chi2, likelihood, or user FCN)
   // dataSize : number of data points, forwarded to the FitResult
   // chi2func : optional equivalent-chi2 function stored in the FitResult
   //            (used by the binned likelihood fit)
   // Returns the minimizer status; fResult is (re)created in all cases.

   // assert that params settings have been set correctly
   assert( fConfig.ParamsSettings().size() == objFunc.NDim() );

   // keep also a copy of FCN function and set this in minimizer so they will be managed together
   // (remember that cloned copy will still depends on data and model function pointers) 
   fObjFunction = std::auto_ptr<ROOT::Math::IMultiGenFunction> ( objFunc.Clone() ); 
   // in case of gradient function needs to downcast the pointer
   const ObjFunc * fcn = dynamic_cast<const ObjFunc *> (fObjFunction.get() );
   assert(fcn); 
   fMinimizer->SetFunction( *fcn);

   fMinimizer->SetVariables(fConfig.ParamsSettings().begin(), fConfig.ParamsSettings().end() ); 


   // if requested parabolic error do correct error  analysis by the minimizer (call HESSE) 
   if (fConfig.ParabErrors()) fMinimizer->SetValidError(true);


   bool ret = fMinimizer->Minimize(); 

#ifdef DEBUG
   std::cout << "ROOT::Fit::Fitter::DoMinimization : ncalls = " << ObjFuncTrait<ObjFunc>::NCalls(objFunc) << " type of objfunc " << ObjFuncTrait<ObjFunc>::Type(objFunc) << "  typeid: " << typeid(objFunc).name() << " prov gradient " << ObjFuncTrait<ObjFunc>::IsGrad() << std::endl;
#endif
   

   // query the cloned FCN (*fcn) for the call count: that is the instance the
   // minimizer actually evaluated; the fit type is a static property so it
   // can be read from either instance
   unsigned int ncalls =  ObjFuncTrait<ObjFunc>::NCalls(*fcn);
   int fitType =  ObjFuncTrait<ObjFunc>::Type(objFunc);


   fResult = std::auto_ptr<FitResult> ( new FitResult(*fMinimizer,fConfig, fFunc, ret, dataSize, 
                                                      fBinFit, chi2func, ncalls ) );

   // for least-square fits, optionally rescale the errors by the reduced chi2
   if (fConfig.NormalizeErrors() && fitType == ROOT::Math::FitMethodFunction::kLeastSquare ) fResult->NormalizeErrors(); 

   return ret; 
}
00389 
bool Fitter::CalculateHessErrors() { 
   // Compute the parabolic (Hesse) parameter errors and update the fit
   // result accordingly. A fit must have been performed before, so that a
   // minimizer and a fit result exist.
   // (NOTE: the original comment wrongly mentioned Minos errors.)
   if (!fMinimizer.get()  || !fResult.get()) { 
       MATH_ERROR_MSG("Fitter::CalculateHessErrors","Need to do a fit before calculating the errors");
       return false; 
   }

   //run Hesse
   bool ret = fMinimizer->Hesse();

   // update minimizer results with what comes out from Hesse;
   // the overall status is true if either step succeeded
   ret |= fResult->Update(*fMinimizer, ret); 
   return ret; 
}
00405 
00406 
bool Fitter::CalculateMinosErrors() { 
   // Compute the Minos (asymmetric) errors according to the configuration
   // set in the parameters and append the values to the fit result.
   // Returns true if the calculation succeeded for at least one parameter.
   
   // in case it has not been set - set by default Minos on all parameters 
   fConfig.SetMinosErrors(); 

   if (!fMinimizer.get() ) { 
       MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Minimizer does not exist - cannot calculate Minos errors");
       return false; 
   }

   if (!fResult.get() || fResult->IsEmpty() ) { 
       MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Invalid Fit Result - cannot calculate Minos errors");
       return false; 
   }

   // run Minos either on the explicitly requested parameters or on all of them
   const std::vector<unsigned int> & ipars = fConfig.MinosParams(); 
   unsigned int n = (ipars.size() > 0) ? ipars.size() : fResult->Parameters().size(); 
   bool ok = false; 
   for (unsigned int i = 0; i < n; ++i) {
      double elow, eup;
      unsigned int index = (ipars.size() > 0) ? ipars[i] : i; 
      bool ret = fMinimizer->GetMinosError(index, elow, eup);
      if (ret) fResult->SetMinosError(index, elow, eup); 
      ok |= ret;  // success if any single parameter succeeded
   }
   if (!ok) 
       MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Minos error calculation failed for all parameters");

   return ok; 
}
00439 
00440 
00441    } // end namespace Fit
00442 
00443 } // end namespace ROOT
00444 

Generated on Tue Jul 5 14:33:25 2011 for ROOT_528-00b_version by  doxygen 1.5.1