00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014 #include "Fit/Fitter.h"
00015 #include "Fit/Chi2FCN.h"
00016 #include "Fit/PoissonLikelihoodFCN.h"
00017 #include "Fit/LogLikelihoodFCN.h"
00018 #include "Math/Minimizer.h"
00019 #include "Math/MinimizerOptions.h"
00020 #include "Fit/BinData.h"
00021 #include "Fit/UnBinData.h"
00022 #include "Fit/FcnAdapter.h"
00023 #include "Math/Error.h"
00024
00025 #include <memory>
00026
00027 #include "Math/IParamFunction.h"
00028
00029 #include "Math/MultiDimParamFunctionAdapter.h"
00030
00031 namespace ROOT {
00032
00033 namespace Fit {
00034
00035
00036
00037 Fitter::Fitter() :
00038 fUseGradient(false),
00039 fBinFit(false),
00040 fFunc(0)
00041 {
00042
00043 fResult = std::auto_ptr<ROOT::Fit::FitResult>(new ROOT::Fit::FitResult() );
00044 }
00045
00046 Fitter::~Fitter()
00047 {
00048
00049
00050 if (fFunc) delete fFunc;
00051 }
00052
00053 Fitter::Fitter(const Fitter & rhs)
00054 {
00055
00056
00057 (*this) = rhs;
00058 }
00059
Fitter & Fitter::operator = (const Fitter &rhs)
{
   // Assignment operator.
   // NOTE(review): dummy implementation - apart from the self-assignment
   // check, no data member is copied and the target keeps its previous
   // state. Presumably copying is meant to be disabled for this class;
   // confirm, and consider declaring copy operations private/deleted in
   // the header instead.
   if (this == &rhs) return *this;

   return *this;
}
00078
00079 void Fitter::SetFunction(const IModelFunction & func)
00080 {
00081 fUseGradient = false;
00082
00083
00084
00085
00086 fFunc = dynamic_cast<IModelFunction *>(func.Clone() );
00087 assert(fFunc != 0);
00088
00089
00090 fConfig.CreateParamsSettings(*fFunc);
00091 }
00092
00093
00094 void Fitter::SetFunction(const IModel1DFunction & func)
00095 {
00096 fUseGradient = false;
00097
00098
00099
00100 fFunc = new ROOT::Math::MultiDimParamFunctionAdapter(func);
00101
00102
00103 fConfig.CreateParamsSettings(*fFunc);
00104 }
00105
00106 void Fitter::SetFunction(const IGradModelFunction & func)
00107 {
00108 fUseGradient = true;
00109
00110
00111 fFunc = dynamic_cast<IGradModelFunction *> ( func.Clone() );
00112 assert(fFunc != 0);
00113
00114
00115 fConfig.CreateParamsSettings(*fFunc);
00116 }
00117
00118
00119 void Fitter::SetFunction(const IGradModel1DFunction & func)
00120 {
00121
00122 fUseGradient = true;
00123
00124 fFunc = new ROOT::Math::MultiDimParamGradFunctionAdapter(func);
00125
00126
00127 fConfig.CreateParamsSettings(*fFunc);
00128 }
00129
00130
00131 bool Fitter::FitFCN(const BaseFunc & fcn, const double * params, unsigned int dataSize, bool chi2fit) {
00132
00133
00134 unsigned int npar = fcn.NDim();
00135 if (npar == 0) {
00136 MATH_ERROR_MSG("Fitter::FitFCN","FCN function has zero parameters ");
00137 return false;
00138 }
00139 if (params != 0 )
00140 fConfig.SetParamsSettings(npar, params);
00141 else {
00142 if ( fConfig.ParamsSettings().size() != npar) {
00143 MATH_ERROR_MSG("Fitter::FitFCN","wrong fit parameter settings");
00144 return false;
00145 }
00146 }
00147 fBinFit = chi2fit;
00148
00149
00150 fMinimizer = std::auto_ptr<ROOT::Math::Minimizer> ( fConfig.CreateMinimizer() );
00151 if (fMinimizer.get() == 0) return false;
00152
00153 if (fFunc && fResult->FittedFunction() == 0) delete fFunc;
00154 fFunc = 0;
00155
00156 return DoMinimization<BaseFunc> (fcn, dataSize);
00157 }
00158
00159 bool Fitter::FitFCN(const BaseGradFunc & fcn, const double * params, unsigned int dataSize, bool chi2fit) {
00160
00161 unsigned int npar = fcn.NDim();
00162 if (npar == 0) {
00163 MATH_ERROR_MSG("Fitter::FitFCN","FCN function has zero parameters ");
00164 return false;
00165 }
00166 if (params != 0 )
00167 fConfig.SetParamsSettings(npar, params);
00168 else {
00169 if ( fConfig.ParamsSettings().size() != npar) {
00170 MATH_ERROR_MSG("Fitter::FitFCN","wrong fit parameter settings");
00171 return false;
00172 }
00173 }
00174 fBinFit = chi2fit;
00175
00176
00177 fMinimizer = std::auto_ptr<ROOT::Math::Minimizer> ( fConfig.CreateMinimizer() );
00178 if (fMinimizer.get() == 0) return false;
00179
00180 if (fFunc && fResult->FittedFunction() == 0) delete fFunc;
00181 fFunc = 0;
00182
00183
00184 return DoMinimization<BaseGradFunc> (fcn, dataSize);
00185 }
00186
00187 bool Fitter::FitFCN(MinuitFCN_t fcn, int npar, const double * params , unsigned int dataSize , bool chi2fit ) {
00188
00189
00190 if (npar == 0) {
00191 npar = fConfig.ParamsSettings().size();
00192 if (npar == 0) {
00193 MATH_ERROR_MSG("Fitter::FitFCN","Fit Parameter settings have not been created ");
00194 return false;
00195 }
00196 }
00197
00198 ROOT::Fit::FcnAdapter newFcn(fcn,npar);
00199 return FitFCN(newFcn,params,dataSize,chi2fit);
00200 }
00201
00202 bool Fitter::DoLeastSquareFit(const BinData & data) {
00203
00204
00205
00206
00207 fMinimizer = std::auto_ptr<ROOT::Math::Minimizer> ( fConfig.CreateMinimizer() );
00208
00209 if (fMinimizer.get() == 0) return false;
00210
00211 if (fFunc == 0) return false;
00212
00213 #ifdef DEBUG
00214 std::cout << "Fitter ParamSettings " << Config().ParamsSettings()[3].IsBound() << " lower limit " << Config().ParamsSettings()[3].LowerLimit() << " upper limit " << Config().ParamsSettings()[3].UpperLimit() << std::endl;
00215 #endif
00216
00217 fBinFit = true;
00218
00219
00220 if (!fUseGradient) {
00221
00222 Chi2FCN<BaseFunc> chi2(data,*fFunc);
00223 return DoMinimization<Chi2FCN<BaseFunc>::BaseObjFunction > (chi2, data.Size());
00224 }
00225 else {
00226
00227 IGradModelFunction * gradFun = dynamic_cast<IGradModelFunction *>(fFunc);
00228 if (gradFun != 0) {
00229 Chi2FCN<BaseGradFunc> chi2(data,*gradFun);
00230 return DoMinimization<Chi2FCN<BaseGradFunc>::BaseObjFunction > (chi2, data.Size());
00231 }
00232 MATH_ERROR_MSG("Fitter::DoLeastSquareFit","wrong type of function - it does not provide gradient");
00233 }
00234 return false;
00235 }
00236
00237 bool Fitter::DoLikelihoodFit(const BinData & data) {
00238
00239
00240
00241
00242 fMinimizer = std::auto_ptr<ROOT::Math::Minimizer> ( fConfig.CreateMinimizer() );
00243 if (fMinimizer.get() == 0) return false;
00244 if (fFunc == 0) return false;
00245
00246
00247 if (fConfig.MinimizerOptions().ErrorDef() == ROOT::Math::MinimizerOptions::DefaultErrorDef() ) {
00248 fConfig.MinimizerOptions().SetErrorDef(0.5);
00249 fMinimizer->SetErrorDef(0.5);
00250 }
00251
00252 fBinFit = true;
00253
00254
00255 Chi2FCN<BaseFunc> chi2(data,*fFunc);
00256
00257 if (!fUseGradient) {
00258
00259 PoissonLikelihoodFCN<BaseFunc> logl(data,*fFunc);
00260 return DoMinimization<PoissonLikelihoodFCN<BaseFunc>::BaseObjFunction > (logl, data.Size(), &chi2);
00261 }
00262 else {
00263
00264 IGradModelFunction * gradFun = dynamic_cast<IGradModelFunction *>(fFunc);
00265 if (gradFun != 0) {
00266
00267 PoissonLikelihoodFCN<BaseGradFunc> logl(data,*gradFun);
00268 return DoMinimization<PoissonLikelihoodFCN<BaseGradFunc>::BaseObjFunction > (logl, data.Size(), &chi2);
00269 }
00270 MATH_ERROR_MSG("Fitter::DoLikelihoodFit","wrong type of function - it does not provide gradient");
00271
00272 }
00273 return false;
00274 }
00275
00276 bool Fitter::DoLikelihoodFit(const UnBinData & data) {
00277
00278
00279
00280 fMinimizer = std::auto_ptr<ROOT::Math::Minimizer> ( fConfig.CreateMinimizer() );
00281
00282 if (fMinimizer.get() == 0) return false;
00283
00284 if (fFunc == 0) return false;
00285
00286 fBinFit = false;
00287
00288 #ifdef DEBUG
00289 int ipar = 0;
00290 std::cout << "Fitter ParamSettings " << Config().ParamsSettings()[ipar].IsBound() << " lower limit " << Config().ParamsSettings()[ipar].LowerLimit() << " upper limit " << Config().ParamsSettings()[ipar].UpperLimit() << std::endl;
00291 #endif
00292
00293
00294 if (fConfig.MinimizerOptions().ErrorDef() == ROOT::Math::MinimizerOptions::DefaultErrorDef() ) {
00295 fConfig.MinimizerOptions().SetErrorDef(0.5);
00296 fMinimizer->SetErrorDef(0.5);
00297 }
00298
00299 if (!fUseGradient) {
00300
00301 LogLikelihoodFCN<BaseFunc> logl(data,*fFunc);
00302 return DoMinimization<LogLikelihoodFCN<BaseFunc>::BaseObjFunction > (logl, data.Size());
00303 }
00304 else {
00305
00306 IGradModelFunction * gradFun = dynamic_cast<IGradModelFunction *>(fFunc);
00307 if (gradFun != 0) {
00308 LogLikelihoodFCN<BaseGradFunc> logl(data,*gradFun);
00309 return DoMinimization<LogLikelihoodFCN<BaseGradFunc>::BaseObjFunction > (logl, data.Size());
00310 }
00311 MATH_ERROR_MSG("Fitter::DoLikelihoodFit","wrong type of function - it does not provide gradient");
00312 }
00313 return false;
00314 }
00315
00316 bool Fitter::DoLinearFit(const BinData & data ) {
00317
00318
00319 std::string prevminimizer = fConfig.MinimizerType();
00320 fConfig.SetMinimizer("Linear");
00321
00322 fBinFit = true;
00323
00324 bool ret = DoLeastSquareFit(data);
00325 fConfig.SetMinimizer(prevminimizer.c_str());
00326 return ret;
00327 }
00328
00329
// Trait used by DoMinimization to query objective-function information in a
// uniform way, regardless of the concrete function type being minimized.
// Generic version: used for plain functions carrying no fit-method info.
template<class Func>
struct ObjFuncTrait {
// number of function calls is unknown for a generic function
static unsigned int NCalls(const Func & ) { return 0; }
// fit method type is undefined (-1) for a generic function
static int Type(const Func & ) { return -1; }
// a generic function is assumed not to provide an analytical gradient
static bool IsGrad() { return false; }
};
// Specialization for fit-method functions (chi2 / likelihood without
// gradient): forward to the function's own call counter and type.
template<>
struct ObjFuncTrait<ROOT::Math::FitMethodFunction> {
static unsigned int NCalls(const ROOT::Math::FitMethodFunction & f ) { return f.NCalls(); }
static int Type(const ROOT::Math::FitMethodFunction & f) { return f.Type(); }
static bool IsGrad() { return false; }
};
// Specialization for gradient-providing fit-method functions.
template<>
struct ObjFuncTrait<ROOT::Math::FitMethodGradFunction> {
static unsigned int NCalls(const ROOT::Math::FitMethodGradFunction & f ) { return f.NCalls(); }
static int Type(const ROOT::Math::FitMethodGradFunction & f) { return f.Type(); }
static bool IsGrad() { return true; }
};
00348
00349
// Generic minimization driver: clones the objective function, hands it to
// the minimizer together with the parameter settings, runs the minimization
// and stores the outcome in a new FitResult. chi2func, when non-null, is an
// auxiliary chi2 function used to evaluate the chi2 of a likelihood fit.
// Returns the minimizer's success flag.
template<class ObjFunc>
bool Fitter::DoMinimization(const ObjFunc & objFunc, unsigned int dataSize, const ROOT::Math::IMultiGenFunction * chi2func) {

// the parameter settings must match the function dimension
assert( fConfig.ParamsSettings().size() == objFunc.NDim() );

// keep an owned clone of the objective function: the minimizer may still
// reference it after this call (e.g. for Hesse/Minos).
// TODO(review): std::auto_ptr is deprecated - migrate to std::unique_ptr
// together with the member declarations in the header.
fObjFunction = std::auto_ptr<ROOT::Math::IMultiGenFunction> ( objFunc.Clone() );

// the minimizer must see the clone (whose lifetime we control), not the
// caller's local objFunc
const ObjFunc * fcn = dynamic_cast<const ObjFunc *> (fObjFunction.get() );
assert(fcn);
fMinimizer->SetFunction( *fcn);

fMinimizer->SetVariables(fConfig.ParamsSettings().begin(), fConfig.ParamsSettings().end() );

// if requested, ask the minimizer for accurate (Hesse-validated) errors
if (fConfig.ParabErrors()) fMinimizer->SetValidError(true);

bool ret = fMinimizer->Minimize();

#ifdef DEBUG
std::cout << "ROOT::Fit::Fitter::DoMinimization : ncalls = " << ObjFuncTrait<ObjFunc>::NCalls(objFunc) << " type of objfunc " << ObjFuncTrait<ObjFunc>::Type(objFunc) << " typeid: " << typeid(objFunc).name() << " prov gradient " << ObjFuncTrait<ObjFunc>::IsGrad() << std::endl;
#endif

// query the call count on the clone (*fcn): that is the object the
// minimizer actually evaluated; objFunc itself was never called
unsigned int ncalls = ObjFuncTrait<ObjFunc>::NCalls(*fcn);
int fitType = ObjFuncTrait<ObjFunc>::Type(objFunc);

// build a fresh FitResult from the minimizer state (even on failure, so
// the partial information is available to the caller)
fResult = std::auto_ptr<FitResult> ( new FitResult(*fMinimizer,fConfig, fFunc, ret, dataSize,
fBinFit, chi2func, ncalls ) );

// chi2 errors may be rescaled by chi2/ndf when requested
if (fConfig.NormalizeErrors() && fitType == ROOT::Math::FitMethodFunction::kLeastSquare ) fResult->NormalizeErrors();

return ret;
}
00389
// Compute parameter errors from the second-derivative (Hesse) matrix and
// update the stored fit result with them. Requires a previous fit.
bool Fitter::CalculateHessErrors() {

// both a minimizer and a fit result must exist from a previous fit
if (!fMinimizer.get() || !fResult.get()) {
MATH_ERROR_MSG("Fitter::CalculateHessErrors","Need to do a fit before calculating the errors");
return false;
}

// run the Hesse calculation on the current minimizer state
bool ret = fMinimizer->Hesse();

// propagate the (possibly improved) minimizer state into the fit result.
// NOTE(review): '|=' means a failed Hesse can still yield 'true' if the
// result update succeeds - confirm this is the intended semantics.
ret |= fResult->Update(*fMinimizer, ret);
return ret;
}
00405
00406
00407 bool Fitter::CalculateMinosErrors() {
00408
00409
00410
00411
00412 fConfig.SetMinosErrors();
00413
00414 if (!fMinimizer.get() ) {
00415 MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Minimizer does not exist - cannot calculate Minos errors");
00416 return false;
00417 }
00418
00419 if (!fResult.get() || fResult->IsEmpty() ) {
00420 MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Invalid Fit Result - cannot calculate Minos errors");
00421 return false;
00422 }
00423
00424 const std::vector<unsigned int> & ipars = fConfig.MinosParams();
00425 unsigned int n = (ipars.size() > 0) ? ipars.size() : fResult->Parameters().size();
00426 bool ok = false;
00427 for (unsigned int i = 0; i < n; ++i) {
00428 double elow, eup;
00429 unsigned int index = (ipars.size() > 0) ? ipars[i] : i;
00430 bool ret = fMinimizer->GetMinosError(index, elow, eup);
00431 if (ret) fResult->SetMinosError(index, elow, eup);
00432 ok |= ret;
00433 }
00434 if (!ok)
00435 MATH_ERROR_MSG("Fitter::CalculateMinosErrors","Minos error calculation failed for all parameters");
00436
00437 return ok;
00438 }
00439
00440
00441 }
00442
00443 }
00444