diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 53abf18..d07b985 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,152 +1,152 @@ stages: - build - test variables: - LCG_VERSION: "98python3" + LCG_VERSION: "101" BUILD_TYPE: "Release" BUILD_ROOFIT_TASK: "OFF" BUILD_DOCS: "OFF" .production_image: variables: LCG_OS: x86_64-centos7 image: gitlab-registry.cern.ch/ci-tools/ci-worker:cc7 tags: - cvmfs .lcg_setup: before_script: - set +e && source /cvmfs/sft.cern.ch/lcg/views/setupViews.sh LCG_$LCG_VERSION $LCG_OS-$LCG_COMPILER; set -e .build_template: stage: build extends: - .lcg_setup script: - mkdir install - mkdir build && cd build - cmake -DCMAKE_BUILD_TYPE:STRING=$BUILD_TYPE -DCMAKE_INSTALL_PREFIX:PATH=$CI_PROJECT_DIR/install -DLAURA_BUILD_EXAMPLES:BOOL=ON -DLAURA_BUILD_DOCS:BOOL=$BUILD_DOCS -DLAURA_BUILD_ROOFIT_TASK:BOOL=$BUILD_ROOFIT_TASK $CI_PROJECT_DIR - cmake --build . - cmake --build . --target install -build_clang10_opt: +build_clang12_opt: variables: - LCG_COMPILER: "clang10-opt" + LCG_COMPILER: "clang12-opt" extends: - .production_image - .build_template -build_gcc10_opt: +build_gcc11_opt: variables: - LCG_COMPILER: "gcc10-opt" + LCG_COMPILER: "gcc11-opt" extends: - .production_image - .build_template -build_gcc10_dbg: +build_gcc11_dbg: variables: - LCG_COMPILER: "gcc10-dbg" + LCG_COMPILER: "gcc11-dbg" BUILD_TYPE: "Debug" BUILD_ROOFIT_TASK: "ON" BUILD_DOCS: "ON" extends: - .production_image - .build_template artifacts: paths: - install expire_in: 1 day .test_template: stage: test variables: - LCG_COMPILER: "gcc10-dbg" + LCG_COMPILER: "gcc11-dbg" extends: - .production_image - .lcg_setup dependencies: - - build_gcc10_dbg + - build_gcc11_dbg artifacts: paths: - runtests expire_in: 1 day script: - export PATH=$CI_PROJECT_DIR/install/bin:$PATH - mkdir runtests && cd runtests - curl -o ft-eta-hist.root https://laura.hepforge.org/CI-files/ft-eta-hist-realistic.root - curl -O https://laura.hepforge.org/CI-files/dta-hist.root - curl -O 
https://laura.hepforge.org/CI-files/dte-hist.root - $CI_PROJECT_DIR/examples/runTimeDepTest.sh 0 $DTA_MODEL $DTR $DTR_PEREVENT test_flatDTA_noDTR: variables: DTA_MODEL: "flat" DTR: "0" DTR_PEREVENT: "0" extends: - .test_template test_flatDTA_avgDTR: variables: DTA_MODEL: "flat" DTR: "1" DTR_PEREVENT: "0" extends: - .test_template test_flatDTA_evtDTR: variables: DTA_MODEL: "flat" DTR: "1" DTR_PEREVENT: "1" extends: - .test_template test_histDTA_noDTR: variables: DTA_MODEL: "hist" DTR: "0" DTR_PEREVENT: "0" extends: - .test_template test_histDTA_avgDTR: variables: DTA_MODEL: "hist" DTR: "1" DTR_PEREVENT: "0" extends: - .test_template test_histDTA_evtDTR: variables: DTA_MODEL: "hist" DTR: "1" DTR_PEREVENT: "1" extends: - .test_template test_splineDTA_noDTR: variables: DTA_MODEL: "spline" DTR: "0" DTR_PEREVENT: "0" extends: - .test_template test_splineDTA_avgDTR: variables: DTA_MODEL: "spline" DTR: "1" DTR_PEREVENT: "0" extends: - .test_template test_splineDTA_evtDTR: variables: DTA_MODEL: "spline" DTR: "1" DTR_PEREVENT: "1" extends: - .test_template diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index affb773..960abf5 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -1,50 +1,52 @@ add_subdirectory(ProgOpts) list(APPEND EXAMPLE_SOURCES B3piKMatrixMassProj B3piKMatrixPlots CalcChiSq GenFit3K GenFit3KS GenFit3pi GenFitBelleCPKpipi GenFitDpipi GenFitDs2KKpi GenFitEFKLLM GenFitIncoherent_Bs2KSKpi GenFitKpipi GenFitNoDP GenFitNoDPMultiDim GenFitTimeDep GenFitTimeDep_Bs2KSKpi KMatrixDto3pi KMatrixExample MergeDataFiles mixedSampleTest PlotKMatrixTAmp PlotResults point2PointTestSample QuasiFlatSqDalitz QuasiFlatSqDalitz-CustomMasses ResultsExtractor SimFitCoordinator SimFitTask SimFitTaskRooFit Test_Dpipi + #FitDataDt + #FitMC ) if(NOT LAURA_BUILD_ROOFIT_TASK) list(REMOVE_ITEM EXAMPLE_SOURCES SimFitTaskRooFit) endif() foreach( _example ${EXAMPLE_SOURCES}) add_executable(${_example} ${_example}.cc) target_link_libraries(${_example} PRIVATE 
Laura++ ProgramOptions) install(TARGETS ${_example} RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) endforeach() # Also install the python script version of GenFit3pi configure_file(GenFit3pi.py.in GenFit3pi.py @ONLY) install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/GenFit3pi.py DESTINATION ${CMAKE_INSTALL_BINDIR}) diff --git a/examples/GenFitTimeDep_Bs2KSKpi.cc b/examples/GenFitTimeDep_Bs2KSKpi.cc index ea611c3..97291b1 100644 --- a/examples/GenFitTimeDep_Bs2KSKpi.cc +++ b/examples/GenFitTimeDep_Bs2KSKpi.cc @@ -1,379 +1,381 @@ /* Copyright 2014 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ ///////////////////////////////////////////////////////////////////////// // // // Time-dependent model for Bs and Bsbar decays to KSK+pi- Dalitz plot // // // ///////////////////////////////////////////////////////////////////////// #include using std::cout; using std::cerr; using std::endl; #include #include #include "TFile.h" #include "TH2.h" #include "TRandom.h" #include "TString.h" #include "TSystem.h" #include "LauDaughters.hh" #include "LauDecayTimePdf.hh" #include "LauEffModel.hh" #include "LauIsobarDynamics.hh" #include "LauMagPhaseCoeffSet.hh" #include "LauCartesianCPCoeffSet.hh" #include "LauRandom.hh" #include "LauRealImagCoeffSet.hh" #include "LauTimeDepNonFlavModel.hh" #include "LauVetoes.hh" void usage(const TString& progName) { cerr<<"Usage:"<2) { iFit = atoi(argv[2]); if (argc>3) { firstExpt = atoi(argv[3]); if (argc>4) { nExpt = atoi(argv[4]); if (argc>5) { nExptGen = atoi(argv[5]); if (argc>6) { Int_t eigval = atoi(argv[6]); if ( eigval == 1 ) { eigenvalue = LauTimeDepNonFlavModel::CPOdd; } else { eigenvalue = LauTimeDepNonFlavModel::CPEven; } } } } } } for (firstExptGen = 0; firstExptGen<(firstExpt+nExpt); firstExptGen+=nExptGen) { } firstExptGen -= nExptGen; if ( (nExpt > nExptGen) || (nExptGen%nExpt != 0) ) { cerr<<"Error, nExpt must be a factor of nExptGen."<2) { firstExptGen = atoi(argv[2]); if (argc>3) { nExptGen = atoi(argv[3]); if (argc>4) { Int_t eigval = atoi(argv[4]); if ( eigval == 1 ) { eigenvalue = LauTimeDepNonFlavModel::CPOdd; } else { eigenvalue = LauTimeDepNonFlavModel::CPEven; } } } } } Double_t nSigEvents = 1000; LauRandom::randomFun()->SetSeed(81648*(firstExptGen+1)); Bool_t squareDP = kTRUE; // Define DP KSK+pi- for both Bs and Bsbar decays LauDaughters* daughtersB0sbar_f = new LauDaughters("B_s0_bar", "K+", "pi-", "K_S0", squareDP); LauDaughters* daughtersB0s_f = new LauDaughters("B_s0", "K+", "pi-", "K_S0", squareDP); LauDaughters* daughtersB0sbar_fbar 
= new LauDaughters("B_s0_bar", "K-", "pi+", "K_S0", squareDP); LauDaughters* daughtersB0s_fbar = new LauDaughters("B_s0", "K-", "pi+", "K_S0", squareDP); // vetoes LauVetoes* vetoes = new LauVetoes(); // efficiency LauEffModel* effModelB0sbar_f = new LauEffModel(daughtersB0sbar_f , vetoes); LauEffModel* effModelB0s_f = new LauEffModel(daughtersB0s_f , vetoes); LauEffModel* effModelB0sbar_fbar = new LauEffModel(daughtersB0sbar_fbar, vetoes); LauEffModel* effModelB0s_fbar = new LauEffModel(daughtersB0s_fbar , vetoes); // signal dynamics LauIsobarDynamics* sigModelB0sbar_f = new LauIsobarDynamics(daughtersB0sbar_f, effModelB0sbar_f); sigModelB0sbar_f->setIntFileName("integ_B0sbar_f.dat"); sigModelB0sbar_f->addResonance("K*-(892)" , 1, LauAbsResonance::RelBW); sigModelB0sbar_f->addResonance("K*0(892)" , 3, LauAbsResonance::RelBW); LauIsobarDynamics* sigModelB0s_f = new LauIsobarDynamics(daughtersB0s_f, effModelB0s_f); sigModelB0s_f->setIntFileName("integ_B0s_f.dat"); sigModelB0s_f->addResonance("K*-(892)" , 1, LauAbsResonance::RelBW); sigModelB0s_f->addResonance("K*0(892)" , 3, LauAbsResonance::RelBW); LauIsobarDynamics* sigModelB0sbar_fbar = new LauIsobarDynamics(daughtersB0sbar_fbar, effModelB0sbar_fbar); sigModelB0sbar_fbar->setIntFileName("integ_B0sbar_fbar.dat"); sigModelB0sbar_fbar->addResonance("K*+(892)", 1, LauAbsResonance::RelBW); sigModelB0sbar_fbar->addResonance("K*0(892)", 3, LauAbsResonance::RelBW); LauIsobarDynamics* sigModelB0s_fbar = new LauIsobarDynamics(daughtersB0s_fbar, effModelB0s_fbar); sigModelB0s_fbar->setIntFileName("integ_B0s_fbar.dat"); sigModelB0s_fbar->addResonance("K*+(892)" , 1, LauAbsResonance::RelBW); sigModelB0s_fbar->addResonance("K*0(892)" , 3, LauAbsResonance::RelBW); // fit model LauTimeDepNonFlavModel* fitModel = new LauTimeDepNonFlavModel(sigModelB0sbar_f, sigModelB0s_f, sigModelB0sbar_fbar, sigModelB0s_fbar); fitModel->setASqMaxValue(1.20); std::vector > coeffset; Bool_t doTwoStageFit = kTRUE; LauAbsCoeffSet* Kstm_Bs_f = new 
LauCartesianCPCoeffSet("K*-(892)", 1.0, 0.0, 0.0, 0.0, kFALSE, kFALSE, kFALSE, kFALSE, doTwoStageFit, doTwoStageFit); LauAbsCoeffSet* Kstp_Bs_fbar = new LauCartesianCPCoeffSet("K*+(892)", 1.0, 0.0, 0.0, 0.0, kFALSE, kFALSE, kFALSE, kFALSE, doTwoStageFit, doTwoStageFit); LauAbsCoeffSet* Kstz_Bs_f = new LauCartesianCPCoeffSet("K*0(892)", 1.0, 0.0, 0.0, 0.0, kTRUE, kTRUE, kTRUE, kTRUE, doTwoStageFit, doTwoStageFit); LauAbsCoeffSet* Kstz_Bs_fbar = new LauCartesianCPCoeffSet("K*0(892)", 1.0, 0.0, 0.0, 0.0, kTRUE, kTRUE, kTRUE, kTRUE, doTwoStageFit, doTwoStageFit); Kstm_Bs_f->baseName("Kstm_Bs_f_"); Kstp_Bs_fbar->baseName("Kstp_Bs_fbar_"); Kstz_Bs_f->baseName("Kstz_Bs_f_"); Kstz_Bs_fbar->baseName("Kstz_Bs_fbar_"); // Adding a pair of coefficients for (B0s->f:B0sbar->fbar) and (B0s->fbar:B0sbar->f) coeffset.push_back(std::make_pair( Kstm_Bs_f, Kstp_Bs_fbar )); coeffset.push_back(std::make_pair( Kstz_Bs_f, Kstz_Bs_fbar )); for (std::vector >::iterator iter=coeffset.begin(); iter!=coeffset.end(); ++iter) { fitModel->setAmpCoeffSet(iter->first, iter->second); } fitModel->setCPEigenvalue( eigenvalue ); fitModel->setPhiMix( -2.0*LauConstants::beta_s, fixPhiMix, useSinCos ); // Tag cat fractions, dilutions and Delta dilutions (set to one) fitModel->addValidTagCat(63); fitModel->setSignalTagCatPars(63, 1.0, 1.0, 0.0, kTRUE); // Delta t PDFs const Double_t minDt(0.0); const Double_t maxDt(20.0); const Double_t minDtErr(0.0); const Double_t maxDtErr(2.5); const Int_t nGauss(3); std::vector scale(nGauss); scale[0] = kTRUE; scale[1] = kTRUE; scale[2] = kFALSE; std::vector dtPars(11); TString mean0Name("dt_"); mean0Name += "_mean_0"; TString mean1Name("dt_"); mean1Name += "_mean_1"; TString mean2Name("dt_"); mean2Name += "_mean_2"; TString sigma0Name("dt_"); sigma0Name += "_sigma_0"; TString sigma1Name("dt_"); sigma1Name += "_sigma_1"; TString sigma2Name("dt_"); sigma2Name += "_sigma_2"; TString frac1Name("dt_"); frac1Name += "_frac_1"; TString frac2Name("dt_"); frac2Name += 
"_frac_2"; TString tauName("dt_"); tauName += "_tau"; TString freqName("dt_"); freqName += "_deltaM"; TString wdiffName("dt_"); wdiffName += "_deltaGamma"; LauParameter * mean0 = new LauParameter(mean0Name, -0.181); LauParameter * mean1 = new LauParameter(mean1Name, -1.27); LauParameter * mean2 = new LauParameter(mean2Name, 0.0); LauParameter * sigma0 = new LauParameter(sigma0Name, 1.067); LauParameter * sigma1 = new LauParameter(sigma1Name, 3.0); LauParameter * sigma2 = new LauParameter(sigma2Name, 8.0); LauParameter * frac1 = new LauParameter(frac1Name, 0.0930); LauParameter * frac2 = new LauParameter(frac2Name, 0.0036); LauParameter * tau = new LauParameter(tauName, 1.480); LauParameter * freq = new LauParameter(freqName, 17.69); LauParameter * wdiff = new LauParameter(wdiffName, 0.100); TString mean0tagcat63Name("dt_"); mean0tagcat63Name += 63; mean0tagcat63Name += "_mean_0"; TString sigma0tagcat63Name("dt_"); sigma0tagcat63Name += 63; sigma0tagcat63Name += "_sigma_0"; LauParameter * mean0tagcat63 = new LauParameter(mean0tagcat63Name, -0.031); LauParameter * sigma0tagcat63 = new LauParameter(sigma0tagcat63Name, 0.972); for ( Int_t tagCat(0); tagCat<64; ++tagCat ) { if (tagCat==0){ dtPars[0] = mean0; dtPars[1] = mean1; dtPars[2] = mean2; dtPars[3] = sigma0; dtPars[4] = sigma1; dtPars[5] = sigma2; dtPars[6] = frac1; dtPars[7] = frac2; dtPars[8] = tau; dtPars[9] = freq; dtPars[10] = wdiff; } else if (tagCat==63){ dtPars[0] = mean0tagcat63; dtPars[1] = mean1->createClone(); dtPars[2] = mean2->createClone(); dtPars[3] = sigma0tagcat63; dtPars[4] = sigma1->createClone(); dtPars[5] = sigma2->createClone(); dtPars[6] = frac1->createClone(); dtPars[7] = frac2->createClone(); dtPars[8] = tau->createClone(); dtPars[9] = freq->createClone(); dtPars[10] = wdiff->createClone(); } else { dtPars[0] = mean0->createClone(); dtPars[1] = mean1->createClone(); dtPars[2] = mean2->createClone(); dtPars[3] = sigma0->createClone(); dtPars[4] = sigma1->createClone(); dtPars[5] = 
sigma2->createClone(); dtPars[6] = frac1->createClone(); dtPars[7] = frac2->createClone(); dtPars[8] = tau->createClone(); dtPars[9] = freq->createClone(); dtPars[10] = wdiff->createClone(); } - LauDecayTimePdf * dtPdf = new LauDecayTimePdf( "deltaTAvg", "deltaTAvgErr", dtPars, minDt, maxDt, minDtErr, maxDtErr, LauDecayTimePdf::ExpHypTrig, nGauss, scale, LauDecayTimePdf::DecayTime ); + LauDecayTimePdf * dtPdf = new LauDecayTimePdf( "deltaTAvg", "deltaTAvgErr", dtPars, minDt, maxDt, minDtErr, maxDtErr, LauDecayTime::FuncType::ExpHypTrig, nGauss, scale, LauDecayTime::TimeMeasurementMethod::DecayTime ); dtPdf->doSmearing(kFALSE); fitModel->setSignalDtPdf( tagCat, dtPdf ); if (tagCat==0) { tagCat=62; } } // set the number of signal events LauParameter* nSigPar = new LauParameter("signalEvents", nSigEvents, -2.0*nSigEvents, 2.0*nSigEvents, kTRUE); fitModel->setNSigEvents(nSigPar); // set the number of experiments if (command == "fit") { fitModel->setNExpts(nExpt, firstExpt); } else { fitModel->setNExpts(nExptGen, firstExptGen); } // Do not calculate asymmetric errors. 
fitModel->useAsymmFitErrors(kFALSE); // Randomise initial fit values for the signal mode fitModel->useRandomInitFitPars(kFALSE); // Switch off Poissonian smearing of total number of events fitModel->doPoissonSmearing(kFALSE); // Switch on Extended ML Fit option fitModel->doEMLFit(kFALSE); // Write LaTeX table fitModel->writeLatexTable(kFALSE); TString dataFile(""); TString treeName("fitTree"); TString rootFileName(""); TString tableFileName(""); TString fitToyFileName(""); TString splotFileName(""); if (command == "fit") { dataFile = "data"; dataFile += "_expts"; dataFile += firstExptGen; dataFile += "-"; dataFile += firstExptGen+nExptGen-1; dataFile += "_CP"; if ( eigenvalue == LauTimeDepNonFlavModel::CPEven ) { dataFile += "even"; } else { dataFile += "odd"; } dataFile += ".root"; rootFileName = "fit"; rootFileName += iFit; rootFileName += "_expts"; rootFileName += firstExpt; rootFileName += "-"; rootFileName += firstExpt+nExpt-1; rootFileName += ".root"; tableFileName = "fitResults_"; tableFileName += iFit; tableFileName += "_expts"; tableFileName += firstExpt; tableFileName += "-"; tableFileName += firstExpt+nExpt-1; fitToyFileName = "fitToyMC_"; fitToyFileName += iFit; fitToyFileName += "_expts"; fitToyFileName += firstExpt; fitToyFileName += "-"; fitToyFileName += firstExpt+nExpt-1; fitToyFileName += ".root"; splotFileName = "splot_"; splotFileName += iFit; splotFileName += "_expts"; splotFileName += firstExpt; splotFileName += "-"; splotFileName += firstExpt+nExpt-1; splotFileName += ".root"; } else { dataFile = "data"; dataFile += "_expts"; dataFile += firstExptGen; dataFile += "-"; dataFile += firstExptGen+nExptGen-1; dataFile += "_CP"; if ( eigenvalue == LauTimeDepNonFlavModel::CPEven ) { dataFile += "even"; } else { dataFile += "odd"; } dataFile += ".root"; rootFileName = "dummy.root"; tableFileName = "genResults"; } // Generate toy from the fitted parameters //fitModel->compareFitData(10, fitToyFileName); // Write out per-event likelihoods and sWeights 
//fitModel->writeSPlotData(splotFileName, "splot", kFALSE); // Run! fitModel->run(command, dataFile, treeName, rootFileName, tableFileName); + */ return EXIT_SUCCESS; } diff --git a/examples/ProgOpts/Test_Dpipi_ProgOpts.cc b/examples/ProgOpts/Test_Dpipi_ProgOpts.cc index a5da04f..fe9739c 100644 --- a/examples/ProgOpts/Test_Dpipi_ProgOpts.cc +++ b/examples/ProgOpts/Test_Dpipi_ProgOpts.cc @@ -1,142 +1,148 @@ #include #include "boost/program_options.hpp" #include "Test_Dpipi_ProgOpts.hh" namespace po = boost::program_options; void validate( boost::any& v, const std::vector& values, Command* /*target_type*/, int) { // Make sure no previous assignment to 'v' has been made po::validators::check_first_occurrence(v); // Extract the first string from 'values'. If there is more than // one string, it's an error, and exception will be thrown. const std::string& s { po::validators::get_single_string(values) }; if ( s == "gen" ) { v = boost::any( Command::Generate ); } else if ( s == "fit" ) { v = boost::any( Command::Fit ); } else if ( s == "simfit" ) { v = boost::any( Command::SimFit ); } else { throw po::validation_error(po::validation_error::invalid_option_value, "command", s, 3); } } void validate( boost::any& v, const std::vector& values, LauTimeDepFitModel::CPEigenvalue* /*target_type*/, int) { // Make sure no previous assignment to 'v' has been made po::validators::check_first_occurrence(v); // Extract the first string from 'values'. If there is more than // one string, it's an error, and exception will be thrown. 
const std::string& s { po::validators::get_single_string(values) }; if ( s == "QFS" ) { v = boost::any( LauTimeDepFitModel::CPEigenvalue::QFS ); } else if ( s == "CPEven" ) { v = boost::any( LauTimeDepFitModel::CPEigenvalue::CPEven ); } else if ( s == "CPOdd" ) { v = boost::any( LauTimeDepFitModel::CPEigenvalue::CPOdd ); } else { throw po::validation_error(po::validation_error::invalid_option_value); } } -void validate( boost::any& v, const std::vector& values, LauDecayTimePdf::EfficiencyMethod* /*target_type*/, int) +namespace LauDecayTime { +void validate( boost::any& v, const std::vector& values, EfficiencyMethod* /*target_type*/, int) { // Make sure no previous assignment to 'v' has been made po::validators::check_first_occurrence(v); // Extract the first string from 'values'. If there is more than // one string, it's an error, and exception will be thrown. const std::string& s { po::validators::get_single_string(values) }; - if ( s == "flat" ) { - v = boost::any( LauDecayTimePdf::EfficiencyMethod::Flat ); - } else if ( s == "hist" || s == "binned" ) { - v = boost::any( LauDecayTimePdf::EfficiencyMethod::Binned ); + if ( s == "uniform" || s == "flat" ) { + v = boost::any( EfficiencyMethod::Uniform ); + } else if ( s == "binned" || s == "hist" ) { + v = boost::any( EfficiencyMethod::Binned ); } else if ( s == "spline" ) { - v = boost::any( LauDecayTimePdf::EfficiencyMethod::Spline ); + v = boost::any( EfficiencyMethod::Spline ); } else { throw po::validation_error(po::validation_error::invalid_option_value); } } +}; TestDpipi_ProgramSettings::TestDpipi_ProgramSettings(const int argc, const char ** argv) { po::options_description main_desc{"Main options"}; main_desc.add_options() ("command", po::value(&command)->required(), "main command: gen, fit, or simfit") ; po::positional_options_description p; p.add("command", 1); po::options_description common_desc{"Common options"}; common_desc.add_options() ("help", "produce help message") ("dtype", 
po::value(&dType)->default_value(LauTimeDepFitModel::CPEigenvalue::QFS,"QFS"), "type of D decay: QFS, CPOdd, or CPEven") - ("dta-model", po::value(&timeEffModel)->default_value(LauDecayTimePdf::EfficiencyMethod::Flat,"flat"), "decay-time acceptance model: flat, hist/binned, spline") + ("dta-model", po::value(&timeEffModel)->default_value(LauDecayTime::EfficiencyMethod::Uniform,"uniform"), "decay-time acceptance model: uniform/flat, binned/hist, spline") ("dtr", po::value(&timeResolution)->default_value(kTRUE), "enable/disable decay-time resolution") ("dtr-perevent", po::value(&perEventTimeErr)->default_value(kFALSE), "enable/disable use of per-event decay-time error (requires decay-time resolution to be enabled to take effect)") ("seed", po::value(&RNGseed)->default_value(0), "set the seed for the RNG; if not set, the time is used to generate a seed") + ("dir", po::value(&directory)->default_value("",""), "set the directory used to find the nTuples within the root file. Defaults to no directory") ; po::options_description gen_desc{"Generation options"}; gen_desc.add_options() ("firstExptGen", po::value(&firstExptGen)->default_value(0), "first experiment to generate") ("nExptGen", po::value(&nExptGen)->default_value(1), "number of experiments to generate") ; po::options_description fit_desc{"Fitting options"}; fit_desc.add_options() ("firstExpt", po::value(&firstExptFit)->default_value(0), "first experiment to fit") ("nExpt", po::value(&nExptFit)->default_value(1), "number of experiments to fit") ("iFit", po::value(&iFit)->default_value(0), "fit ID - used to distinguish multiple fits to a given dataset (used to disentangle multiple minima)") ("fixTau", po::value(&fixLifetime)->default_value(kTRUE), "enable/disable floating of B lifetime parameter") ("fixDm", po::value(&fixDeltaM)->default_value(kTRUE), "enable/disable floating of B mixing frequency parameter") ("fixPhiMix", po::value(&fixPhiMix)->default_value(kFALSE), "enable/disable floating of B mixing phase 
parameter(s)") + ("fixSplineKnots", po::value(&fixSplineKnots)->default_value(kFALSE), "enable/disable floating of the decay-time-acceptance spline") + ("useSinCos", po::value(&useSinCos)->default_value(kTRUE), "enable/disable using sine and cosine of B mixing phase as the floating parameters rather than the phase itself") + ("useAveDeltaCalibVals", po::value(&useAveDeltaCalibVals)->default_value(kTRUE), "enable/disable fitting calib values as their average +/- delta values rather than having separate values for B and Bbar") ; po::options_description simfit_desc{"Simultaneous fitting options"}; simfit_desc.add_options() ("port", po::value(&port)->default_value(0), "port number on which sim fit coordinator is listening") ; po::options_description all_desc; all_desc.add(main_desc).add(common_desc).add(gen_desc).add(fit_desc).add(simfit_desc); po::variables_map vm; try { po::store(po::command_line_parser(argc, argv). options(all_desc). positional(p). run(), vm); po::notify(vm); } catch ( boost::wrapexcept& e ) { std::cout << argv[0] << " requires a command, one of 'gen', 'fit', or 'simfit'\n" << "Append --help to those commands to see help on the related options" << std::endl; parsedOK = kFALSE; return; } catch ( po::validation_error& e ) { std::cerr << e.what() << std::endl; parsedOK = kFALSE; return; } if ( vm.count("help") ) { po::options_description help_desc; help_desc.add(common_desc).add(gen_desc).add(fit_desc).add(simfit_desc); std::cout << help_desc << std::endl; helpRequested = kTRUE; return; } } diff --git a/examples/ProgOpts/Test_Dpipi_ProgOpts.hh b/examples/ProgOpts/Test_Dpipi_ProgOpts.hh index f09e4f9..807d115 100644 --- a/examples/ProgOpts/Test_Dpipi_ProgOpts.hh +++ b/examples/ProgOpts/Test_Dpipi_ProgOpts.hh @@ -1,34 +1,42 @@ #ifndef TEST_DPIPI_PROGOPTS #define TEST_DPIPI_PROGOPTS #include "Rtypes.h" -#include "LauDecayTimePdf.hh" +#include "LauDecayTime.hh" #include "LauTimeDepFitModel.hh" #include "Command.hh" +#include struct 
TestDpipi_ProgramSettings { TestDpipi_ProgramSettings(const int argc, const char ** argv); Bool_t parsedOK{kTRUE}; Bool_t helpRequested{kFALSE}; Command command{Command::Generate}; LauTimeDepFitModel::CPEigenvalue dType{LauTimeDepFitModel::CPEigenvalue::QFS}; + LauDecayTime::EfficiencyMethod timeEffModel{LauDecayTime::EfficiencyMethod::Uniform}; + UInt_t firstExptGen{0}; UInt_t nExptGen{1}; UInt_t firstExptFit{0}; UInt_t nExptFit{1}; UInt_t iFit{0}; UInt_t port{0}; UInt_t RNGseed{0}; - LauDecayTimePdf::EfficiencyMethod timeEffModel{LauDecayTimePdf::EfficiencyMethod::Flat}; + Bool_t timeResolution{kTRUE}; Bool_t perEventTimeErr{kFALSE}; Bool_t fixLifetime{kTRUE}; Bool_t fixDeltaM{kTRUE}; Bool_t fixPhiMix{kFALSE}; + Bool_t fixSplineKnots{kFALSE}; + Bool_t useSinCos{kTRUE}; + Bool_t useAveDeltaCalibVals{kTRUE}; + + std::string directory{""}; }; #endif diff --git a/examples/Test_Dpipi.cc b/examples/Test_Dpipi.cc index d5def1b..4daf289 100644 --- a/examples/Test_Dpipi.cc +++ b/examples/Test_Dpipi.cc @@ -1,360 +1,435 @@ #include #include #include #include #include "TFile.h" #include "TH2.h" #include "TRandom.h" #include "TString.h" #include "TSystem.h" #include "TF1.h" #include "TCanvas.h" +#include "Lau1DHistPdf.hh" +#include "Lau1DCubicSpline.hh" +#include "LauBinnedDecayTimeEfficiency.hh" +#include "LauBkgndDPModel.hh" #include "LauDaughters.hh" +#include "LauDecayTime.hh" #include "LauDecayTimePdf.hh" +#include "LauDecayTimePhysicsModel.hh" +#include "LauDecayTimeResolution.hh" #include "LauEffModel.hh" +#include "LauFlavTag.hh" #include "LauIsobarDynamics.hh" #include "LauMagPhaseCoeffSet.hh" #include "LauRandom.hh" #include "LauRealImagCoeffSet.hh" +#include "LauSplineDecayTimeEfficiency.hh" #include "LauTimeDepFitModel.hh" #include "LauVetoes.hh" -#include "LauFlavTag.hh" -#include "Lau1DHistPdf.hh" -#include "Lau1DCubicSpline.hh" -#include "LauBkgndDPModel.hh" #include "Test_Dpipi_ProgOpts.hh" int main(const int argc, const char ** argv) { const 
TestDpipi_ProgramSettings settings{argc,argv}; if ( settings.helpRequested ) { return EXIT_SUCCESS; } if ( ! settings.parsedOK ) { return EXIT_FAILURE; } - const Bool_t fixPhiMix{ settings.fixPhiMix || settings.dType == LauTimeDepFitModel::CPEigenvalue::QFS }; - const Bool_t useSinCos{kTRUE}; - - Double_t nSigEvents{0}; - switch (settings.dType) { - case LauTimeDepFitModel::CPEigenvalue::CPEven : - nSigEvents = 15000; - break; - case LauTimeDepFitModel::CPEigenvalue::CPOdd : - nSigEvents = 5000; - break; - case LauTimeDepFitModel::CPEigenvalue::QFS : - nSigEvents = 50000; - break; - } - LauRandom::setSeed(settings.RNGseed); LauDaughters* daughtersB0bar = new LauDaughters("B0_bar", "pi+", "pi-", "D0"); LauDaughters* daughtersB0 = new LauDaughters("B0", "pi+", "pi-", "D0_bar"); // efficiency LauVetoes* vetoes = new LauVetoes(); - //vetoes->addMassVeto( 2, 2.00776, 2.01276 ); LauEffModel* effModelB0bar = new LauEffModel(daughtersB0bar, vetoes); LauEffModel* effModelB0 = new LauEffModel(daughtersB0, vetoes); - //Set up backgrounds before flavTag construction - //std::map bkgndInfo{{"Bkgnd1",LauFlavTag::BkgndType::Combinatorial},{"Bkgnd2",LauFlavTag::BkgndType::SignalLike}}; - //const Double_t nBkgnd1(100), nBkgnd2(200); - //Names of these LauParameters must match the name of the background they represent - //LauParameter* Bkgnd1Yield = new LauParameter("Bkgnd1",nBkgnd1,-1.0*nBkgnd1,2.0*nBkgnd1,kFALSE); - //LauParameter* Bkgnd2Yield = new LauParameter("Bkgnd2",nBkgnd2,-1.0*nBkgnd2,2.0*nBkgnd2,kFALSE); + // background types + /* + std::map bkgndInfo { + {"Bkgnd1",LauFlavTag::BkgndType::Combinatorial}, + {"Bkgnd2",LauFlavTag::BkgndType::SignalLike} + }; + */ - //Args for flavTag: useAveDelta - kFALSE and useEtaPrime - kFALSE - //LauFlavTag* flavTag = new LauFlavTag(kFALSE,kFALSE,bkgndInfo); - LauFlavTag* flavTag = new LauFlavTag(kFALSE,kFALSE); - flavTag->setTrueTagVarName("trueTag"); + // setup flavour tagging + const Bool_t useAveDeltaCalibVals { 
settings.useAveDeltaCalibVals }; + const Bool_t useEtaPrime { kFALSE }; + LauFlavTag* flavTag = new LauFlavTag(useAveDeltaCalibVals,useEtaPrime); + //LauFlavTag* flavTag = new LauFlavTag(useAveDeltaCalibVals,useEtaPrime,BkgndTypes); if (settings.dType == LauTimeDepFitModel::CPEigenvalue::QFS) { flavTag->setDecayFlvVarName("decayFlv"); } + flavTag->setTrueTagVarName("trueTag"); TFile* etaFile = TFile::Open("ft-eta-hist.root"); TH1* etaHist = dynamic_cast(etaFile->Get("ft_eta_hist")); // Crude check as to whether we're doing perfect vs realistic mis-tag // - in the former case all entries should be in the first bin // If the tagging is perfect then don't interpolate the eta histogram // and also make it perfectly efficient, otherwise do interpolate and // make it 50% efficient const Double_t meanEta { etaHist->GetMean() }; const Bool_t interpolateEtaHist { meanEta > etaHist->GetBinWidth(1) }; Lau1DHistPdf* etaHistPDF = new Lau1DHistPdf( "eta", etaHist, 0.0, 0.5, interpolateEtaHist, kFALSE ); const Double_t tagEffVal { (interpolateEtaHist) ? 0.5 : 1.0 }; - std::pair tagEff {tagEffVal, tagEffVal}; + std::pair tagEff {tagEffVal, useAveDeltaCalibVals ? 0.0 : tagEffVal}; // use a null calibration for the time being, so p0 = and p1 = 1 - std::pair calib0 {meanEta, meanEta}; - std::pair calib1 {1.0, 1.0}; + std::pair calib0 {meanEta, useAveDeltaCalibVals ? 0.0 : meanEta}; + std::pair calib1 {1.0, useAveDeltaCalibVals ? 
0.0 : 1.0}; flavTag->addTagger("OSTagger", "tagVal_OS", "mistagVal_OS", etaHistPDF, tagEff, calib0, calib1); + flavTag->floatAllCalibPars(); - //FlavTag Bkgnd things - //flavTag->setBkgndParams("Bkgnd1", "OSTagger", etaHistPDF, {1.0, 1.0}); - //flavTag->setBkgndParams("Bkgnd2", "OSTagger", etaHistPDF, {1.0, 1.0}); + // flavour tagging setup for backgrounds + /* + std::map BkgndEtas; + TFile* bkgndEtaFile = TFile::Open("ft-bkgnd-eta-hists.root"); + for(auto& [name, _] : BkgndTypes) + { + TH1* bkgndEtaHist = dynamic_cast(bkgndEtaFile->Get( name+"_eta_hist" )); + Lau1DHistPdf* bkgndEtaHistPDF = new Lau1DHistPdf("eta",bkgndEtaHist,0.0,0.5,kTRUE,kFALSE); + BkgndEtas.emplace( std::make_pair(name, bkgndEtaHistPDF) ); + } + for(auto& [name,hist] : BkgndEtas) + {flavTag->setBkgndParams(name, "IFT", hist, tagEff );} + */ // signal dynamics LauIsobarDynamics* sigModelB0bar = new LauIsobarDynamics(daughtersB0bar, effModelB0bar); sigModelB0bar->setIntFileName("integ_B0bar.dat"); sigModelB0bar->addResonance("D*+_2", 2, LauAbsResonance::RelBW); sigModelB0bar->addResonance("D*+_0", 2, LauAbsResonance::RelBW); sigModelB0bar->addResonance("rho0(770)", 3, LauAbsResonance::RelBW); sigModelB0bar->addResonance("f_0(980)", 3, LauAbsResonance::RelBW); sigModelB0bar->addResonance("f_2(1270)", 3, LauAbsResonance::RelBW); LauIsobarDynamics* sigModelB0 = new LauIsobarDynamics(daughtersB0, effModelB0); sigModelB0->setIntFileName("integ_B0.dat"); sigModelB0->addResonance("D*-_2", 1, LauAbsResonance::RelBW); sigModelB0->addResonance("D*-_0", 1, LauAbsResonance::RelBW); sigModelB0->addResonance("rho0(770)", 3, LauAbsResonance::RelBW); sigModelB0->addResonance("f_0(980)", 3, LauAbsResonance::RelBW); sigModelB0->addResonance("f_2(1270)", 3, LauAbsResonance::RelBW); // fit model LauTimeDepFitModel* fitModel = new LauTimeDepFitModel(sigModelB0bar,sigModelB0,flavTag); - //fitModel->setNBkgndEvents(Bkgnd1Yield); - //fitModel->setNBkgndEvents(Bkgnd2Yield); - //LauBkgndDPModel* Bkgnd1Model = new 
LauBkgndDPModel(daughtersB0, vetoes); - //LauBkgndDPModel* Bkgnd1Modelbar = new LauBkgndDPModel(daughtersB0bar, vetoes); - //LauBkgndDPModel* Bkgnd2Model = new LauBkgndDPModel(daughtersB0, vetoes); - //LauBkgndDPModel* Bkgnd2Modelbar = new LauBkgndDPModel(daughtersB0bar, vetoes); - //fitModel->setBkgndDPModels( "Bkgnd1", Bkgnd1Model, Bkgnd1Modelbar ); - //fitModel->setBkgndDPModels( "Bkgnd2", Bkgnd2Model, Bkgnd2Modelbar ); std::vector coeffset; coeffset.push_back( new LauRealImagCoeffSet("D*+_2", 1.00, 0.00, kTRUE, kTRUE) ); coeffset.push_back( new LauRealImagCoeffSet("D*+_0", 0.53*TMath::Cos( 3.00), 0.53*TMath::Sin( 3.00), kFALSE, kFALSE) ); coeffset.push_back( new LauRealImagCoeffSet("rho0(770)", 1.22*TMath::Cos( 2.25), 1.22*TMath::Sin( 2.25), kFALSE, kFALSE) ); coeffset.push_back( new LauRealImagCoeffSet("f_0(980)", 0.19*TMath::Cos(-2.48), 0.19*TMath::Sin(-2.48), kFALSE, kFALSE) ); coeffset.push_back( new LauRealImagCoeffSet("f_2(1270)", 0.75*TMath::Cos( 2.97), 0.75*TMath::Sin( 2.97), kFALSE, kFALSE) ); for (std::vector::iterator iter=coeffset.begin(); iter!=coeffset.end(); ++iter) { fitModel->setAmpCoeffSet(*iter); } + // background DP models + /* + LauBkgndDPModel* Bkgnd1Model = new LauBkgndDPModel(daughtersB0, vetoes); + LauBkgndDPModel* Bkgnd1Modelbar = new LauBkgndDPModel(daughtersB0bar, vetoes); + LauBkgndDPModel* Bkgnd2Model = new LauBkgndDPModel(daughtersB0, vetoes); + LauBkgndDPModel* Bkgnd2Modelbar = new LauBkgndDPModel(daughtersB0bar, vetoes); + fitModel->setBkgndDPModels( "Bkgnd1", Bkgnd1Model, Bkgnd1Modelbar ); + fitModel->setBkgndDPModels( "Bkgnd2", Bkgnd2Model, Bkgnd2Modelbar ); + */ + + // decay type and mixing parameter + const Bool_t fixPhiMix{ settings.fixPhiMix || settings.dType == LauTimeDepFitModel::CPEigenvalue::QFS }; + const Bool_t useSinCos{ settings.useSinCos }; + fitModel->setCPEigenvalue( settings.dType ); fitModel->setPhiMix( 2.0*LauConstants::beta, fixPhiMix, useSinCos ); - // production asymmetry - 
fitModel->setAsymmetries(0.0,kTRUE); + // production asymmetries + fitModel->setAsymmetries( 0.0, kTRUE ); + /* + for(auto& [name, _] : BkgndTypes) { + fitModel->setBkgndAsymmetries( name, 0.0, kTRUE ); + } + */ - // Delta t PDFs + // decay time PDFs const Double_t minDt(0.0); const Double_t maxDt(15.0); const Double_t minDtErr(0.0); const Double_t maxDtErr(0.15); - const std::vector scale { - settings.perEventTimeErr && kTRUE, - settings.perEventTimeErr && kTRUE, + + LauParameter * tau = new LauParameter("dt_tau", 1.519, 0.5, 5.0, settings.fixLifetime); + LauParameter * freq = new LauParameter("dt_deltaM", 0.5064, 0.0, 1.0, settings.fixDeltaM); + + std::vector dtPhysPars { + tau, + freq }; - const UInt_t nGauss(scale.size()); + + auto dtPhysModel = std::make_unique( LauDecayTime::FuncType::ExpTrig, dtPhysPars ); + + const std::vector scale { + settings.perEventTimeErr && true, + settings.perEventTimeErr && true, + }; + const std::size_t nGauss{scale.size()}; LauParameter * mean0 = new LauParameter("dt_mean_0", scale[0] ? -2.01290e-03 : -2.25084e-03, -0.01, 0.01, kTRUE ); LauParameter * mean1 = new LauParameter("dt_mean_1", scale[1] ? -2.01290e-03 : -5.04275e-03, -0.01, 0.01, kTRUE ); LauParameter * sigma0 = new LauParameter("dt_sigma_0", scale[0] ? 9.95145e-01 : 3.03923e-02, 0.0, 2.0, kTRUE ); LauParameter * sigma1 = new LauParameter("dt_sigma_1", scale[1] ? 1.81715e+00 : 6.22376e-02, 0.0, 2.5, kTRUE ); LauParameter * frac1 = new LauParameter("dt_frac_1", scale[0] && scale[1] ? 
1.-9.35940e-01 : 1.-7.69603e-01, 0.0, 1.0, kTRUE); - LauParameter * tau = new LauParameter("dt_tau", 1.519, 0.5, 5.0, settings.fixLifetime); - LauParameter * freq = new LauParameter("dt_deltaM", 0.5064, 0.0, 1.0, settings.fixDeltaM); - std::vector dtPars { + std::vector dtResoPars { mean0, mean1, sigma0, sigma1, - frac1, - tau, - freq + frac1 }; - LauDecayTimePdf * dtPdf = new LauDecayTimePdf( "decayTime", "decayTimeErr", dtPars, minDt, maxDt, minDtErr, maxDtErr, LauDecayTimePdf::ExpTrig, nGauss, scale, LauDecayTimePdf::DecayTime, settings.timeEffModel ); - - // Use decay time resolution - dtPdf->doSmearing(settings.timeResolution); + auto dtResoModel = std::make_unique( nGauss, dtResoPars, scale ); // Decay time error histogram // (always set this so that it gets generated properly, whether we're using it in the PDF or not) - TFile* dteFile = TFile::Open("dte-hist.root"); - TH1* dteHist = dynamic_cast(dteFile->Get("dte_hist")); - dtPdf->setErrorHisto( dteHist ); + TFile* dtErrFile = TFile::Open("dte-hist.root"); + TH1* dtErrHist = dynamic_cast(dtErrFile->Get("dte_hist")); + + LauDecayTimePdf * dtPdf{nullptr}; + if ( settings.timeResolution ) { + if ( settings.perEventTimeErr ) { + dtPdf = new LauDecayTimePdf( "decayTime", minDt, maxDt, "decayTimeErr", minDtErr, maxDtErr, std::move(dtPhysModel), std::move(dtResoModel), dtErrHist ); + } else { + dtPdf = new LauDecayTimePdf( "decayTime", minDt, maxDt, std::move(dtPhysModel), std::move(dtResoModel) ); + } + } else { + dtPdf = new LauDecayTimePdf( "decayTime", minDt, maxDt, std::move(dtPhysModel) ); + } // Decay time acceptance histogram TFile* dtaFile = TFile::Open("dta-hist.root"); TH1* dtaHist = dynamic_cast(dtaFile->Get("dta_hist")); // Create the spline knot positions and // starting Y values, to be fit to dtaHist const std::vector dtvals {0.0, 0.5, 1.0, 2.0, 3.0, 4.0, 7.0, 10.0, 15.0 }; const std::vector effvals {0.0, 0.01, 0.022, 0.035, 0.042, 0.05, 0.051, 0.052, 0.055}; - Lau1DCubicSpline* dtEffSpline { nullptr 
}; switch(settings.timeEffModel) { - case LauDecayTimePdf::EfficiencyMethod::Spline: + case LauDecayTime::EfficiencyMethod::Spline: { fitModel->setASqMaxValue(0.06); - dtEffSpline = new Lau1DCubicSpline(dtvals,effvals,Lau1DCubicSpline::AkimaSpline,Lau1DCubicSpline::Natural,Lau1DCubicSpline::Natural); + auto dtEffSpline = std::make_unique(dtvals,effvals,Lau1DCubicSpline::AkimaSpline,Lau1DCubicSpline::Natural,Lau1DCubicSpline::Natural); dtEffSpline->fitToTH1(dtaHist); - dtPdf->setEffiSpline(dtEffSpline); + + auto dtaModel = std::make_unique>( std::move(dtEffSpline) ); // Set which knots to float and which to fix (at least 1 knot must be fixed, not the first one) - // Knots should only be floating if both DeltaM and the B lifetime are fixed! - for(auto& par : dtPdf -> getEffiPars()) - {par -> fixed( not (settings.fixDeltaM and settings.fixLifetime) );} - dtPdf -> getEffiPars().at(3) -> fixed(kTRUE); - dtPdf -> getEffiPars().at(0) -> fixed(kTRUE); + // Knots should only be floating if requested AND the B lifetime is fixed! 
+ if ( settings.fixSplineKnots or not settings.fixLifetime ) { + dtaModel->fixKnots(); + } else { + dtaModel->floatKnots(); + dtaModel->fixKnot( 0, true ); + dtaModel->fixKnot( 3, true ); + } + + dtPdf->setSplineEfficiency( std::move(dtaModel) ); + break; } - case LauDecayTimePdf::EfficiencyMethod::Binned: + case LauDecayTime::EfficiencyMethod::Binned: { fitModel->setASqMaxValue(0.06); - dtPdf->setEffiHist(dtaHist); + + auto dtaModel = std::make_unique( *dtaHist ); + dtPdf->setBinnedEfficiency( std::move(dtaModel) ); + break; } - case LauDecayTimePdf::EfficiencyMethod::Flat: + case LauDecayTime::EfficiencyMethod::Uniform: { fitModel->setASqMaxValue(4.45); break; } } fitModel->setSignalDtPdf( dtPdf ); //Background dt part - //fitModel->setBkgndDtPdf("Bkgnd1",dtPdf); - //fitModel->setBkgndDtPdf("Bkgnd2",dtPdf); + /* + TFile* background_dt = TFile::Open("Lifetimes_PV_WL.root"); + TH1* bkgnd1DtHist = dynamic_cast( background_dt->Get("Bkgnd1") ); + TH1* bkgnd2DtHist = dynamic_cast( background_dt->Get("Bkgnd2") ); + TH1* bkgnd1DtErrHist = dynamic_cast( background_dt->Get("Bkgnd1_Err") ); + TH1* bkgnd2DtErrHist = dynamic_cast( background_dt->Get("Bkgnd2_Err") ); + LauDecayTimePdf* bkgnd1DtPdf{nullptr}; + LauDecayTimePdf* bkgnd2DtPdf{nullptr}; + if ( settings.timeResolution and settings.perEventTimeErr ) { + bkgnd1DtPdf= new LauDecayTimePdf( "decayTime", minDt, maxDt, bkgnd1DtHist, bkgnd1DtErrHist ); + bkgnd2DtPdf= new LauDecayTimePdf( "decayTime", minDt, maxDt, bkgnd2DtHist, bkgnd2DtErrHist ); + } else { + bkgnd1DtPdf= new LauDecayTimePdf( "decayTime", minDt, maxDt, bkgnd1DtHist ); + bkgnd2DtPdf= new LauDecayTimePdf( "decayTime", minDt, maxDt, bkgnd2DtHist ); + } + fitModel->setBkgndDtPdf("Bkgnd1",bkgnd1DtPdf); + fitModel->setBkgndDtPdf("Bkgnd2",bkgnd2DtPdf); + */ + + // set the signal yield + Double_t nSigEvents{0}; + switch (settings.dType) { + case LauTimeDepFitModel::CPEigenvalue::CPEven : + nSigEvents = 15000; + break; + case LauTimeDepFitModel::CPEigenvalue::CPOdd : 
+ nSigEvents = 5000; + break; + case LauTimeDepFitModel::CPEigenvalue::QFS : + nSigEvents = 50000; + break; + } + + // set the background yields + /* + const Double_t nBkgnd1(100), nBkgnd2(200); + LauParameter* Bkgnd1Yield = new LauParameter("Bkgnd1",nBkgnd1,-1.0*nBkgnd1,2.0*nBkgnd1,kFALSE); + LauParameter* Bkgnd2Yield = new LauParameter("Bkgnd2",nBkgnd2,-1.0*nBkgnd2,2.0*nBkgnd2,kFALSE); + fitModel->setNBkgndEvents(Bkgnd1Yield); + fitModel->setNBkgndEvents(Bkgnd2Yield); + */ - // set the number of signal events std::cout<<"nSigEvents = "<setNSigEvents(nSigPar); // set the number of experiments if (settings.command == Command::Generate) { fitModel->setNExpts(settings.nExptGen, settings.firstExptGen); } else { fitModel->setNExpts(settings.nExptFit, settings.firstExptFit); } fitModel->useAsymmFitErrors(kFALSE); fitModel->useRandomInitFitPars(kFALSE); - fitModel->doPoissonSmearing(kFALSE); - fitModel->doEMLFit(kFALSE); fitModel->writeLatexTable(kFALSE); + const Bool_t haveBkgnds = ( fitModel->nBkgndClasses() > 0 ); + fitModel->doPoissonSmearing(haveBkgnds); + fitModel->doEMLFit(haveBkgnds); TString dTypeStr; switch (settings.dType) { case LauTimeDepFitModel::CPEigenvalue::CPEven : dTypeStr = "CPEven"; break; case LauTimeDepFitModel::CPEigenvalue::CPOdd : dTypeStr = "CPOdd"; break; case LauTimeDepFitModel::CPEigenvalue::QFS : dTypeStr = "QFS"; break; } TString dataFile(""); TString treeName("fitTree"); TString rootFileName(""); TString tableFileName(""); TString fitToyFileName(""); TString splotFileName(""); dataFile = "TEST-Dpipi"; dataFile += "_"+dTypeStr; switch(settings.timeEffModel) { - case LauDecayTimePdf::EfficiencyMethod::Spline: + case LauDecayTime::EfficiencyMethod::Spline: dataFile += "_Spline"; break; - case LauDecayTimePdf::EfficiencyMethod::Binned: - dataFile += "_Hist"; + case LauDecayTime::EfficiencyMethod::Binned: + dataFile += "_Binned"; break; - case LauDecayTimePdf::EfficiencyMethod::Flat: - dataFile += "_Flat"; + case 
LauDecayTime::EfficiencyMethod::Uniform: + dataFile += "_Uniform"; break; } if (settings.timeResolution) { if (settings.perEventTimeErr) { dataFile += "_DTRperevt"; } else { dataFile += "_DTRavg"; } } else { dataFile += "_DTRoff"; } dataFile += "_expts"; dataFile += settings.firstExptGen; dataFile += "-"; dataFile += settings.firstExptGen+settings.nExptGen-1; dataFile += ".root"; if (settings.command == Command::Generate) { rootFileName = "dummy.root"; tableFileName = "genResults"; } else { rootFileName = "fit"; rootFileName += settings.iFit; rootFileName += "_Results_"; rootFileName += dTypeStr; rootFileName += "_expts"; rootFileName += settings.firstExptFit; rootFileName += "-"; rootFileName += settings.firstExptFit+settings.nExptFit-1; rootFileName += ".root"; tableFileName = "fit"; tableFileName += settings.iFit; tableFileName += "_Results_"; tableFileName += dTypeStr; tableFileName += "_expts"; tableFileName += settings.firstExptFit; tableFileName += "-"; tableFileName += settings.firstExptFit+settings.nExptFit-1; fitToyFileName = "fit"; fitToyFileName += settings.iFit; fitToyFileName += "_ToyMC_"; fitToyFileName += dTypeStr; fitToyFileName += "_expts"; fitToyFileName += settings.firstExptFit; fitToyFileName += "-"; fitToyFileName += settings.firstExptFit+settings.nExptFit-1; fitToyFileName += ".root"; splotFileName = "fit"; splotFileName += settings.iFit; splotFileName += "_sPlot_"; splotFileName += dTypeStr; splotFileName += "_expts"; splotFileName += settings.firstExptFit; splotFileName += "-"; splotFileName += settings.firstExptFit+settings.nExptFit-1; splotFileName += ".root"; } // Generate toy from the fitted parameters //fitModel->compareFitData(1, fitToyFileName); // Write out per-event likelihoods and sWeights //fitModel->writeSPlotData(splotFileName, "splot", kFALSE); // Execute the generation/fit switch (settings.command) { case Command::Generate : fitModel->run( "gen", dataFile, treeName, rootFileName, tableFileName ); break; case Command::Fit : 
fitModel->run( "fit", dataFile, treeName, rootFileName, tableFileName ); break; case Command::SimFit : fitModel->runTask( dataFile, treeName, rootFileName, tableFileName, "localhost", settings.port ); break; } return EXIT_SUCCESS; } diff --git a/inc/Lau1DCubicSpline.hh b/inc/Lau1DCubicSpline.hh index 2beac6d..cb039fe 100644 --- a/inc/Lau1DCubicSpline.hh +++ b/inc/Lau1DCubicSpline.hh @@ -1,251 +1,260 @@ /* Copyright 2015 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file Lau1DCubicSpline.hh \brief File containing declaration of Lau1DCubicSpline class. */ /*! \class Lau1DCubicSpline \brief Class for defining a 1D cubic spline based on a set of knots. Class for defining a 1D cubic spline based on a set of knots. Interpolation between the knots is performed either by one of two types of cubic spline (standard or Akima) or by linear interpolation. The splines are defined by a piecewise cubic function which, between knots i and i+1, has the form f_i(x) = (1 - t)*y_i + t*y_i+1 + t*(1 - t)(a*(1 - t) + b*t) where t = (x - x_i)/(x_i+1 - x_i), a = k_i *(x_i+1 - x_i) - (y_i+1 - y_i), b = -k_i+1*(x_i+1 - x_i) + (y_i+1 - y_i) and k_i is (by construction) the first derivative at knot i. f(x) and f'(x) are continuous at the internal knots by construction. For the standard splines, f''(x) is required to be continuous at all internal knots placing n-2 constraints on the n parameters, k_i. 
The final two constraints are set by the boundary conditions. At each boundary, the function may be: (i) Clamped : f'(x) = C at the last knot (ii) Natural : f''(x) = 0 at the last knot (iii) Not a knot : f'''(x) continuous at the second last knot The algorithms used in these splines can be found on: http://en.wikipedia.org/wiki/Spline_interpolation#Algorithm_to_find_the_interpolating_cubic_spline http://en.wikipedia.org/wiki/Tridiagonal_matrix_algorithm For the Akima splines, the values of k_i are determined from the slopes of the four nearest segments (a_i-1, a_i, a_i+1 and a_i+2) as k_i = ( | a_i+2 - a_i+1 | a_i + | a_i - a_i-1 | a_i+1 ) / ( | a_i+2 - a_i+1 | + | a_i - a_i-1 | ) and as k_i = ( a_i + a_i+1 ) / 2 in the special case a_i-1 == a_i != a_i+1 == a_i+2. Boundary conditions are specified by the relations a_2 - a_1 = a_1 - a_0 = a_0 - a_-1 and a_n-1 - a_n-2 = a_n - a_n-1 = a_n+1 - a_n The algorithms used in these splines can be found in: J.ACM vol. 17 no. 4 pp 589-602 */ #ifndef LAU_1DCUBICSPLINE #define LAU_1DCUBICSPLINE #include #include #include "LauParameter.hh" #include "Rtypes.h" #include "TF1.h" class Lau1DCubicSpline { public: //! Define the allowed interpolation types enum LauSplineType { StandardSpline, /*!< standard cubic splines with f''(x) continuous at all internal knots */ AkimaSpline, /*!< Akima cubic splines with f'(x) at each knot defined locally by the positions of only five knots */ LinearInterpolation /*! Linear interpolation between each pair of knots */ }; //! Define the allowed boundary condition types /*! These are only supported by standard splines */ enum LauSplineBoundaryType { Clamped, /*!< clamped boundary - f'(x) = C */ Natural, /*!< natural boundary - f''(x) = 0 */ NotAKnot /*!< 'not a knot' boundary - f'''(x) continuous at second last knot */ }; //! Constructor /*! 
/param [in] xs the x-values of the knots /param [in] ys the y-values of the knots /param [in] leftBound the left-hand boundary condition /param [in] rightBound the right-hand boundary condition /param [in] dydx0 the gradient at the left-hand boundary of a clamped spline /param [in] dydxn the gradient at the right-hand boundary of a clamped spline */ Lau1DCubicSpline(const std::vector& xs, const std::vector& ys, LauSplineType type = Lau1DCubicSpline::StandardSpline, LauSplineBoundaryType leftBound = Lau1DCubicSpline::NotAKnot, LauSplineBoundaryType rightBound = Lau1DCubicSpline::NotAKnot, Double_t dydx0 = 0.0, Double_t dydxn = 0.0); //! Destructor virtual ~Lau1DCubicSpline(); //! Evaluate the function at given point /*! \param [in] x the x-coordinate \return the value of the spline at x */ Double_t evaluate(Double_t x) const; //! Update the y-values of the knots /*! \param [in] ys the y-values of the knots */ void updateYValues(const std::vector& ys); + //! Update the y-values of the knots + /*! + \param [in] ys the y-values of the knots + */ void updateYValues(const std::vector& ys); + //! Update the y-values of the knots + /*! + \param [in] ys the y-values of the knots + */ + void updateYValues(const std::vector& ys); //! Update the type of interpolation to perform /*! \param [in] type the type of interpolation */ void updateType(LauSplineType type); //! Update the boundary conditions for the spline /*! /param [in] leftBound the left-hand boundary condition /param [in] rightBound the right-hand boundary condition /param [in] dydx0 the gradient at the left-hand boundary of a clamped spline /param [in] dydxn the gradient at the right-hand boundary of a clamped spline */ void updateBoundaryConditions(LauSplineBoundaryType leftBound, LauSplineBoundaryType rightBound, Double_t dydx0 = 0.0, Double_t dydxn = 0.0); //! Return the number of knots UInt_t getnKnots() const {return nKnots_;} //! Get y values const std::vector& getYValues() const {return y_;} //! 
Get x values const std::vector& getXValues() const {return x_;} //! Get the coefficients of spline section i in the form a + bx + cx^2 + dx^3 /*! \params [in] i refers to the left-hand index of the knot (i = 0 gets the params between x_0 and x_1) \return coefficients {a, b, c, d} */ std::array getCoefficients(const UInt_t i, const bool normalise = false) const; //! Get the integral over all the spline segments Double_t integral() const; //! Make a TF1 showing the spline with its current knot values /*! \params [in] normalise whether or not you want the spline normalised \return 1D function object */ TF1* makeTF1(const bool normalise = false) const; //! Fit the the normalisation of the spline to a TH1 //! So they look as good as possible when plotted on top of one another //! Useful when a sample has been generated with a hist and fitted with a spline to compare them /*! \params [in] The histogram to be fit to \return a TF1 fit to `hist` */ TF1* normaliseToTH1(TH1* hist) const; //! Fit the spline to a TH1 //! Useful as a starting-point for fitting a spline to data (maybe the hist is taken from MC) /*! \params [in] The histogram to be fit to */ void fitToTH1(TH1* hist); private: //! Copy constructor - not implemented Lau1DCubicSpline( const Lau1DCubicSpline& rhs ); //! Copy assignment operator - not implemented Lau1DCubicSpline& operator=(const Lau1DCubicSpline& rhs); //! Initialise the class void init(); //! Calculate the first derivative at each knot void calcDerivatives(); //! Calculate the first derivatives according to the standard method void calcDerivativesStandard(); //! Calculate the first derivatives according to the Akima method void calcDerivativesAkima(); //! The number of knots in the spline const UInt_t nKnots_; //! The x-value at each knot std::vector x_; //! The y-value at each knot std::vector y_; //! The first derivative at each knot std::vector dydx_; //! The 'a' coefficients used to determine the derivatives std::vector a_; //! 
The 'b' coefficients used to determine the derivatives std::vector b_; //! The 'c' coefficients used to determine the derivatives std::vector c_; //! The 'd' coefficients used to determine the derivatives std::vector d_; //! The type of interpolation to be performed LauSplineType type_; //! The left-hand boundary condition on the spline LauSplineBoundaryType leftBound_; //! The right-hand boundary condition on the spline LauSplineBoundaryType rightBound_; //! The gradient at the left boundary for a clamped spline Double_t dydx0_; //! The gradient at the right boundary for a clamped spline Double_t dydxn_; ClassDef(Lau1DCubicSpline, 0); // Class for defining a 1D cubic spline }; #endif diff --git a/inc/LauAbsDecayTimeCalculator.hh b/inc/LauAbsDecayTimeCalculator.hh new file mode 100644 index 0000000..04d4ed2 --- /dev/null +++ b/inc/LauAbsDecayTimeCalculator.hh @@ -0,0 +1,127 @@ + +/* +Copyright 2021 University of Warwick + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Laura++ package authors: +John Back +Paul Harrison +Thomas Latham +*/ + +/*! \file LauAbsDecayTimeCalculator.hh + \brief File containing declaration of LauAbsDecayTimeCalculator class. +*/ + +#ifndef LAU_ABS_DECAYTIME_CALCULATOR +#define LAU_ABS_DECAYTIME_CALCULATOR + +#include + +#include "Rtypes.h" + +class LauParameter; + +/*! \struct LauDecayTimeTerms + \brief Struct for gathering the normalisation terms for the decay time model +*/ + +struct LauDecayTimeTerms { + //! 
The exp(-G*t) term + Double_t expTerm{0.0}; + //! The cos(Dm*t) term (multiplied by the exponential) + Double_t cosTerm{0.0}; + //! The sin(Dm*t) term (multiplied by the exponential) + Double_t sinTerm{0.0}; + //! The cosh(DG/2*t) term (multiplied by the exponential) + Double_t coshTerm{0.0}; + //! The sinh(DG/2*t) term (multiplied by the exponential) + Double_t sinhTerm{0.0}; + + /* TODO do we need these? + //! Define operator+= so we can sum these + LauDecayTimeTerms& operator+=( const LauDecayTimeTerms& rhs ) + { + expTerm += rhs.expTerm; + cosTerm += rhs.cosTerm; + sinTerm += rhs.sinTerm; + coshTerm += rhs.coshTerm; + sinhTerm += rhs.sinhTerm; + return *this; + } + + //! Define operator*= so we can scale by a constant + LauDecayTimeTerms& operator*=( const Double_t scale ) + { + expTerm *= scale; + cosTerm *= scale; + sinTerm *= scale; + coshTerm *= scale; + sinhTerm *= scale; + return *this; + } + */ +}; + + +/*! \class LauAbsDecayTimeCalculator + \brief Class for defining the abstract interface for calculating the terms of the decay time model +*/ + +class LauAbsDecayTimeCalculator { + + public: + //! Cache information from data + /*! + \param [in] abscissas the values of the per-event decay time + \param [in] abscissaErrors the values of the per-event decay time error + */ + virtual void cacheInfo( const std::vector& abscissas, const std::vector& abscissaErrors ) = 0; + + //! Propagate any updates to parameters and recalculate information as neeeded + virtual void propagateParUpdates() = 0; + + //! Calculate the terms for given values of decay time and decay time error + /*! + \param [in] abscissa the decay time value + \param [in] abscissaError the decay time error value (actually {1.0,err} to allow branchless control of scaling) + */ + virtual LauDecayTimeTerms calcTerms( const Double_t abscissa, const std::array& abscissaError ) = 0; + + //! Retrieve the terms for a given event + /*! 
+ \param [in] iEvt the event index + */ + virtual LauDecayTimeTerms getTerms( const std::size_t iEvt ) const = 0; + + //! Constructor + LauAbsDecayTimeCalculator() = default; + //! Destructor + virtual ~LauAbsDecayTimeCalculator() = default; + //! Copy constructor (deleted) + LauAbsDecayTimeCalculator(const LauAbsDecayTimeCalculator& other) = delete; + //! Copy assignment operator (deleted) + LauAbsDecayTimeCalculator& operator=(const LauAbsDecayTimeCalculator& other) = delete; + //! Move constructor (default) + LauAbsDecayTimeCalculator(LauAbsDecayTimeCalculator&& other) = default; + //! Move assignment operator (default) + LauAbsDecayTimeCalculator& operator=(LauAbsDecayTimeCalculator&& other) = default; + + private: + ClassDef(LauAbsDecayTimeCalculator, 0) +}; + +#endif diff --git a/inc/LauAbsDecayTimeEfficiency.hh b/inc/LauAbsDecayTimeEfficiency.hh new file mode 100644 index 0000000..8be89db --- /dev/null +++ b/inc/LauAbsDecayTimeEfficiency.hh @@ -0,0 +1,91 @@ + +/* +Copyright 2021 University of Warwick + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Laura++ package authors: +John Back +Paul Harrison +Thomas Latham +*/ + +/*! \file LauAbsDecayTimeEfficiency.hh + \brief File containing declaration of LauAbsDecayTimeEfficiency class. +*/ + +/*!
 \class LauAbsDecayTimeEfficiency + \brief Class for defining the abstract interface for modelling decay time efficiency +*/ + +#ifndef LAU_ABS_DECAYTIME_EFFICIENCY +#define LAU_ABS_DECAYTIME_EFFICIENCY + +#include + +#include "Rtypes.h" + +class LauAbsRValue; + + +class LauAbsDecayTimeEfficiency { + + public: + //! Retrieve the efficiency for a given value of the decay time + /*! + \param abscissa the value of the decay time + \return the efficiency + */ + virtual Double_t getEfficiency( const Double_t abscissa ) const = 0; + + //! Retrieve the parameters of the efficiency model so that they can be loaded into a fit + /*! + \return the parameters of the efficiency model + */ + virtual std::vector getParameters() = 0; + + //! Initialise the parameter cache + /*! + Must be called prior to starting fitting or generation + */ + virtual void initialise() = 0; + + //! Propagate any updates to parameters and recalculate information as needed + /*! + Should be called at each fit iteration + */ + virtual void propagateParUpdates() = 0; + + //! Retrieve whether any of the parameters have changed in the latest fit iteration + virtual const bool& anythingChanged() const = 0; + + //! Constructor + LauAbsDecayTimeEfficiency() = default; + //! Destructor + virtual ~LauAbsDecayTimeEfficiency() = default; + //! Copy constructor (deleted) + LauAbsDecayTimeEfficiency(const LauAbsDecayTimeEfficiency& other) = delete; + //! Copy assignment operator (deleted) + LauAbsDecayTimeEfficiency& operator=(const LauAbsDecayTimeEfficiency& other) = delete; + //! Move constructor (default) + LauAbsDecayTimeEfficiency(LauAbsDecayTimeEfficiency&& other) = default; + //!
Move assignment operator (default) + LauAbsDecayTimeEfficiency& operator=(LauAbsDecayTimeEfficiency&& other) = default; + + private: + ClassDef(LauAbsDecayTimeEfficiency, 0) +}; + +#endif diff --git a/inc/LauAbsDecayTimeIntegrator.hh b/inc/LauAbsDecayTimeIntegrator.hh new file mode 100644 index 0000000..29acc9e --- /dev/null +++ b/inc/LauAbsDecayTimeIntegrator.hh @@ -0,0 +1,123 @@ + +/* +Copyright 2021 University of Warwick + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Laura++ package authors: +John Back +Paul Harrison +Thomas Latham +*/ + +/*! \file LauAbsDecayTimeIntegrator.hh + \brief File containing declaration of LauAbsDecayTimeIntegrator class. +*/ + +#ifndef LAU_ABS_DECAYTIME_INTEGRATOR +#define LAU_ABS_DECAYTIME_INTEGRATOR + +#include + +#include "Rtypes.h" + +class LauParameter; + +/*! \struct LauDecayTimeNormTerms + \brief Struct for gathering the normalisation terms for the decay time model +*/ + +struct LauDecayTimeNormTerms { + //! Normalisation of the exponential term + Double_t expTerm{0.0}; + //! Normalisation of the cos term + Double_t cosTerm{0.0}; + //! Normalisation of the sin term + Double_t sinTerm{0.0}; + //! Normalisation of the cosh term + Double_t coshTerm{0.0}; + //! Normalisation of the sinh term + Double_t sinhTerm{0.0}; + + //! 
Define operator+= so we can sum these + LauDecayTimeNormTerms& operator+=( const LauDecayTimeNormTerms& rhs ) + { + expTerm += rhs.expTerm; + cosTerm += rhs.cosTerm; + sinTerm += rhs.sinTerm; + coshTerm += rhs.coshTerm; + sinhTerm += rhs.sinhTerm; + return *this; + } + + //! Define operator*= so we can scale by a constant + LauDecayTimeNormTerms& operator*=( const Double_t scale ) + { + expTerm *= scale; + cosTerm *= scale; + sinTerm *= scale; + coshTerm *= scale; + sinhTerm *= scale; + return *this; + } +}; + + +/*! \class LauAbsDecayTimeIntegrator + \brief Class for defining the abstract interface for performing the integration of the decay time model +*/ + +class LauAbsDecayTimeIntegrator { + + public: + //! Cache information from data + /*! + \param [in] abscissaErrors the values of the per-event decay time error + */ + virtual void cacheInfo( const std::vector& abscissaErrors ) = 0; + + //! Propagate any updates to parameters and recalculate information as neeeded + virtual void propagateParUpdates() = 0; + + //! Retrieve the normalisation terms for a given event + /*! + \param [in] iEvt the event index + */ + virtual LauDecayTimeNormTerms getNormTerms( const std::size_t iEvt ) const = 0; + + //! Calculate the normalisation terms for a given value of the decay time error + /*! + \param [in] abscissaError the decay time error value (actually {1.0,err} to allow branchless control of scaling) + */ + virtual LauDecayTimeNormTerms calcNormTerms( const std::array& abscissaError = {1.0,1.0} ) const = 0; + + //! Constructor + LauAbsDecayTimeIntegrator() = default; + //! Destructor + virtual ~LauAbsDecayTimeIntegrator() = default; + //! Copy constructor (deleted) + LauAbsDecayTimeIntegrator(const LauAbsDecayTimeIntegrator& other) = delete; + //! Copy assignment operator (deleted) + LauAbsDecayTimeIntegrator& operator=(const LauAbsDecayTimeIntegrator& other) = delete; + //! 
 Move constructor (default) + LauAbsDecayTimeIntegrator(LauAbsDecayTimeIntegrator&& other) = default; + //! Move assignment operator (default) + LauAbsDecayTimeIntegrator& operator=(LauAbsDecayTimeIntegrator&& other) = default; + + private: + ClassDef(LauAbsDecayTimeIntegrator, 0) +}; + +#endif diff --git a/inc/LauBinnedDecayTimeEfficiency.hh b/inc/LauBinnedDecayTimeEfficiency.hh new file mode 100644 index 0000000..cdcf6fd --- /dev/null +++ b/inc/LauBinnedDecayTimeEfficiency.hh @@ -0,0 +1,114 @@ + +/* +Copyright 2021 University of Warwick + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Laura++ package authors: +John Back +Paul Harrison +Thomas Latham +*/ + +/*! \file LauBinnedDecayTimeEfficiency.hh + \brief File containing declaration of LauBinnedDecayTimeEfficiency class. +*/ + +/*! \class LauBinnedDecayTimeEfficiency + \brief Class for defining a binned model for decay time efficiency +*/ + +#ifndef LAU_BINNED_DECAYTIME_EFFICIENCY +#define LAU_BINNED_DECAYTIME_EFFICIENCY + +#include +#include + +#include "Rtypes.h" +#include "TH1.h" + +#include "LauAbsDecayTimeEfficiency.hh" + +class LauAbsRValue; + + +class LauBinnedDecayTimeEfficiency : public LauAbsDecayTimeEfficiency { + + public: + //! Constructor + explicit LauBinnedDecayTimeEfficiency( const TH1& effHist ); + + //! Retrieve the efficiency for a given value of the decay time + /*!
+ \param abscissa the value of the decay time + \return the efficiency + */ + Double_t getEfficiency( const Double_t abscissa ) const override; + + //! Retrieve the parameters of the efficiency model so that they can be loaded into a fit + /*! + \return the parameters of the efficiency model + */ + std::vector getParameters() override { return {}; } + + //! Initialise the parameter cache + /*! + Must be called prior to starting fitting or generation + */ + void initialise() override {} + + //! Propagate any updates to parameters and recalculate information as neeeded + /*! + Should be called at each fit iteration + */ + void propagateParUpdates() override {} + + //! Retrieve whether any of the parameters have changed in the latest fit iteration + const bool& anythingChanged() const override { return anythingChanged_; } + + //! Struct to group all info on decay time efficiency bins + struct BinInfo { + //! Constructor + BinInfo( Double_t lowEdge, Double_t highEdge, Double_t eff ) : + loEdge{lowEdge}, + hiEdge{highEdge}, + efficiency{eff} + { + } + //! The low edge of the bin + Double_t loEdge; + //! The high edge of the bin + Double_t hiEdge; + //! The efficiency value + Double_t efficiency; + }; + + //! Obtain the binning information + /*! + \return the information on each bin + */ + std::vector getBinningInfo() const; + + private: + //! The binned values of the efficiency + const std::unique_ptr effHist_; + + //! Nothing will change but we need to be able to return that information as a reference + const bool anythingChanged_{false}; + + ClassDefOverride(LauBinnedDecayTimeEfficiency, 0) +}; + +#endif diff --git a/inc/LauDecayTime.hh b/inc/LauDecayTime.hh new file mode 100644 index 0000000..098590c --- /dev/null +++ b/inc/LauDecayTime.hh @@ -0,0 +1,59 @@ + +/* +Copyright 2021 University of Warwick + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Laura++ package authors: +John Back +Paul Harrison +Thomas Latham +*/ + +/*! \file LauDecayTime.hh + \brief File containing declaration of utilities for decay time modelling +*/ + +#ifndef LAU_DECAYTIME_UTILS +#define LAU_DECAYTIME_UTILS + +namespace LauDecayTime { + + // TODO - can we think of better names? + //! The functional form of the decay time PDF + enum class FuncType { + Hist, //< Hist PDF for fixed background + Delta, //< Delta function - for prompt background + Exp, //< Exponential function - for non-prompt background or charged B's + DeltaExp, //< Delta + Exponential function - for background with prompt and non-prompt parts + ExpTrig, //< Exponential function with Delta m driven mixing - for neutral B_d's + ExpHypTrig //< Exponential function with both Delta m and Delta Gamma driven mixing - for neutral B_s's + }; + + //! How is the decay time measured - absolute or difference? + enum class TimeMeasurementMethod { + DecayTime, //< Absolute measurement of decay time, e.g. LHCb scenario + DecayTimeDiff //< Measurement of the difference of two decay times, e.g. BaBar/Belle(II) scenario + }; + + //! How is the TD efficiency information going to be given? 
+ enum class EfficiencyMethod { + Uniform, //< As a uniform distribution (constant) + Binned, //< As a histogram (TH1D/TH1F) + Spline //< As a cubic spline (or products thereof) + }; +} + +#endif diff --git a/inc/LauDecayTimePdf.hh b/inc/LauDecayTimePdf.hh index 0c23a77..b119c56 100644 --- a/inc/LauDecayTimePdf.hh +++ b/inc/LauDecayTimePdf.hh @@ -1,762 +1,529 @@ /* Copyright 2006 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauDecayTimePdf.hh \brief File containing declaration of LauDecayTimePdf class. */ /*! \class LauDecayTimePdf \brief Class for defining the PDFs used in the time-dependent fit model to describe the decay time. LauDecayTimePdf is a class that provides the PDFs for describing the time-dependence of the various terms in a particle/antiparticle decay to a common final state. The various terms have the form of exponentially decaying trigonometric or hyperbolic functions convolved with a N-Gaussian resolution function. 
*/ #ifndef LAU_DECAYTIME_PDF #define LAU_DECAYTIME_PDF -#include #include -#include +#include +#include #include "TString.h" + #include "LauAbsRValue.hh" -#include "LauFitDataTree.hh" #include "LauComplex.hh" +#include "LauDecayTime.hh" +#include "LauDecayTimePhysicsModel.hh" +#include "LauDecayTimeResolution.hh" +#include "LauAbsDecayTimeCalculator.hh" +#include "LauAbsDecayTimeEfficiency.hh" +#include "LauAbsDecayTimeIntegrator.hh" +#include "LauFitDataTree.hh" +#include "LauBinnedDecayTimeEfficiency.hh" +#include "LauSplineDecayTimeEfficiency.hh" +#include "LauUniformDecayTimeEfficiency.hh" +#include "LauNonSmearedSplineEfficiencyDecayTimeIntegrator.hh" +#include "LauSmearedSplineEfficiencyDecayTimeIntegrator.hh" class TH1; class Lau1DHistPdf; -class Lau1DCubicSpline; class LauKinematics; // TODO - Should this have Pdf in the name? // - Audit function names and public/private access category // - Audit what should be given to constructor and what can be set later (maybe different constructors for different scenarios, e.g. smeared with per-event error/smeared with avg error/not smeared) class LauDecayTimePdf final { public: - // TODO - can we think of better names? - //! The functional form of the decay time PDF - enum FuncType { - Hist, //< Hist PDF for fixed background - Delta, //< Delta function - for prompt background - Exp, //< Exponential function - for non-prompt background or charged B's - DeltaExp, //< Delta + Exponential function - for background with prompt and non-prompt parts - ExpTrig, //< Exponential function with Delta m driven mixing - for neutral B_d's - ExpHypTrig //< Exponential function with both Delta m and Delta Gamma driven mixing - for neutral B_s's - }; - - //! How is the decay time measured - absolute or difference? - enum TimeMeasurementMethod { - DecayTime, //< Absolute measurement of decay time, e.g. LHCb scenario - DecayTimeDiff //< Measurement of the difference of two decay times, e.g. BaBar/Belle(II) scenario - }; - - //! 
How is the TD efficiency information going to be given? - enum EfficiencyMethod { - Spline, //< As a cubic spline - Binned, //< As a histogram (TH1D/TH1F) - Flat //< As a flat distribution (constant) - }; - - //! Constructor + //! Constructor for FuncType::Hist form /*! + In this form the decay time error distribution is not supplied. + NB only to be used if ALL decay time functions in the model do not use the decay time error. + \param [in] theVarName the name of the decay time variable in the input data - \param [in] theVarErrName the name of the decay time error variable in the input data - \param [in] params the parameters of the PDF - \param [in] minAbscissaVal the minimum value of the abscissa - \param [in] maxAbscissaVal the maximum value of the abscissa - \param [in] minAbscissaErr the minimum value of the abscissa error - \param [in] maxAbscissaErr the maximum value of the abscissa error - \param [in] type the functional form of the PDF - \param [in] nGauss the number of Gaussians in the resolution function - \param [in] scale controls whether the Gaussian parameters are scaled by the per-event error - \param [in] method set the type of the time measurement used in the given experiment + \param [in] minAbscissaVal the minimum value of the decay time + \param [in] maxAbscissaVal the maximum value of the decay time + \param [in] dtHist the histogram to define the decay time dependence + \param [in] method set the type of the time measurement used in the given experiment (defaults to absolute decay time measurement) */ - LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector& params, - const Double_t minAbscissaVal, const Double_t maxAbscissaVal, - const Double_t minAbscissaErr, const Double_t maxAbscissaErr, - const FuncType type, const UInt_t nGauss, const std::vector& scale, const TimeMeasurementMethod method, const EfficiencyMethod effMethod = EfficiencyMethod::Spline); + LauDecayTimePdf(const TString& theVarName, const 
Double_t minAbscissaVal, const Double_t maxAbscissaVal, + const TH1* dtHist, + const LauDecayTime::TimeMeasurementMethod method = LauDecayTime::TimeMeasurementMethod::DecayTime); - //! Constructor + //! Constructor for FuncType::Hist form /*! + In this form the decay time error distribution is also supplied. + If other components in your model use the decay time error, you must use this form. + \param [in] theVarName the name of the decay time variable in the input data + \param [in] minAbscissaVal the minimum value of the decay time + \param [in] maxAbscissaVal the maximum value of the decay time \param [in] theVarErrName the name of the decay time error variable in the input data - \param [in] params the parameters of the PDF - \param [in] minAbscissaVal the minimum value of the abscissa - \param [in] maxAbscissaVal the maximum value of the abscissa - \param [in] minAbscissaErr the minimum value of the abscissa error - \param [in] maxAbscissaErr the maximum value of the abscissa error - \param [in] type the functional form of the PDF - \param [in] nGauss the number of Gaussians in the resolution function - \param [in] scaleMeans controls whether the Gaussian mean parameters are scaled by the per-event error - \param [in] scaleWidths controls whether the Gaussian width parameters are scaled by the per-event error - \param [in] method set the type of the time measurement used in the given experiment + \param [in] minAbscissaErr the minimum value of the decay time error + \param [in] maxAbscissaErr the maximum value of the decay time error + \param [in] dtHist the histogram to define the decay time dependence + \param [in] dtErrHist the histogram to define the decay time error dependence + \param [in] method set the type of the time measurement used in the given experiment (defaults to absolute decay time measurement) + */ + LauDecayTimePdf(const TString& theVarName, const Double_t minAbscissaVal, const Double_t maxAbscissaVal, + const TString& theVarErrName, const 
Double_t minAbscissaErr, const Double_t maxAbscissaErr, + const TH1* dtHist, + const TH1* dtErrHist, + const LauDecayTime::TimeMeasurementMethod method = LauDecayTime::TimeMeasurementMethod::DecayTime); + + //! Constructor for parametric forms, in absence of decay time resolution + /*! + \param [in] theVarName the name of the decay time variable in the input data + \param [in] minAbscissaVal the minimum value of the decay time + \param [in] maxAbscissaVal the maximum value of the decay time + \param [in] physicsModel the functional form of the model + \param [in] method set the type of the time measurement used in the given experiment (defaults to absolute decay time measurement) */ - LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector& params, - const Double_t minAbscissaVal, const Double_t maxAbscissaVal, - const Double_t minAbscissaErr, const Double_t maxAbscissaErr, - const FuncType type, const UInt_t nGauss, const std::vector& scaleMeans, - const std::vector& scaleWidths, const TimeMeasurementMethod method, const EfficiencyMethod effMethod = EfficiencyMethod::Spline); + LauDecayTimePdf(const TString& theVarName, const Double_t minAbscissaVal, const Double_t maxAbscissaVal, + std::unique_ptr physicsModel, + const LauDecayTime::TimeMeasurementMethod method = LauDecayTime::TimeMeasurementMethod::DecayTime); + + //! Constructor for parametric forms, with decay time resolution (without use of event-by-event error) + /*! 
+ \param [in] theVarName the name of the decay time variable in the input data + \param [in] minAbscissaVal the minimum value of the decay time + \param [in] maxAbscissaVal the maximum value of the decay time + \param [in] physicsModel the functional form of the physics model + \param [in] resolutionModel the functional form of the resolution model + \param [in] method set the type of the time measurement used in the given experiment (defaults to absolute decay time measurement) + */ + LauDecayTimePdf(const TString& theVarName, const Double_t minAbscissaVal, const Double_t maxAbscissaVal, + std::unique_ptr physicsModel, + std::unique_ptr resolutionModel, + const LauDecayTime::TimeMeasurementMethod method = LauDecayTime::TimeMeasurementMethod::DecayTime); + + //! Constructor for parametric forms, with decay time resolution (including use of event-by-event error) + /*! + \param [in] theVarName the name of the decay time variable in the input data + \param [in] minAbscissaVal the minimum value of the decay time + \param [in] maxAbscissaVal the maximum value of the decay time + \param [in] theVarErrName the name of the decay time error variable in the input data + \param [in] minAbscissaErr the minimum value of the decay time error + \param [in] maxAbscissaErr the maximum value of the decay time error + \param [in] physicsModel the functional form of the physics model + \param [in] resolutionModel the functional form of the resolution model + \param [in] dtErrHist the histogram to define the decay time error dependence + \param [in] method set the type of the time measurement used in the given experiment (defaults to absolute decay time measurement) + */ + LauDecayTimePdf(const TString& theVarName, const Double_t minAbscissaVal, const Double_t maxAbscissaVal, + const TString& theVarErrName, const Double_t minAbscissaErr, const Double_t maxAbscissaErr, + std::unique_ptr physicsModel, + std::unique_ptr resolutionModel, + const TH1* dtErrHist, + const 
LauDecayTime::TimeMeasurementMethod method = LauDecayTime::TimeMeasurementMethod::DecayTime); //! Copy constructor (deleted) LauDecayTimePdf(const LauDecayTimePdf& other) = delete; //! Copy assignment operator (deleted) LauDecayTimePdf& operator=(const LauDecayTimePdf& other) = delete; //! Move constructor (deleted) LauDecayTimePdf(LauDecayTimePdf&& other) = delete; //! Move assignment operator (deleted) LauDecayTimePdf& operator=(LauDecayTimePdf&& other) = delete; //! Destructor - ~LauDecayTimePdf(); + ~LauDecayTimePdf() = default; - // TODO - Do we need this? - // - If so, should it be a hist or a LauAbsPdf? - // - Or should there be a dedicated constructor for this scenario? - //! Set the Histogram PDF in case of fixed background PDF - void setHistoPdf(const TH1* hist); - - // TODO - should this be a LauAbsPdf instead? - //! Set the histogram to be used for generation of per-event decay time errors - /*! - If not set will fall back to using Landau distribution - - \param [in] hist the histogram of the distribution - */ - void setErrorHisto(const TH1* hist); - - // TODO - do we still want this option? - //! Set the parameters of the Landau distribution used to generate the per-event decay time errors + //! Set the efficiency model to be in binned form /*! - \param [in] mpv the MPV (most probable value) of the distribution - \param [in] sigma the width of the distribution + \param [in] effModel the binned efficiency model */ - void setErrorDistTerms(const Double_t mpv, const Double_t sigma) { - errorDistMPV_ = mpv; - errorDistSigma_ = sigma; - } + void setBinnedEfficiency( std::unique_ptr effModel ); - // TODO - should we remove the EfficiencyMethod argument from the constructor, default to Flat and have these functions modify it? - //! Set the efficiency function in the form of a histogram + //! Set the efficiency model to be in spline interpolated form /*! 
- \param [in] hist the histogram of efficiencies + \param [in] effModel the spline-interpolated efficiency model */ - void setEffiHist(const TH1* hist); + template + void setSplineEfficiency( std::unique_ptr> effModel ); - //! Set the efficiency function in the form of spline - /*! - \param [in] spline the efficiency spline function - */ - void setEffiSpline(Lau1DCubicSpline* spline); + //! Get FuncType from model + LauDecayTime::FuncType getFuncType() const {return type_;} //! Retrieve the name of the variable const TString& varName() const {return varName_;} //! Retrieve the name of the error variable const TString& varErrName() const {return varErrName_;} - // TODO - this should probably be set at construction time - //! Turn on or off the resolution function - void doSmearing(Bool_t smear) {smear_ = smear;} - + // TODO - not sure we need this anymore (at least as a public function) //! Determine if the resolution function is turned on or off - Bool_t doSmearing() const {return smear_;} - - // TODO - we don't use this at the moment - remove it? - //! Calculate single effective decay time resolution from multiple Gaussian resolution functions - /*! - \return effective resolution - */ - Double_t effectiveResolution() const; + bool doSmearing() const {return smear_;} //! Cache information from data /*! \param [in] inputData the dataset to be used to calculate everything */ void cacheInfo(const LauFitDataTree& inputData); //! Calculate the likelihood (and all associated information) given value of the abscissa /*! \param [in] abscissa the value of the abscissa */ void calcLikelihoodInfo(const Double_t abscissa); //! Calculate the likelihood (and all associated information) given value of the abscissa and its error /*! \param [in] abscissa the value of the abscissa \param [in] abscissaErr the error on the abscissa */ void calcLikelihoodInfo(const Double_t abscissa, const Double_t abscissaErr); //! 
Retrieve the likelihood (and all associated information) given the event number /*! \param [in] iEvt the event number */ - void calcLikelihoodInfo(const UInt_t iEvt); + void calcLikelihoodInfo(const std::size_t iEvt); + // TODO shouldn't be public (maybe will be removed anyway) //! Determine the efficiency value for the given abscissa /*! \param [in] abscissa the value of the abscissa \return the corresponding efficiency value */ Double_t calcEffiTerm( const Double_t abscissa ) const; - //! Get FuncType from model - FuncType getFuncType() const {return type_;} - // TODO - should maybe do away with exp term (and it's norm) since it's just the cosh term when DG=0 and it's confusing to have both // - counter argument is to keep it for backgrounds that have a lifetime-like behaviour + // - need to make final decision on this - it is kept for now //! Get the exponential term - Double_t getExpTerm() const {return expTerm_;} + Double_t getExpTerm() const {return terms_.expTerm;} //! Get the cos(Dm*t) term (multiplied by the exponential) - Double_t getCosTerm() const {return cosTerm_;} + Double_t getCosTerm() const {return terms_.cosTerm;} //! Get the sin(Dm*t) term (multiplied by the exponential) - Double_t getSinTerm() const {return sinTerm_;} + Double_t getSinTerm() const {return terms_.sinTerm;} //! Get the cosh(DG/2*t) term (multiplied by the exponential) - Double_t getCoshTerm() const {return coshTerm_;} + Double_t getCoshTerm() const {return terms_.coshTerm;} //! Get the sinh(DG/2*t) term (multiplied by the exponential) - Double_t getSinhTerm() const {return sinhTerm_;} + Double_t getSinhTerm() const {return terms_.sinhTerm;} //! Get the hist term from a histogram - Double_t getHistTerm() const {return pdfTerm_;} + Double_t getHistTerm() const {return histTerm_;} //! Get the normalisation related to the exponential term only - Double_t getNormTermExp() const {return normTermExp_;} + Double_t getNormTermExp() const {return normTerms_.expTerm;} //! 
Get the normalisation related to the cos term only - Double_t getNormTermCos() const {return normTermCos_;} + Double_t getNormTermCos() const {return normTerms_.cosTerm;} //! Get the normalisation related to the sin term only - Double_t getNormTermSin() const {return normTermSin_;} + Double_t getNormTermSin() const {return normTerms_.sinTerm;} //! Get the first term in the normalisation (from integrating the cosh) - Double_t getNormTermCosh() const {return normTermCosh_;} + Double_t getNormTermCosh() const {return normTerms_.coshTerm;} //! Get the second term in the normalisation (from integrating the sinh) - Double_t getNormTermSinh() const {return normTermSinh_;} + Double_t getNormTermSinh() const {return normTerms_.sinhTerm;} //! Get error probability density from Error distribution Double_t getErrTerm() const{return errTerm_;} //! Get efficiency probability density from efficiency distribution Double_t getEffiTerm() const{return effiTerm_;} //! Retrieve the parameters of the PDF, e.g. so that they can be loaded into a fit /*! \return the parameters of the PDF */ const std::vector& getParameters() const { return params_; } //! Retrieve the parameters of the PDF, e.g. so that they can be loaded into a fit /*! \return the parameters of the PDF */ std::vector& getParameters() { return params_; } //! Update the pulls for all parameters void updatePulls(); - //! Calculate the normalisation of all terms - void calcNorm(); - - //! Calculate the normalisation integrals in the given range for the case of uniform or binned efficiency - /*! - This form to be used for case where decay time resolution is neglected - - \param [in] minAbs lower bound for the integral domain - \param [in] maxAbs upper bound for the integral domain - \param [in] weight the weight for this range, typically the efficiency value - */ - void calcNonSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight); - - //! 
Calculate the normalisation integrals in the given range for the case of uniform or binned efficiency - /*! - This form to be used for case where decay time resolution is accounted for - - \param [in] minAbs lower bound for the integral domain - \param [in] maxAbs upper bound for the integral domain - \param [in] weight the weight for this range, typically the efficiency value - \param [in] means the mean values of each Gaussian in the resolution function - \param [in] sigmas the width values of each Gaussian in the resolution function - \param [in] fractions the fractional weight of each Gaussian in the resolution function - */ - void calcSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight, const std::vector& means, const std::vector& sigmas, const std::vector& fractions); - - //! Calculate the normalisation integrals in the given range for the case of spline efficiency - /*! - This form to be used for case where decay time resolution is accounted for - - \param [in] iEvt the event number (for the case of using per-event decay-time error) - \param [in] splineIndex the index of the spline segment being integrated - \param [in] means the mean values of each Gaussian in the resolution function - \param [in] sigmas the width values of each Gaussian in the resolution function - \param [in] fractions the fractional weight of each Gaussian in the resolution function - */ - void calcSmearedSplinePartialIntegrals(const UInt_t iEvt, const UInt_t splineIndex, const std::vector& means, const std::vector& sigmas, const std::vector& fractions); - - //! Calculate the normalisation integrals in the given range for the case of spline efficiency - /*! - This form to be used for case where decay time resolution is neglected - - \param [in] splineIndex the index of the spline segment being integrated - */ - void calcNonSmearedSplinePartialIntegrals(const UInt_t splineIndex); - - //! 
Calculate normalisation for non-smeared cos and sin terms - /*! - \param [in] minAbs lower bound for the integral domain - \param [in] maxAbs upper bound for the integral domain - \return pair of {cosTermIntegral, sinTermIntegral} - */ - std::pair nonSmearedCosSinIntegral(const Double_t minAbs, const Double_t maxAbs); - - //! Calculate normalisation for non-smeared cosh and sinh terms - /*! - \param [in] minAbs lower bound for the integral domain - \param [in] maxAbs upper bound for the integral domain - \return pair of {coshTermIntegral, sinhTermIntegral} - */ - std::pair nonSmearedCoshSinhIntegral(const Double_t minAbs, const Double_t maxAbs); - - //! Calculate normalisation for non-smeared exponential term - /*! - \param [in] minAbs lower bound for the integral domain - \param [in] maxAbs upper bound for the integral domain - \return integral - */ - Double_t nonSmearedExpIntegral(const Double_t minAbs, const Double_t maxAbs); - - //! Calculate normalisation for decay-time resolution smeared terms - /*! - Uses the Faddeeva function method from Section 3 of https://arxiv.org/abs/1407.0748 - - \param [in] z the complex expression with general form: (Gamma - i Delta_m) * sigma / sqrt(2) - \param [in] minAbs lower bound for the integral domain - \param [in] maxAbs upper bound for the integral domain - \param [in] sigmaOverRoot2 width of the Gaussian resolution function, divided by sqrt(2) - \param [in] mu mean of the Gaussian resolution function - \return complex integral - */ - std::complex smearedGeneralIntegral(const std::complex& z, const Double_t minAbs, const Double_t maxAbs, const Double_t sigmaOverRoot2, const Double_t mu); - - //! Calculate decay-time resolution smeared terms - /*! 
- Uses the Faddeeva function method from Section 3 of https://arxiv.org/abs/1407.0748 - - \param [in] z the complex expression with general form: (Gamma - i Delta_m) * sigma / sqrt(2) - \param [in] x = ( t - mu ) / ( sqrt(2) * sigma ) - \return complex smeared term - */ - std::complex smearedGeneralTerm(const std::complex& z, const Double_t x); - - //! Calculate and cache powers of means and sigmas for each Gaussian in the resolution function - /* - \param [in] iEvt the event number (for the case of using per-event decay-time error) - \param [in] means mean of each Gaussian in the resolution function - \param [in] sigmas width of each Gaussian in the resolution function - */ - void calcMeanAndSigmaPowers( const UInt_t iEvt, const std::vector& means, const std::vector& sigmas ); - - //! Calculate and cache K-vectors for each term and for each Gaussian in the resolution function - /*! - \param [in] iEvt the event number (for the case of using per-event decay-time error) - */ - void calcKVectors( const UInt_t iEvt ); - - //! Generate the K vector used in eqn 31 from arXiv:1407.0748 - /* - \param [in] sigma width of the Gaussian resolution function - \param [in] z The z value, changing for exp, sin, sinh, etc - \return size 4 array of vector values - */ - std::array,4> generateKvector(const std::complex& z); - - //! 
Generate the M vector used in eqn 31 from arXiv:1407.0748 - /* - Uses the using the Faddeeva function method from - (https://arxiv.org/abs/1407.0748) - - \param [in] minAbs lower bound for the integral domain - \param [in] maxAbs upper bound for the integral domain - \param [in] z the complex expression with general form: (Gamma - i Delta_m) * sigma / sqrt(2) - \param [in] sigma width of the Gaussian resolution function - \param [in] mu mean of the Gaussian resolution function - \return size 4 array of vector values - */ - std::array,4> generateMvector(const Double_t minAbs, const Double_t maxAbs, const std::complex& z, const Double_t sigma, const Double_t mu = 0.); - - //! Calculate the normalisation of a given term in a particular spline segment - /* - \param [in] coeffs spline coefficients in this segment - \param [in] K K-vector for this term - \param [in] M M-vector for this term - \param [in] sigmaPowers powers of the width of the Gaussian resolution function - \param [in] meanPowers powers of the mean of the Gaussian resolution function - \return the complex normalisation - */ - std::complex smearedSplineNormalise(const std::array& coeffs, const std::array,4>& K, const std::array,4>& M, const std::array& sigmaPowers, const std::array& meanPowers) const; - - //! Calculate integrals of each power of t within a given spline segment - /* - \param [in] k the power of t - \param [in] minAbs lower bound for the integral domain - \param [in] maxAbs upper bound for the integral domain - \param [in] u the complex expression with general form: (Gamma - i Delta_m) - \return the complex integral - */ - std::complex calcIk(const UInt_t k, const Double_t minAbs, const Double_t maxAbs, const std::complex& u); - - //! 
Calculate the normalisation of a given term in a particular spline segment - /* - \param [in] splineIndex the index of the spline segment being integrated - \param [in] u the complex expression with general form: (Gamma - i Delta_m) - \param [in] cache cached results of calcIk, to be used and/or updated as appropriate - \return the complex normalisation - */ - std::complex nonSmearedSplineNormalise(const UInt_t splineIndex, const std::complex& u, std::vector,4>>& cache); - //! Generate the value of the error /*! If scaling by the error should call this before calling generate \param [in] forceNew forces generation of a new value \return the generated decay time error */ - Double_t generateError(const Bool_t forceNew = kFALSE); + Double_t generateError(const bool forceNew = kFALSE); //! Generate the value of the decay time /*! NB this only makes sense for cases where the decay time and DP are not intrinsically linked, i.e. not the ExpTrig and ExpHypTrig cases. Simple correlations with the DP position (e.g. of the resolution function parameters) can be handled using the kinematics parameter. \param [in] kinematics allows to determine the DP position \return the generated decay time */ Double_t generate(const LauKinematics* kinematics); //! Determine the maximum height of the PDF /*! NB this only makes sense for cases where the decay time and DP are not intrinsically linked, i.e. not the ExpTrig and ExpHypTrig cases. Simple correlations with the DP position (e.g. of the resolution function parameters) can be handled using the kinematics parameter. \param [in] kinematics allows to determine the DP position \return the PDF maximum */ Double_t getMaxHeight(const LauKinematics* kinematics); //! Retrieve the decay time minimum value Double_t minAbscissa() const {return minAbscissa_;} //! Retrieve the decay time maximum value Double_t maxAbscissa() const {return maxAbscissa_;} //! 
Retrieve the decay time error minimum value Double_t minAbscissaError() const {return minAbscissaError_;} //! Retrieve the decay time error maximum value Double_t maxAbscissaError() const {return maxAbscissaError_;} - // TODO - can we delete this? - // NB calcPDFHeight only calculates the gaussian information for the (type_ == Delta) case - //! Calculate the maximum height of the PDF - //void calcPDFHeight( const LauKinematics* kinematics ); - - //! Get efficiency parameters to float in the fit - std::vector& getEffiPars() {return effiPars_;} - //! Propagate any updates necessary to the decay time Efficiency and recalculate normalisation if necessary void propagateParUpdates(); - // TODO - can we delete this? - //! Update spline Y values when floating the decay time acceptance - /*! - \param [in] params the vector of LauParameters describing the Y values - */ - //void updateEffiSpline(const std::vector& params); - //! Set up the initial state correctly - called by the fit model's initialise function void initialise(); - protected: - //! Calculate the pure physics terms with no resolution function applied - void calcNonSmearedTerms(const Double_t abscissa); - //! Retrieve the number of PDF parameters /*! \return the number of PDF parameters */ - UInt_t nParameters() const {return params_.size();} + std::size_t nParameters() const {return params_.size();} + + private: + //! Update the cache values for all events + void updateCache(); - //! Retrieve the specified parameter + //! Determine if any parameters in the supplied container are floating /*! - \param [in] parName the parameter to retrieve + \param [in] pars the container of parameters + \return true if any parameter is floating, false if none are */ - LauAbsRValue* findParameter(const TString& parName); + bool anyParFloating( const std::vector& pars ) const; - //! Retrieve the specified parameter + //! Add all parameters in the supplied container to our internal store /*! 
- \param [in] parName the parameter to retrieve + \param [in] pars the container of parameters */ - const LauAbsRValue* findParameter(const TString& parName) const; + void addParams( std::vector& pars ); - //! Update the cache values for all events - void updateCache(); - private: - //! Name of the variable - const TString varName_; + //! Which type of decay time function is this? + const LauDecayTime::FuncType type_; - //! Name of the error variable - const TString varErrName_; + //! Are we using absolute decay time or decay time difference? + const LauDecayTime::TimeMeasurementMethod method_{LauDecayTime::TimeMeasurementMethod::DecayTime}; - //! The parameters of the PDF - std::vector params_; + //! Name of the decay time variable + const TString varName_; - // TODO - should probably set this at construction time (can then be const) - //! Smear with the resolution model or not - Bool_t smear_; + //! Name of the decay time error variable + const TString varErrName_{""}; //! The minimum value of the decay time const Double_t minAbscissa_; //! The maximum value of the decay time const Double_t maxAbscissa_; //! The minimum value of the decay time error - const Double_t minAbscissaError_; + const Double_t minAbscissaError_{0.0}; //! The maximum value of the decay time error - const Double_t maxAbscissaError_; - - //! The current value of the decay time error - Double_t abscissaError_; - //! Flag whether a value for the decay time error has been generated - Bool_t abscissaErrorGenerated_; - - //! Value of the MPV of the Landau dist used to generate the Delta t error - Double_t errorDistMPV_; - //! Value of the width of the Landau dist used to generate the Delta t error - Double_t errorDistSigma_; - - //! The number of gaussians in the resolution model - const UInt_t nGauss_; - - // Parameters of the gaussian(s) that accounts for the resolution: - //! mean (offset) of each Gaussian in the resolution function - std::vector mean_; - //! 
spread (sigma) of each Gaussian in the resolution function - std::vector sigma_; - //! fraction of each Gaussian in the resolution function - std::vector frac_; - - // Parameters of the physics decay time distribution - //! Lifetime parameter - LauAbsRValue* tau_; - //! Mass difference parameter - LauAbsRValue* deltaM_; - //! Width difference parameter - LauAbsRValue* deltaGamma_; - - //! Parameter for the fraction of prompt events in DeltaExp - LauAbsRValue* fracPrompt_; + const Double_t maxAbscissaError_{0.0}; + + //! The physics model + std::unique_ptr physicsModel_; + //! The resolution model + std::unique_ptr resolutionModel_; + //! The efficiency model + std::unique_ptr efficiencyModel_; + //! The calculator of the numerator terms + std::unique_ptr calculator_; + //! The calculator of the normalisation terms + std::unique_ptr integrator_; - //! Which type of decay time function is this? - const FuncType type_; + //! The parameters of the PDF + std::vector params_; - //! Are we using absolute decay time or decay time difference? - const TimeMeasurementMethod method_; + //! Smear with a resolution model? + const bool smear_{false}; - //! Which method for eff(decaytime) input are we using? - const EfficiencyMethod effMethod_; + //! Resolution uses per-event error? + const bool scaleWithPerEventError_{false}; - //! Scale the mean of each Gaussian by the per-event decay time error? - const std::vector scaleMeans_; - //! Scale the sigma of each Gaussian by the per-event decay time error? - const std::vector scaleWidths_; - //! Is anything being scaled by the per-event decay time error? - const Bool_t scaleWithPerEventError_; + //! The current value of the decay time error + Double_t abscissaError_{0.0}; + //! Flag whether a value for the decay time error has been generated + bool abscissaErrorGenerated_{false}; + /* //! The exp(-G*t) term - Double_t expTerm_; + Double_t expTerm_{0.0}; //! 
The cos(Dm*t) term (multiplied by the exponential) - Double_t cosTerm_; + Double_t cosTerm_{0.0}; //! The sin(Dm*t) term (multiplied by the exponential) - Double_t sinTerm_; + Double_t sinTerm_{0.0}; //! The cosh(DG/2*t) term (multiplied by the exponential) - Double_t coshTerm_; + Double_t coshTerm_{0.0}; //! The sinh(DG/2*t) term (multiplied by the exponential) - Double_t sinhTerm_; + Double_t sinhTerm_{0.0}; + */ + LauDecayTimeTerms terms_; + + /* //! Normalisation of the exponential term - Double_t normTermExp_; + Double_t normTermExp_{0.0}; //! Normalisation of the cos term - Double_t normTermCos_; + Double_t normTermCos_{0.0}; //! Normalisation of the sin term - Double_t normTermSin_; + Double_t normTermSin_{0.0}; //! Normalisation of the cosh term - Double_t normTermCosh_; + Double_t normTermCosh_{0.0}; //! Normalisation of the sinh term - Double_t normTermSinh_; + Double_t normTermSinh_{0.0}; + */ + LauDecayTimeNormTerms normTerms_; + //! Error PDF (NB there is no equivalent cache since the PDF errHist_ keeps a cache) - Double_t errTerm_; + Double_t errTerm_{0.0}; //! Efficiency - Double_t effiTerm_; + Double_t effiTerm_{0.0}; - //TODO : to be deleted? or needed for backgrounds? //! Hist PDF term (NB there is no equivalent cache since the PDF pdfHist_ keeps a cache) - Double_t pdfTerm_; + Double_t histTerm_{0.0}; //! The maximum height of the PDF (used for generating in Exp, Delta, DeltaExp cases) - Double_t maxHeight_; + Double_t maxHeight_{0.0}; //! Flag to indicate if the maxHeight_ value it up-to-date - Bool_t heightUpToDate_; + bool heightUpToDate_{false}; //! The cache of the decay times std::vector abscissas_; //! The cache of the per-event errors on the decay time std::vector abscissaErrors_; + /* //! The cache of the exponential terms std::vector expTerms_; //! The cache of the exponential * cosh(DG/2*t) terms std::vector coshTerms_; //! The cache of the exponential * sinh(DG/2*t) terms std::vector sinhTerms_; //! 
The cache of the exponential * cos(Dm*t) terms std::vector cosTerms_; //! The cache of the exponential * sin(Dm*t) terms std::vector sinTerms_; //! The cache of the exponential normalisation std::vector normTermsExp_; //! The cache of the cosh term normalisation std::vector normTermsCosh_; //! The cache of the sinh term normalisation std::vector normTermsSinh_; //! The cache of the cos term normalisation std::vector normTermsCos_; //! The cache of the sin term normalisation std::vector normTermsSin_; + */ + + //! The cache of the numerator terms + std::vector termsStore_; + + //! The cache of the normalisation terms + std::vector normTermsStore_; //! The cache of the efficiency std::vector effiTerms_; - //! Histogram PDF for abscissa error distribution - Lau1DHistPdf* errHist_; - //! Histogram PDF for abscissa distribution - Lau1DHistPdf* pdfHist_; - - //! efficiency PDF in spline - Lau1DCubicSpline* effiFun_; + std::unique_ptr pdfHist_; - //! efficiency PDF as Histogram - TH1* effiHist_; - - //! Vector of parameters to float acceptance - std::vector effiPars_; + //! Histogram PDF for abscissa error distribution + std::unique_ptr errHist_; // Caching / bookkeeping - //! 
Binomial coefficients - // TODO - would prefer this to use std::array but cling doesn't like it - static constexpr Double_t binom_[4][4] = { - {1., 0., 0., 0.}, - {1., 1., 0., 0.}, - {1., 2., 1., 0.}, - {1., 3., 3., 1.} - }; - - Bool_t nothingFloating_{kFALSE}; - Bool_t anyKnotFloating_{kTRUE}; - Bool_t nonKnotFloating_{kTRUE}; - Bool_t physicsParFloating_{kTRUE}; - Bool_t tauFloating_{kTRUE}; - Bool_t deltaMFloating_{kTRUE}; - Bool_t deltaGammaFloating_{kTRUE}; - Bool_t resoParFloating_{kTRUE}; - //std::vector meansFloating_; - //std::vector sigmasFloating_; - //std::vector fracsFloating_; - - Bool_t nothingChanged_{kFALSE}; - Bool_t anyKnotChanged_{kTRUE}; - Bool_t nonKnotChanged_{kTRUE}; - Bool_t physicsParChanged_{kTRUE}; - Bool_t tauChanged_{kTRUE}; - Bool_t deltaMChanged_{kTRUE}; - Bool_t deltaGammaChanged_{kTRUE}; - Bool_t resoParChanged_{kTRUE}; - //std::vector meansChanged_; - //std::vector sigmasChanged_; - //std::vector fracsChanged_; - - Double_t tauVal_{0.0}; - Double_t gammaVal_{0.0}; - Double_t deltaMVal_{0.0}; - Double_t deltaGammaVal_{0.0}; - std::vector meanVals_; - std::vector sigmaVals_; - std::vector fracVals_; - std::vector effiParVals_; - - // vector has size nSplineSegments, array has 0th - 3rd powers - std::vector,4>> expTermIkVals_; - std::vector,4>> trigTermIkVals_; - std::vector,4>> hypHTermIkVals_; - std::vector,4>> hypLTermIkVals_; - - // outer vector has nEvents entries, inner vector has nGauss_ entries, array has 0th - 3rd or 1st - 4th powers, respectively - std::vector>> meanPowerVals_; - std::vector>> sigmaPowerVals_; - - // outer vector has nEvents entries, inner vector has nGauss_ entries, array has 0th - 4th entries of the K-vector - std::vector,4>>> expTermKvecVals_; - std::vector,4>>> trigTermKvecVals_; - std::vector,4>>> hypHTermKvecVals_; - std::vector,4>>> hypLTermKvecVals_; - - // outer vector has nEvents entries, middle vector has nSplineSegments entries, inner vector has nGauss_ entries, array has 0th - 4th entries of 
the M-vector - std::vector,4>>>> expTermMvecVals_; - std::vector,4>>>> trigTermMvecVals_; - std::vector,4>>>> hypHTermMvecVals_; - std::vector,4>>>> hypLTermMvecVals_; + bool anythingFloating_{false}; + bool physicsParFloating_{false}; + bool resoParFloating_{false}; + bool effiParFloating_{false}; + + bool anythingChanged_{false}; + bool physicsParChanged_{false}; + bool resoParChanged_{false}; + bool effiParChanged_{false}; ClassDef(LauDecayTimePdf,0) // Define the Delta t PDF }; +template +void LauDecayTimePdf::setSplineEfficiency( std::unique_ptr> effModel ) +{ + if ( not effModel ) { + std::cerr << "WARNING in LauDecayTimePdf::setSplineEfficiency : supplied spline efficiency model pointer is null" << std::endl; + return; + } + + // Create the approptiate integrator + // NB need to use effModel here since it needs to be of concrete type + if ( smear_ ) { + integrator_ = std::make_unique>( minAbscissa_, maxAbscissa_, *physicsModel_, *effModel, *resolutionModel_ ); + } else { + integrator_ = std::make_unique>( minAbscissa_, maxAbscissa_, *physicsModel_, *effModel ); + } + + // Store the efficiency model (as a pointer to base) + efficiencyModel_ = std::move(effModel); +} + #endif diff --git a/inc/LauDecayTimePhysicsModel.hh b/inc/LauDecayTimePhysicsModel.hh new file mode 100644 index 0000000..a496316 --- /dev/null +++ b/inc/LauDecayTimePhysicsModel.hh @@ -0,0 +1,220 @@ + +/* +Copyright 2021 University of Warwick + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +/* +Laura++ package authors: +John Back +Paul Harrison +Thomas Latham +*/ + +/*! \file LauDecayTimePhysicsModel.hh + \brief File containing declaration of LauDecayTimePhysicsModel class. +*/ + +#ifndef LAU_DECAYTIME_PHYSICSMODEL +#define LAU_DECAYTIME_PHYSICSMODEL + +#include + +#include "Rtypes.h" + +#include "LauDecayTime.hh" + +class LauAbsRValue; + +/*! \class LauDecayTimePhysicsModel + \brief Class for defining the physics and associated parameters for the decay time model +*/ + +class LauDecayTimePhysicsModel final { + + public: + //! Constructor + /*! + \param type the type of the physics model + \param parameters the parameters of the physics model + */ + LauDecayTimePhysicsModel( const LauDecayTime::FuncType type, std::vector parameters ); + + //! Retrieve the type of the physics function + /*! + \return the function type + */ + LauDecayTime::FuncType getFunctionType() const { return type_; } + + //! Retrieve the parameters of the physics model so that they can be loaded into a fit + /*! + \return the parameters of the physics model + */ + std::vector getParameters() { return params_; } + + //! Retrieve the up-to-date value of the lifetime + /*! + \return the lifetime value + */ + const Double_t& getLifetimeValue() const { return tauVal_; } + + //! Retrieve the up-to-date value of the inverse lifetime + /*! + \return the inverse lifetime value + */ + const Double_t& getGammaValue() const { return gammaVal_; } + + //! Retrieve the up-to-date value of the mass difference + /*! + \return the mass difference value + */ + const Double_t& getDeltaMValue() const { return deltaMVal_; } + + //! Retrieve the up-to-date value of the width difference + /*! + \return the width difference value + */ + const Double_t& getDeltaGammaValue() const { return deltaGammaVal_; } + + //! Retrieve the up-to-date value of the prompt fraction + /*! + \return the prompt fraction value + */ + const Double_t& getFracPromptValue() const { return fracPromptVal_; } + + //! 
Retrieve whether any of the parameter values have changed in the last fit iteration + /*! + \return the any param changed flag + */ + const bool& anythingChanged() const { return anythingChanged_; } + + //! Retrieve whether the lifetime value has changed in the last fit iteration + /*! + \return the lifetime changed flag + */ + const bool& lifetimeChanged() const { return tauChanged_; } + + //! Retrieve the up-to-date value of the mass difference + /*! + \return the mass difference value + */ + const bool& deltaMChanged() const { return deltaMChanged_; } + + //! Retrieve the up-to-date value of the width difference + /*! + \return the width difference value + */ + const bool& deltaGammaChanged() const { return deltaGammaChanged_; } + + //! Retrieve the up-to-date value of the prompt fraction + /*! + \return the prompt fraction value + */ + const bool& fracPromptChanged() const { return fracPromptChanged_; } + + //! Initialise the parameter cache + /*! + Must be called prior to starting fitting or generation + */ + void initialise(); + + //! Propagate any updates to parameters and recalculate information as neeeded + /*! + Should be called at each fit iteration + */ + void propagateParUpdates(); + + private: + //! Retrieve the specified parameter + /*! + \param [in] parName the parameter to retrieve + */ + const LauAbsRValue* findParameter(const TString& parName) const; + + //! Retrieve the specified parameter + /*! + \param [in] parName the parameter to retrieve + */ + LauAbsRValue* findParameter(const TString& parName); + + //! Update the cached parameter values + void updateParameterCache(); + + //! Which type of decay time function is this? + const LauDecayTime::FuncType type_; + + //! Store of all parameters of the physics model + std::vector params_; + + //! Lifetime parameter + LauAbsRValue* tau_{nullptr}; + + //! Mass difference parameter + LauAbsRValue* deltaM_{nullptr}; + + //! Width difference parameter + LauAbsRValue* deltaGamma_{nullptr}; + + //! 
Parameter for the fraction of prompt events in DeltaExp + LauAbsRValue* fracPrompt_{nullptr}; + + //! Cached value of lifetime + Double_t tauVal_{0.0}; + + //! Cached value of 1/lifetime + Double_t gammaVal_{0.0}; + + //! Cached value of mass difference + Double_t deltaMVal_{0.0}; + + //! Cached value of width difference + Double_t deltaGammaVal_{0.0}; + + //! Cached value of prompt fraction + Double_t fracPromptVal_{0.0}; + + //! Is the lifetime floating in the fit? + bool tauFloating_{false}; + + //! Is the mass difference floating in the fit? + bool deltaMFloating_{false}; + + //! Is the width difference floating in the fit? + bool deltaGammaFloating_{false}; + + //! Is the prompt fraction floating in the fit? + bool fracPromptFloating_{false}; + + //! Are any of the physics parameters floating in the fit? + bool anythingFloating_{false}; + + //! Has the lifetime parameter changed in the last fit iteration? + bool tauChanged_{false}; + + //! Has the mass difference parameter changed in the last fit iteration? + bool deltaMChanged_{false}; + + //! Has the width difference parameter changed in the last fit iteration? + bool deltaGammaChanged_{false}; + + //! Has the prompt fraction parameter changed in the last fit iteration? + bool fracPromptChanged_{false}; + + //! Have any of the physics parameters changed in the last fit iteration? + bool anythingChanged_{false}; + + ClassDef(LauDecayTimePhysicsModel, 0) +}; + +#endif diff --git a/inc/LauDecayTimeResolution.hh b/inc/LauDecayTimeResolution.hh new file mode 100644 index 0000000..15b4d8c --- /dev/null +++ b/inc/LauDecayTimeResolution.hh @@ -0,0 +1,174 @@ + +/* +Copyright 2021 University of Warwick + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Laura++ package authors: +John Back +Paul Harrison +Thomas Latham +*/ + +/*! \file LauDecayTimeResolution.hh + \brief File containing declaration of LauDecayTimeResolution class. +*/ + +/*! \class LauDecayTimeResolution + \brief Class for defining the model for decay time resolution +*/ + +#ifndef LAU_DECAYTIME_RESOLUTION +#define LAU_DECAYTIME_RESOLUTION + +#include + +#include "Rtypes.h" + +class LauAbsRValue; + + +class LauDecayTimeResolution final { + + public: + //! Constructor + LauDecayTimeResolution( const std::size_t nGauss, const std::vector& resolutionParams, const bool scale = false ); + LauDecayTimeResolution( const std::size_t nGauss, const std::vector& resolutionParams, const std::vector& scale ); + LauDecayTimeResolution( const std::size_t nGauss, const std::vector& resolutionParams, const std::vector& scaleMeans, const std::vector& scaleWidths ); + + //! Retrieve the parameters of the resolution model so that they can be loaded into a fit + /*! + \return the parameters of the resolution model + */ + std::vector getParameters() { return params_; } + + //! Retrieve the number of Gaussians in the model + /*! + \return the number of Gaussians + */ + const std::size_t& nGauss() const { return nGauss_; } + + //! Retrieve whether any of the parameters of the resolution function scaled by the per-event error + /*! + \return whether scaling or not + */ + bool scaleWithPerEventError() const { return scaleWithPerEventError_; } + + //! Retrieve whether the mean of each Gaussian is scaled by the per-event decay time error + /*! 
+ \return the mean scaling flags + */ + const std::vector& scaleMeans() const { return scaleMeans_; } + + //! Retrieve whether the width of each Gaussian is scaled by the per-event decay time error + /*! + \return the width scaling flags + */ + const std::vector& scaleWidths() const { return scaleWidths_; } + + //! Retrieve the up-to-date values of the means + /*! + \return the mean values + */ + const std::vector& getMeanValues() const { return meanVals_; } + + //! Retrieve the up-to-date values of the widths + /*! + \return the width values + */ + const std::vector& getWidthValues() const { return widthVals_; } + + //! Retrieve the up-to-date values of the fractions + /*! + \return the fraction values + */ + const std::vector& getFractionValues() const { return fractionVals_; } + + //! Retrieve whether any of the parameter values have changed in the last fit iteration + /*! + \return the any param changed flag + */ + const bool& anythingChanged() const { return anythingChanged_; } + + //! Initialise the parameter cache + /*! + Must be called prior to starting fitting or generation + */ + void initialise(); + + //! Propagate any updates to parameters and recalculate information as neeeded + /*! + Should be called at each fit iteration + */ + void propagateParUpdates(); + + private: + //! Retrieve the specified parameter + /*! + \param [in] parName the parameter to retrieve + */ + const LauAbsRValue* findParameter(const TString& parName) const; + + //! Retrieve the specified parameter + /*! + \param [in] parName the parameter to retrieve + */ + LauAbsRValue* findParameter(const TString& parName); + + //! Resize the internal vectors to match nGauss_ + void resizeVectors(); + + //! Do an initial sanity check of our setup + void checkSetup(); + + //! Update the cached parameter values + void updateParameterCache(); + + //! The number of Gaussians in the resolution function + const std::size_t nGauss_; + + //! 
Are any of the parameters of the resolution function scaled by the per-event error? + const bool scaleWithPerEventError_{false}; + //! Scale the mean of each Gaussian by the per-event decay time error? + const std::vector scaleMeans_; + //! Scale the width of each Gaussian by the per-event decay time error? + const std::vector scaleWidths_; + + //! Store of all parameters of the resolution function + std::vector params_; + + //! Fraction parameter for each Gaussian in the resolution function + std::vector fractions_; + //! Mean parameter for each Gaussian in the resolution function + std::vector means_; + //! Width parameter for each Gaussian in the resolution function + std::vector widths_; + + //! Fraction of each Gaussian in the resolution function + std::vector fractionVals_; + //! Mean of each Gaussian in the resolution function + std::vector meanVals_; + //! Width of each Gaussian in the resolution function + std::vector widthVals_; + + //! Is any parameter floating in the fit? + bool anythingFloating_{false}; + //! Has any floating parameter changed in the last fit iteration? + bool anythingChanged_{false}; + + ClassDef(LauDecayTimeResolution, 0) +}; + +#endif diff --git a/inc/LauNonSmearedBinnedEfficiencyDecayTimeIntegrator.hh b/inc/LauNonSmearedBinnedEfficiencyDecayTimeIntegrator.hh new file mode 100644 index 0000000..cdbfd08 --- /dev/null +++ b/inc/LauNonSmearedBinnedEfficiencyDecayTimeIntegrator.hh @@ -0,0 +1,85 @@ + +/* +Copyright 2021 University of Warwick + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Laura++ package authors: +John Back +Paul Harrison +Thomas Latham +*/ + +/*! \file LauNonSmearedBinnedEfficiencyDecayTimeIntegrator.hh + \brief File containing declaration of LauNonSmearedBinnedEfficiencyDecayTimeIntegrator class. +*/ + +#ifndef LAU_NONSMEARED_BINNEDEFFICIENCY_DECAYTIME_INTEGRATOR +#define LAU_NONSMEARED_BINNEDEFFICIENCY_DECAYTIME_INTEGRATOR + +#include + +#include "Rtypes.h" + +#include "LauAbsDecayTimeIntegrator.hh" +#include "LauBinnedDecayTimeEfficiency.hh" +#include "LauDecayTimePhysicsModel.hh" +#include "LauNonSmearedUniformEfficiencyDecayTimeIntegrator.hh" +#include "LauUniformDecayTimeEfficiency.hh" + +class LauParameter; + +/*! \class LauNonSmearedBinnedEfficiencyDecayTimeIntegrator + \brief Class for defining the decay time integrator for the case of no resolution and uniform efficiency +*/ + +class LauNonSmearedBinnedEfficiencyDecayTimeIntegrator : public LauAbsDecayTimeIntegrator { + + public: + //! Constructor + LauNonSmearedBinnedEfficiencyDecayTimeIntegrator( const Double_t minAbscissaVal, const Double_t maxAbscissaVal, const LauDecayTimePhysicsModel& physModel, const LauBinnedDecayTimeEfficiency& effModel ); + + //! Cache information from data + /*! + \param [in] abscissaErrors the values of the per-event decay time error + */ + void cacheInfo( const std::vector& abscissaErrors ) override; + + //! Propagate any updates to parameters and recalculate information as neeeded + void propagateParUpdates() override; + + //! Retrieve the normalisation terms (optionally for a given event) + /*! + \param [in] iEvt the event index + */ + LauDecayTimeNormTerms getNormTerms( const std::size_t iEvt ) const override; + + //! Calculate the normalisation terms for a given value of the decay time error + /*! 
+ \param [in] abscissaError the decay time error value (actually {1.0,err} to allow branchless control of scaling) + */ + LauDecayTimeNormTerms calcNormTerms( const std::array& abscissaError = {1.0,1.0} ) const override; + + private: + //! The collection of uniform efficiency objects which we pass to the integrators + std::vector efficiencies_; + + //! The collection of uniform efficiency integrator objects to which we delegate + std::vector integrators_; + + ClassDefOverride(LauNonSmearedBinnedEfficiencyDecayTimeIntegrator, 0) +}; + +#endif diff --git a/inc/LauNonSmearedDecayTimeCalculator.hh b/inc/LauNonSmearedDecayTimeCalculator.hh new file mode 100644 index 0000000..cdfcb4b --- /dev/null +++ b/inc/LauNonSmearedDecayTimeCalculator.hh @@ -0,0 +1,110 @@ + +/* +Copyright 2021 University of Warwick + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Laura++ package authors: +John Back +Paul Harrison +Thomas Latham +*/ + +/*! \file LauNonSmearedDecayTimeCalculator.hh + \brief File containing declaration of LauNonSmearedDecayTimeCalculator class. +*/ + +#ifndef LAU_NONSMEARED_DECAYTIME_CALCULATOR +#define LAU_NONSMEARED_DECAYTIME_CALCULATOR + +#include + +#include "Rtypes.h" + +#include "LauAbsDecayTimeCalculator.hh" +#include "LauDecayTimePhysicsModel.hh" + +class LauParameter; + +/*! 
\class LauNonSmearedDecayTimeCalculator + \brief Class for defining the abstract interface for calculating the terms of the decay time model +*/ + +class LauNonSmearedDecayTimeCalculator : public LauAbsDecayTimeCalculator { + + public: + //! Constructor + LauNonSmearedDecayTimeCalculator( const Double_t minAbscissaVal, const Double_t maxAbscissaVal, const LauDecayTimePhysicsModel& physModel ); + + //! Cache information from data + /*! + \param [in] abscissas the values of the per-event decay time + \param [in] abscissaErrors the values of the per-event decay time error + */ + void cacheInfo( const std::vector& abscissas, const std::vector& abscissaErrors ) override; + + //! Propagate any updates to parameters and recalculate information as neeeded + void propagateParUpdates() override; + + //! Calculate the terms for given values of decay time and decay time error + /*! + \param [in] abscissa the decay time value + \param [in] abscissaError the decay time error value (actually {1.0,err} to allow branchless control of scaling) + */ + LauDecayTimeTerms calcTerms( const Double_t abscissa, const std::array& abscissaError ) override; + + //! Retrieve the terms for a given event + /*! + \param [in] iEvt the event index + */ + LauDecayTimeTerms getTerms( const std::size_t iEvt ) const override { return terms_[iEvt]; } + + private: + //! Recalculate the cached information + void updateCache(); + + //! The minimum value of the decay time + const Double_t minAbscissa_; + //! The maximum value of the decay time + const Double_t maxAbscissa_; + + //! The physics model + const LauDecayTimePhysicsModel& physModel_; + + //! Cached value of lifetime + const Double_t& tauVal_; + + //! Cached value of 1/lifetime + const Double_t& gammaVal_; + + //! Cached value of mass difference + const Double_t& deltaMVal_; + + //! Cached value of width difference + const Double_t& deltaGammaVal_; + + //! 
Have any of the physics parameters changed in the latest fit iteration + const bool& physicsParamChanged_; + + //! Cache of the decay time for each event + std::vector abscissas_; + + //! The cache of the terms for each event + std::vector terms_; + + ClassDefOverride(LauNonSmearedDecayTimeCalculator, 0) +}; + +#endif diff --git a/inc/LauNonSmearedSplineEfficiencyDecayTimeIntegrator.hh b/inc/LauNonSmearedSplineEfficiencyDecayTimeIntegrator.hh new file mode 100644 index 0000000..bcbd8ab --- /dev/null +++ b/inc/LauNonSmearedSplineEfficiencyDecayTimeIntegrator.hh @@ -0,0 +1,356 @@ + +/* +Copyright 2021 University of Warwick + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Laura++ package authors: +John Back +Paul Harrison +Thomas Latham +*/ + +/*! \file LauNonSmearedSplineEfficiencyDecayTimeIntegrator.hh + \brief File containing declaration of LauNonSmearedSplineEfficiencyDecayTimeIntegrator class. +*/ + +#ifndef LAU_NONSMEARED_SPLINEEFFICIENCY_DECAYTIME_INTEGRATOR +#define LAU_NONSMEARED_SPLINEEFFICIENCY_DECAYTIME_INTEGRATOR + +#include +#include +#include + +#include "Rtypes.h" +#include "TSystem.h" + +#include "LauAbsDecayTimeIntegrator.hh" +#include "LauDecayTime.hh" +#include "LauDecayTimePhysicsModel.hh" +#include "LauSplineDecayTimeEfficiency.hh" + +class LauParameter; + +/*! 
\class LauNonSmearedSplineEfficiencyDecayTimeIntegrator + \brief Class for defining the decay time integrator for the case of no resolution and uniform efficiency +*/ + +template +class LauNonSmearedSplineEfficiencyDecayTimeIntegrator : public LauAbsDecayTimeIntegrator { + + public: + //! Constructor + LauNonSmearedSplineEfficiencyDecayTimeIntegrator( const Double_t minAbscissaVal, const Double_t maxAbscissaVal, const LauDecayTimePhysicsModel& physModel, const LauSplineDecayTimeEfficiency& effModel ); + + //! Cache information from data + /*! + \param [in] abscissaErrors the values of the per-event decay time error + */ + void cacheInfo( const std::vector& abscissaErrors ) override; + + //! Propagate any updates to parameters and recalculate information as neeeded + void propagateParUpdates() override; + + //! Retrieve the normalisation terms (optionally for a given event) + /*! + \param [in] iEvt the event index + */ + LauDecayTimeNormTerms getNormTerms( [[maybe_unused]] const std::size_t iEvt ) const override { return normTerms_; } + + //! Calculate the normalisation terms for a given value of the decay time error + /*! + \param [in] abscissaError the decay time error value (actually {1.0,err} to allow branchless control of scaling) + */ + LauDecayTimeNormTerms calcNormTerms( const std::array& abscissaError = {1.0,1.0} ) const override; + + private: + //! The number of coefficients of each spline segment + static constexpr std::size_t nCoeffs { static_cast>(Order) + 1 }; + + using RealArray = std::array; + using Real2DArray = std::array; + using ComplexArray = std::array,nCoeffs>; + + void updateCoeffCache(); + void updateIkCache( const bool forceUpdate = false ); + std::complex sumIkTerms( const std::vector& cacheIk ) const; + void calcIntegrals( std::vector& cacheIk, const std::complex& u ); + std::complex calcIk( const std::size_t k, const Double_t minAbs, const Double_t maxAbs, const std::complex& u ) const; + + //! 
The minimum value of the decay time + const Double_t minAbscissa_; + + //! The maximum value of the decay time + const Double_t maxAbscissa_; + + //! The physics model + const LauDecayTimePhysicsModel& physModel_; + + //! The efficiency model + const LauSplineDecayTimeEfficiency& effModel_; + + //! Cached value of lifetime + const Double_t& tauVal_; + + //! Cached value of 1/lifetime + const Double_t& gammaVal_; + + //! Cached value of mass difference + const Double_t& deltaMVal_; + + //! Cached value of width difference + const Double_t& deltaGammaVal_; + + //! Have any of the physics parameters changed in the latest fit iteration + const bool& physicsParamChanged_; + + //! Has the lifetime changed in the latest fit iteration + const bool& tauChanged_; + + //! Has the mass difference changed in the latest fit iteration + const bool& deltaMChanged_; + + //! Has the width difference changed in the latest fit iteration + const bool& deltaGammaChanged_; + + //! Have any of the efficiency parameters changed in the latest fit iteration + const bool& effParamChanged_; + + RealArray factorial_; + + Real2DArray binomial_; + + std::vector coeffs_; + + std::vector expTermIkVals_; + std::vector trigTermIkVals_; + std::vector hypHTermIkVals_; + std::vector hypLTermIkVals_; + + LauDecayTimeNormTerms normTerms_; + + ClassDefOverride(LauNonSmearedSplineEfficiencyDecayTimeIntegrator, 0) +}; + + +templateClassImp(LauNonSmearedSplineEfficiencyDecayTimeIntegrator); + +template +LauNonSmearedSplineEfficiencyDecayTimeIntegrator::LauNonSmearedSplineEfficiencyDecayTimeIntegrator( const Double_t minAbscissaVal, const Double_t maxAbscissaVal, const LauDecayTimePhysicsModel& physModel, const LauSplineDecayTimeEfficiency& effModel ) : + minAbscissa_{minAbscissaVal}, + maxAbscissa_{maxAbscissaVal}, + physModel_{physModel}, + effModel_{effModel}, + tauVal_{physModel_.getLifetimeValue()}, + gammaVal_{physModel_.getGammaValue()}, + deltaMVal_{physModel_.getDeltaMValue()}, + 
deltaGammaVal_{physModel_.getDeltaGammaValue()}, + physicsParamChanged_{physModel_.anythingChanged()}, + tauChanged_{physModel_.lifetimeChanged()}, + deltaMChanged_{physModel_.deltaMChanged()}, + deltaGammaChanged_{physModel_.deltaGammaChanged()}, + effParamChanged_{effModel_.anythingChanged()} +{ + switch ( physModel_.getFunctionType() ) { + case LauDecayTime::FuncType::Hist : + case LauDecayTime::FuncType::Delta : + case LauDecayTime::FuncType::DeltaExp : + // These do not make sense for us: + // - Hist has no need of an integrator + // - Delta and DeltaExp require a resolution function + std::cerr << "ERROR in LauNonSmearedSplineEfficiencyDecayTimeIntegrator::LauNonSmearedSplineEfficiencyDecayTimeIntegrator : Unsupported function type in the physics model" << std::endl; + gSystem->Exit(EXIT_FAILURE); + break; + case LauDecayTime::FuncType::Exp : + case LauDecayTime::FuncType::ExpTrig : + case LauDecayTime::FuncType::ExpHypTrig : + // All fine, we can deal with these + break; + } + + // TODO - we should check that the range of the spline matches the range we've been given + + // populate the binomial and factorial arrays + for ( std::size_t i{0}; i < nCoeffs; ++i ) { + factorial_[i] = TMath::Factorial(i); + for ( std::size_t j{0}; j < nCoeffs; ++j ) { + if ( j <= i ) { + binomial_[i][j] = TMath::Binomial(i,j); + } else { + binomial_[i][j] = 0.0; + } + } + } +} + +template +void LauNonSmearedSplineEfficiencyDecayTimeIntegrator::cacheInfo( [[maybe_unused]] const std::vector& abscissaErrors ) +{ + const std::size_t nSplineSegments { effModel_.nSegments() }; + coeffs_.clear(); coeffs_.resize(nSplineSegments); + expTermIkVals_.clear(); expTermIkVals_.resize(nSplineSegments); + trigTermIkVals_.clear(); trigTermIkVals_.resize(nSplineSegments); + hypHTermIkVals_.clear(); hypHTermIkVals_.resize(nSplineSegments); + hypLTermIkVals_.clear(); hypLTermIkVals_.resize(nSplineSegments); + + this->updateCoeffCache(); + this->updateIkCache(true); + + normTerms_ = 
this->calcNormTerms(); +} + +template +void LauNonSmearedSplineEfficiencyDecayTimeIntegrator::updateCoeffCache() +{ + const std::size_t nSplineSegments { effModel_.nSegments() }; + for ( std::size_t iSeg{0}; iSeg < nSplineSegments; ++iSeg ) { + coeffs_[iSeg] = effModel_.getCoefficients(iSeg); + } +} + +template +void LauNonSmearedSplineEfficiencyDecayTimeIntegrator::updateIkCache( const bool forceUpdate ) +{ + if ( tauChanged_ or forceUpdate ) { + const std::complex uExp { gammaVal_ }; + this->calcIntegrals( expTermIkVals_, uExp ); + } + + const LauDecayTime::FuncType funcType { physModel_.getFunctionType() }; + if ( funcType == LauDecayTime::FuncType::ExpTrig or funcType == LauDecayTime::FuncType::ExpHypTrig ) { + if ( tauChanged_ or deltaMChanged_ or forceUpdate ) { + const std::complex uTrig { gammaVal_, -deltaMVal_ }; + this->calcIntegrals( trigTermIkVals_, uTrig ); + } + + if ( funcType == LauDecayTime::FuncType::ExpHypTrig ) { + if ( tauChanged_ or deltaGammaChanged_ or forceUpdate ) { + const std::complex uH { gammaVal_ - 0.5 * deltaGammaVal_ }; + const std::complex uL { gammaVal_ + 0.5 * deltaGammaVal_ }; + this->calcIntegrals( hypHTermIkVals_, uH ); + this->calcIntegrals( hypLTermIkVals_, uL ); + } + } + } +} + +template +LauDecayTimeNormTerms LauNonSmearedSplineEfficiencyDecayTimeIntegrator::calcNormTerms( [[maybe_unused]] const std::array& abscissaError ) const +{ + LauDecayTimeNormTerms normTerms; + + const Double_t expIntegral { this->sumIkTerms(expTermIkVals_).real() }; + + normTerms.expTerm = expIntegral; + + const LauDecayTime::FuncType funcType { physModel_.getFunctionType() }; + if ( funcType == LauDecayTime::FuncType::ExpTrig or funcType == LauDecayTime::FuncType::ExpHypTrig ) { + + const std::complex trigIntegral { this->sumIkTerms(trigTermIkVals_) }; + + normTerms.cosTerm = trigIntegral.real(); + normTerms.sinTerm = trigIntegral.imag(); + + if ( funcType == LauDecayTime::FuncType::ExpTrig ) { + + normTerms.coshTerm = normTerms.expTerm; + + } 
else { + + const Double_t integralH { this->sumIkTerms(hypHTermIkVals_).real() }; + const Double_t integralL { this->sumIkTerms(hypLTermIkVals_).real() }; + + const Double_t coshIntegral { 0.5 * (integralH + integralL) }; + const Double_t sinhIntegral { 0.5 * (integralH - integralL) }; + + normTerms.coshTerm = coshIntegral; + normTerms.sinhTerm = sinhIntegral; + } + } + + return normTerms; +} + +template +std::complex LauNonSmearedSplineEfficiencyDecayTimeIntegrator::sumIkTerms( const std::vector& cacheIk ) const +{ + std::complex integral { 0.0 }; + + const std::size_t nSplineSegments { effModel_.nSegments() }; + for ( std::size_t iSeg{0}; iSeg < nSplineSegments; ++iSeg ) { + // eqn 30 in https://arxiv.org/pdf/1407.0748.pdf, using I_k from Appendix B.1 (with the typo corrected) + for ( std::size_t k{0}; k < nCoeffs; ++k ) { + integral += ( cacheIk[iSeg][k] * coeffs_[iSeg][k] ); + } + } + + return integral; +} + +template +void LauNonSmearedSplineEfficiencyDecayTimeIntegrator::propagateParUpdates() +{ + // if any of the efficiency parameters changed in this iteration + // we need to get new coefficients for each spline segment + if ( effParamChanged_ ) { + this->updateCoeffCache(); + } + + // if any of the physics parameters changed in this iteration + // we need to calculate new values of the Ik integrals + if ( physicsParamChanged_ ) { + this->updateIkCache(); + } + + if ( effParamChanged_ or physicsParamChanged_ ) { + normTerms_ = this->calcNormTerms(); + } +} + +template +void LauNonSmearedSplineEfficiencyDecayTimeIntegrator::calcIntegrals( std::vector& cacheIk, const std::complex& u ) +{ + const std::vector& tVals { effModel_.knotPositions() }; + + const std::size_t nSplineSegments { effModel_.nSegments() }; + for ( std::size_t iSeg{0}; iSeg < nSplineSegments; ++iSeg ) { + + const Double_t minAbs { tVals[iSeg] }; + const Double_t maxAbs { tVals[iSeg+1] }; + + for(std::size_t k{0}; k < nCoeffs; ++k) { + cacheIk[iSeg][k] = this->calcIk(k, minAbs, maxAbs, u); + } 
+ } +} + +template +std::complex LauNonSmearedSplineEfficiencyDecayTimeIntegrator::calcIk( const std::size_t k, const Double_t minAbs, const Double_t maxAbs, const std::complex& u ) const +{ + // calculate I_k using https://arxiv.org/pdf/1407.0748.pdf Appendix B.1 (with n -> n+1 typo corrected - see below) + + // u = Gamma - i * DeltaM in general + + auto G = [&u,this](const std::size_t n){return -factorial_[n]/std::pow(u,n+1);}; //power of n+1 used rather than n, this is due to typo in the paper + auto H = [&u](const std::size_t n, const Double_t t){return std::pow(t,n)*std::exp(-u*t);}; + + std::complex valIk { 0.0, 0.0 }; + for (std::size_t j{0}; j <= k; ++j) + {valIk += binomial_[k][j]*G(j)*( H( k-j, maxAbs ) - H( k-j, minAbs ) );} + + return valIk; +} + +#endif diff --git a/inc/LauNonSmearedUniformEfficiencyDecayTimeIntegrator.hh b/inc/LauNonSmearedUniformEfficiencyDecayTimeIntegrator.hh new file mode 100644 index 0000000..bd22397 --- /dev/null +++ b/inc/LauNonSmearedUniformEfficiencyDecayTimeIntegrator.hh @@ -0,0 +1,110 @@ + +/* +Copyright 2021 University of Warwick + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Laura++ package authors: +John Back +Paul Harrison +Thomas Latham +*/ + +/*! \file LauNonSmearedUniformEfficiencyDecayTimeIntegrator.hh + \brief File containing declaration of LauNonSmearedUniformEfficiencyDecayTimeIntegrator class. 
+*/ + +#ifndef LAU_NONSMEARED_UNIFORMEFFICIENCY_DECAYTIME_INTEGRATOR +#define LAU_NONSMEARED_UNIFORMEFFICIENCY_DECAYTIME_INTEGRATOR + +#include + +#include "Rtypes.h" + +#include "LauAbsDecayTimeIntegrator.hh" +#include "LauDecayTimePhysicsModel.hh" +#include "LauUniformDecayTimeEfficiency.hh" + +class LauParameter; + +/*! \class LauNonSmearedUniformEfficiencyDecayTimeIntegrator + \brief Class for defining the decay time integrator for the case of no resolution and uniform efficiency +*/ + +class LauNonSmearedUniformEfficiencyDecayTimeIntegrator : public LauAbsDecayTimeIntegrator { + + public: + //! Constructor + LauNonSmearedUniformEfficiencyDecayTimeIntegrator( const Double_t minAbscissaVal, const Double_t maxAbscissaVal, const LauDecayTimePhysicsModel& physModel, const LauUniformDecayTimeEfficiency& effModel ); + + //! Cache information from data + /*! + \param [in] abscissaErrors the values of the per-event decay time error + */ + void cacheInfo( const std::vector& abscissaErrors ) override; + + //! Propagate any updates to parameters and recalculate information as neeeded + void propagateParUpdates() override; + + //! Retrieve the normalisation terms (optionally for a given event) + /*! + \param [in] iEvt the event index + */ + LauDecayTimeNormTerms getNormTerms( [[maybe_unused]] const std::size_t iEvt ) const override { return normTerms_; } + + //! Calculate the normalisation terms for a given value of the decay time error + /*! + \param [in] abscissaError the decay time error value (actually {1.0,err} to allow branchless control of scaling) + */ + LauDecayTimeNormTerms calcNormTerms( const std::array& abscissaError = {1.0,1.0} ) const override; + + private: + Double_t calcExpIntegral() const; + std::pair calcCosSinIntegral() const; + std::pair calcCoshSinhIntegral() const; + + //! The minimum value of the decay time + const Double_t minAbscissa_; + //! The maximum value of the decay time + const Double_t maxAbscissa_; + + //! 
The physics model + const LauDecayTimePhysicsModel& physModel_; + + //! The efficiency model + const LauUniformDecayTimeEfficiency& effModel_; + + //! Cached value of lifetime + const Double_t& tauVal_; + + //! Cached value of 1/lifetime + const Double_t& gammaVal_; + + //! Cached value of mass difference + const Double_t& deltaMVal_; + + //! Cached value of width difference + const Double_t& deltaGammaVal_; + + //! Have any of the physics parameters changed in the latest fit iteration + const bool& physicsParamChanged_; + + //! The cache of the norm terms + LauDecayTimeNormTerms normTerms_; + + ClassDefOverride(LauNonSmearedUniformEfficiencyDecayTimeIntegrator, 0) +}; + +#endif diff --git a/inc/LauSmearedBinnedEfficiencyDecayTimeIntegrator.hh b/inc/LauSmearedBinnedEfficiencyDecayTimeIntegrator.hh new file mode 100644 index 0000000..5d648d7 --- /dev/null +++ b/inc/LauSmearedBinnedEfficiencyDecayTimeIntegrator.hh @@ -0,0 +1,85 @@ + +/* +Copyright 2021 University of Warwick + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Laura++ package authors: +John Back +Paul Harrison +Thomas Latham +*/ + +/*! \file LauSmearedBinnedEfficiencyDecayTimeIntegrator.hh + \brief File containing declaration of LauSmearedBinnedEfficiencyDecayTimeIntegrator class. 
+*/ + +#ifndef LAU_SMEARED_BINNEDEFFICIENCY_DECAYTIME_INTEGRATOR +#define LAU_SMEARED_BINNEDEFFICIENCY_DECAYTIME_INTEGRATOR + +#include + +#include "Rtypes.h" + +#include "LauAbsDecayTimeIntegrator.hh" +#include "LauBinnedDecayTimeEfficiency.hh" +#include "LauDecayTimePhysicsModel.hh" +#include "LauSmearedUniformEfficiencyDecayTimeIntegrator.hh" +#include "LauUniformDecayTimeEfficiency.hh" + +class LauParameter; + +/*! \class LauSmearedBinnedEfficiencyDecayTimeIntegrator + \brief Class for defining the decay time integrator for the case of resolution smearing and binned efficiency +*/ + +class LauSmearedBinnedEfficiencyDecayTimeIntegrator : public LauAbsDecayTimeIntegrator { + + public: + //! Constructor + LauSmearedBinnedEfficiencyDecayTimeIntegrator( const Double_t minAbscissaVal, const Double_t maxAbscissaVal, const LauDecayTimePhysicsModel& physModel, const LauBinnedDecayTimeEfficiency& effModel, const LauDecayTimeResolution& resolModel ); + + //! Cache information from data + /*! + \param [in] abscissaErrors the values of the per-event decay time error + */ + void cacheInfo( const std::vector& abscissaErrors ) override; + + //! Propagate any updates to parameters and recalculate information as needed + void propagateParUpdates() override; + + //! Retrieve the normalisation terms (optionally for a given event) + /*! + \param [in] iEvt the event index + */ + LauDecayTimeNormTerms getNormTerms( const std::size_t iEvt ) const override; + + //! Calculate the normalisation terms for a given value of the decay time error + /*! + \param [in] abscissaError the decay time error value (actually {1.0,err} to allow branchless control of scaling) + */ + LauDecayTimeNormTerms calcNormTerms( const std::array& abscissaError ) const override; + + private: + //! The collection of uniform efficiency objects which we pass to the integrators + std::vector efficiencies_; + + //!
The collection of uniform efficiency integrator objects to which we delegate + std::vector integrators_; + + ClassDefOverride(LauSmearedBinnedEfficiencyDecayTimeIntegrator, 0) +}; + +#endif diff --git a/inc/LauSmearedDecayTimeCalculator.hh b/inc/LauSmearedDecayTimeCalculator.hh new file mode 100644 index 0000000..85e835b --- /dev/null +++ b/inc/LauSmearedDecayTimeCalculator.hh @@ -0,0 +1,147 @@ + +/* +Copyright 2021 University of Warwick + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Laura++ package authors: +John Back +Paul Harrison +Thomas Latham +*/ + +/*! \file LauSmearedDecayTimeCalculator.hh + \brief File containing declaration of LauSmearedDecayTimeCalculator class. +*/ + +#ifndef LAU_SMEARED_DECAYTIME_CALCULATOR +#define LAU_SMEARED_DECAYTIME_CALCULATOR + +#include +#include + +#include "Rtypes.h" + +#include "LauAbsDecayTimeCalculator.hh" +#include "LauDecayTimePhysicsModel.hh" +#include "LauDecayTimeResolution.hh" + +class LauParameter; + +/*! \class LauSmearedDecayTimeCalculator + \brief Class for defining the abstract interface for calculating the terms of the decay time model +*/ + +class LauSmearedDecayTimeCalculator : public LauAbsDecayTimeCalculator { + + public: + //! Constructor + LauSmearedDecayTimeCalculator( const Double_t minAbscissaVal, const Double_t maxAbscissaVal, const LauDecayTimePhysicsModel& physModel, const LauDecayTimeResolution& resolModel ); + + //! Cache information from data + /*! 
+ \param [in] abscissas the values of the per-event decay time + \param [in] abscissaErrors the values of the per-event decay time error + */ + void cacheInfo( const std::vector& abscissas, const std::vector& abscissaErrors ) override; + + //! Propagate any updates to parameters and recalculate information as neeeded + void propagateParUpdates() override; + + //! Calculate the terms for given values of decay time and decay time error + /*! + \param [in] abscissa the decay time value + \param [in] abscissaError the decay time error value (actually {1.0,err} to allow branchless control of scaling) + */ + LauDecayTimeTerms calcTerms( const Double_t abscissa, const std::array& abscissaError ) override; + + //! Retrieve the terms for a given event + /*! + \param [in] iEvt the event index + */ + LauDecayTimeTerms getTerms( const std::size_t iEvt ) const override { return terms_[iEvt]; } + + private: + std::complex smearedGeneralTerm( const std::complex& z, const Double_t x ) const; + + //! Recalculate the cached information + void updateCache(); + + //! The minimum value of the decay time + const Double_t minAbscissa_; + //! The maximum value of the decay time + const Double_t maxAbscissa_; + + //! The physics model + const LauDecayTimePhysicsModel& physModel_; + + //! The resolution model + const LauDecayTimeResolution& resolModel_; + + //! Cached value of lifetime + const Double_t& tauVal_; + + //! Cached value of 1/lifetime + const Double_t& gammaVal_; + + //! Cached value of mass difference + const Double_t& deltaMVal_; + + //! Cached value of width difference + const Double_t& deltaGammaVal_; + + //! Cached value of prompt fraction + const Double_t& fracPromptVal_; + + //! Cached value of the number of Gaussians in the resolution model + const std::size_t& nGauss_; + + //! Cached value of the Gaussian fractions + const std::vector& fractionVals_; + + //! Cached value of the Gaussian means + const std::vector& meanVals_; + + //! 
Cached value of the Gaussian widths + const std::vector& widthVals_; + + //! Cached value of the mean scaling flags + const std::vector& scaleMeans_; + + //! Cached value of the width scaling flags + const std::vector& scaleWidths_; + + //! Have any of the physics parameters changed in the latest fit iteration + const bool& physicsParamChanged_; + + //! Have any of the resolution parameters changed in the latest fit iteration + const bool& resoParamChanged_; + + //! Cache of the decay time for each event + std::vector abscissas_; + + //! Cache of the decay time errors for each event + /*! + The inner array contains { 1.0, error } to allow un-branched control of whether or not to scale + */ + std::vector> abscissaErrors_; + + //! The cache of the terms for each event + std::vector terms_; + + ClassDefOverride(LauSmearedDecayTimeCalculator, 0) +}; + +#endif diff --git a/inc/LauSmearedSplineEfficiencyDecayTimeIntegrator.hh b/inc/LauSmearedSplineEfficiencyDecayTimeIntegrator.hh new file mode 100644 index 0000000..c0f094b --- /dev/null +++ b/inc/LauSmearedSplineEfficiencyDecayTimeIntegrator.hh @@ -0,0 +1,753 @@ + +/* +Copyright 2021 University of Warwick + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Laura++ package authors: +John Back +Paul Harrison +Thomas Latham +*/ + +/*! \file LauSmearedSplineEfficiencyDecayTimeIntegrator.hh + \brief File containing declaration of LauSmearedSplineEfficiencyDecayTimeIntegrator class. 
+*/ + +#ifndef LAU_SMEARED_SPLINEEFFICIENCY_DECAYTIME_INTEGRATOR +#define LAU_SMEARED_SPLINEEFFICIENCY_DECAYTIME_INTEGRATOR + +#include +#include +#include + +#include "RooMath.h" +#include "Rtypes.h" +#include "TSystem.h" + +#include "LauAbsDecayTimeIntegrator.hh" +#include "LauConstants.hh" +#include "LauDecayTime.hh" +#include "LauDecayTimePhysicsModel.hh" +#include "LauDecayTimeResolution.hh" +#include "LauSplineDecayTimeEfficiency.hh" + +class LauParameter; + +/*! \class LauSmearedSplineEfficiencyDecayTimeIntegrator + \brief Class for defining the decay time integrator for the case of resolution smearing and spline-interpolated efficiency +*/ + +template +class LauSmearedSplineEfficiencyDecayTimeIntegrator : public LauAbsDecayTimeIntegrator { + + public: + //! Constructor + LauSmearedSplineEfficiencyDecayTimeIntegrator( const Double_t minAbscissaVal, const Double_t maxAbscissaVal, const LauDecayTimePhysicsModel& physModel, const LauSplineDecayTimeEfficiency& effModel, const LauDecayTimeResolution& resolModel ); + + //! Cache information from data + /*! + \param [in] abscissaErrors the values of the per-event decay time error + */ + void cacheInfo( const std::vector& abscissaErrors ) override; + + //! Propagate any updates to parameters and recalculate information as needed + void propagateParUpdates() override; + + //! Retrieve the normalisation terms (optionally for a given event) + /*! + \param [in] iEvt the event index + */ + LauDecayTimeNormTerms getNormTerms( const std::size_t iEvt ) const override { return normTerms_[iEvt]; } + + //! Calculate the normalisation terms for a given value of the decay time error + /*! + \param [in] abscissaError the decay time error value (actually {1.0,err} to allow branchless control of scaling) + */ + LauDecayTimeNormTerms calcNormTerms( const std::array& abscissaError ) const override; + + private: + //!
The number of coefficients of each spline segment + static constexpr std::size_t nCoeffs { static_cast>(Order) + 1 }; + + using RealArray = std::array; + using Real2DArray = std::array; + using ComplexArray = std::array,nCoeffs>; + + void updateCoeffCache(); + void updateMeanAndWidthPowersCache(); + void updateKvecMvecCache( const bool forceUpdate = false ); + //! Calculate the normalisation terms for a given value of the decay time error + /*! + \param [in] abscissaError the decay time error value (actually {1.0,err} to allow branchless control of scaling) + */ + LauDecayTimeNormTerms calcNormTerms( const std::size_t iEvt ) const; + + std::complex sumIkTerms( const RealArray& coeffs, const ComplexArray& kVec, const ComplexArray& mVec, const RealArray& widthPowers, const RealArray& meanPowers ) const; + LauDecayTimeNormTerms sumIntegrals( const std::size_t iEvt, const std::size_t iSeg, const std::size_t iGauss ) const; + RealArray generateMeanPowers(const std::array& abscissaError, const std::size_t iGauss) const; + RealArray generateWidthPowers(const std::array& abscissaError, const std::size_t iGauss) const; + ComplexArray generateKvector(const std::complex& z) const; + ComplexArray generateMvector(const Double_t minAbs, const Double_t maxAbs, const std::complex& z, const Double_t widthOverRoot2, const Double_t mean) const; + ComplexArray generateMvectorLimit(const Double_t abs, const std::complex& z, const Double_t widthOverRoot2, const Double_t mean) const; + + //! The minimum value of the decay time + const Double_t minAbscissa_; + + //! The maximum value of the decay time + const Double_t maxAbscissa_; + + //! The physics model + const LauDecayTimePhysicsModel& physModel_; + + //! The efficiency model + const LauSplineDecayTimeEfficiency& effModel_; + + //! The resolution model + const LauDecayTimeResolution& resolModel_; + + //! Cached value of lifetime + const Double_t& tauVal_; + + //! Cached value of 1/lifetime + const Double_t& gammaVal_; + + //! 
Cached value of mass difference + const Double_t& deltaMVal_; + + //! Cached value of width difference + const Double_t& deltaGammaVal_; + + //! Cached value of prompt fraction + const Double_t& fracPromptVal_; + + //! Cached value of the number of Gaussians in the resolution model + const std::size_t& nGauss_; + + //! Cached value of the Gaussian fractions + const std::vector& fractionVals_; + + //! Cached value of the Gaussian means + const std::vector& meanVals_; + + //! Cached value of the Gaussian widths + const std::vector& widthVals_; + + //! Cached value of the mean scaling flags + const std::vector& scaleMeans_; + + //! Cached value of the width scaling flags + const std::vector& scaleWidths_; + + //! Have any of the physics parameters changed in the latest fit iteration + const bool& physicsParamChanged_; + + //! Has the lifetime changed in the latest fit iteration + const bool& tauChanged_; + + //! Has the mass difference changed in the latest fit iteration + const bool& deltaMChanged_; + + //! Has the width difference changed in the latest fit iteration + const bool& deltaGammaChanged_; + + //! Has the prompt fraction changed in the latest fit iteration + const bool& fracPromptChanged_; + + //! Have any of the efficiency parameters changed in the latest fit iteration + const bool& effParamChanged_; + + //!
Have any of the resolution parameters changed in the latest fit iteration + const bool& resoParamChanged_; + + RealArray factorial_; + + Real2DArray binomial_; + + std::vector coeffs_; + + // outer vector has nEvents entries, inner vector has nGauss_ entries, array has 0th - Order or 1st - Order+1 powers, respectively + std::vector> meanPowerVals_; + std::vector> widthPowerVals_; + + // outer vector has nEvents entries, inner vector has nGauss_ entries, array has 0 - Order entries of the K-vector + std::vector> expTermKvecVals_; + std::vector> trigTermKvecVals_; + std::vector> hypHTermKvecVals_; + std::vector> hypLTermKvecVals_; + + // outer vector has nEvents entries, middle vector has nSplineSegments entries, inner vector has nGauss_ entries, array has 0 - Order entries of the M-vector + std::vector>> expTermMvecVals_; + std::vector>> trigTermMvecVals_; + std::vector>> hypHTermMvecVals_; + std::vector>> hypLTermMvecVals_; + + //! Cache of the decay time errors for each event + /*! + The inner array contains { 1.0, error } to allow un-branched control of whether or not to scale + */ + std::vector> abscissaErrors_; + + //! 
Cache of the normalisation terms for each event + std::vector normTerms_; + + ClassDefOverride(LauSmearedSplineEfficiencyDecayTimeIntegrator, 0) +}; + + +templateClassImp(LauSmearedSplineEfficiencyDecayTimeIntegrator); + +template +LauSmearedSplineEfficiencyDecayTimeIntegrator::LauSmearedSplineEfficiencyDecayTimeIntegrator( const Double_t minAbscissaVal, const Double_t maxAbscissaVal, const LauDecayTimePhysicsModel& physModel, const LauSplineDecayTimeEfficiency& effModel, const LauDecayTimeResolution& resolModel ) : + minAbscissa_{minAbscissaVal}, + maxAbscissa_{maxAbscissaVal}, + physModel_{physModel}, + effModel_{effModel}, + resolModel_{resolModel}, + tauVal_{physModel_.getLifetimeValue()}, + gammaVal_{physModel_.getGammaValue()}, + deltaMVal_{physModel_.getDeltaMValue()}, + deltaGammaVal_{physModel_.getDeltaGammaValue()}, + fracPromptVal_{physModel_.getFracPromptValue()}, + nGauss_{resolModel_.nGauss()}, + fractionVals_{resolModel_.getFractionValues()}, + meanVals_{resolModel_.getMeanValues()}, + widthVals_{resolModel_.getWidthValues()}, + scaleMeans_{resolModel_.scaleMeans()}, + scaleWidths_{resolModel_.scaleWidths()}, + physicsParamChanged_{physModel_.anythingChanged()}, + tauChanged_{physModel_.lifetimeChanged()}, + deltaMChanged_{physModel_.deltaMChanged()}, + deltaGammaChanged_{physModel_.deltaGammaChanged()}, + fracPromptChanged_{physModel_.fracPromptChanged()}, + effParamChanged_{effModel_.anythingChanged()}, + resoParamChanged_{resolModel_.anythingChanged()} +{ + switch ( physModel_.getFunctionType() ) { + case LauDecayTime::FuncType::Hist : + // This does not make sense for us: + // - Hist has no need of an integrator + std::cerr << "ERROR in LauSmearedSplineEfficiencyDecayTimeIntegrator::LauSmearedSplineEfficiencyDecayTimeIntegrator : Unsupported function type in the physics model" << std::endl; + gSystem->Exit(EXIT_FAILURE); + break; + case LauDecayTime::FuncType::Delta : + case LauDecayTime::FuncType::DeltaExp : + // TODO These are not yet 
implemented + std::cerr << "ERROR in LauSmearedSplineEfficiencyDecayTimeIntegrator::LauSmearedSplineEfficiencyDecayTimeIntegrator : Function type not yet supported" << std::endl; + gSystem->Exit(EXIT_FAILURE); + break; + case LauDecayTime::FuncType::Exp : + case LauDecayTime::FuncType::ExpTrig : + case LauDecayTime::FuncType::ExpHypTrig : + // All fine, we can deal with these + break; + } + + // TODO - we should check that the range of the spline matches the range we've been given + + // populate the binomial and factorial arrays + for ( std::size_t i{0}; i < nCoeffs; ++i ) { + factorial_[i] = TMath::Factorial(i); + for ( std::size_t j{0}; j < nCoeffs; ++j ) { + if ( j <= i ) { + binomial_[i][j] = TMath::Binomial(i,j); + } else { + binomial_[i][j] = 0.0; + } + } + } +} + +template +void LauSmearedSplineEfficiencyDecayTimeIntegrator::cacheInfo( const std::vector& abscissaErrors ) +{ + abscissaErrors_.clear(); + if ( abscissaErrors.empty() ) { + if ( resolModel_.scaleWithPerEventError() ) { + std::cerr << "ERROR in LauSmearedSplineEfficiencyDecayTimeIntegrator::cacheInfo : No per-event decay time errors provided" << std::endl; + gSystem->Exit(EXIT_FAILURE); + } + abscissaErrors_.push_back( { 1.0, 1.0 } ); + } else { + abscissaErrors_.reserve( abscissaErrors.size() ); + for ( auto error : abscissaErrors ) { + abscissaErrors_.push_back( { 1.0, error } ); + } + } + + const std::size_t nSplineSegments { effModel_.nSegments() }; + coeffs_.clear(); coeffs_.resize(nSplineSegments); + + const std::size_t nEvents { abscissaErrors_.size() }; + + meanPowerVals_.clear(); meanPowerVals_.resize(nEvents); + for ( auto& innerVec : meanPowerVals_ ) { innerVec.resize(nGauss_); } + widthPowerVals_.clear(); widthPowerVals_.resize(nEvents); + for ( auto& innerVec : widthPowerVals_ ) { innerVec.resize(nGauss_); } + + expTermKvecVals_.clear(); expTermKvecVals_.resize(nEvents); + for ( auto& innerVec : expTermKvecVals_ ) { innerVec.resize(nGauss_); } + trigTermKvecVals_.clear(); 
trigTermKvecVals_.resize(nEvents); + for ( auto& innerVec : trigTermKvecVals_ ) { innerVec.resize(nGauss_); } + hypHTermKvecVals_.clear(); hypHTermKvecVals_.resize(nEvents); + for ( auto& innerVec : hypHTermKvecVals_ ) { innerVec.resize(nGauss_); } + hypLTermKvecVals_.clear(); hypLTermKvecVals_.resize(nEvents); + for ( auto& innerVec : hypLTermKvecVals_ ) { innerVec.resize(nGauss_); } + + // TODO - reorder iSeg and iGauss in M-vectors? what's better for vectorisation? + expTermMvecVals_.clear(); expTermMvecVals_.resize(nEvents); + for ( auto& middleVec : expTermMvecVals_) { + middleVec.resize(nSplineSegments); + for ( auto& innerVec : middleVec ) { + innerVec.resize(nGauss_); + } + } + trigTermMvecVals_.clear(); trigTermMvecVals_.resize(nEvents); + for ( auto& middleVec : trigTermMvecVals_) { + middleVec.resize(nSplineSegments); + for ( auto& innerVec : middleVec ) { + innerVec.resize(nGauss_); + } + } + hypHTermMvecVals_.clear(); hypHTermMvecVals_.resize(nEvents); + for ( auto& middleVec : hypHTermMvecVals_) { + middleVec.resize(nSplineSegments); + for ( auto& innerVec : middleVec ) { + innerVec.resize(nGauss_); + } + } + hypLTermMvecVals_.clear(); hypLTermMvecVals_.resize(nEvents); + for ( auto& middleVec : hypLTermMvecVals_) { + middleVec.resize(nSplineSegments); + for ( auto& innerVec : middleVec ) { + innerVec.resize(nGauss_); + } + } + + this->updateCoeffCache(); + this->updateMeanAndWidthPowersCache(); + this->updateKvecMvecCache(true); + + normTerms_.clear(); + normTerms_.resize( nEvents ); + for ( std::size_t iEvt{0}; iEvt < nEvents; ++iEvt ) { + normTerms_[iEvt] = this->calcNormTerms( iEvt ); + } +} + +template +void LauSmearedSplineEfficiencyDecayTimeIntegrator::propagateParUpdates() +{ + // if any of the efficiency parameters changed in this iteration + // we need to get new coefficients for each spline segment + if ( effParamChanged_ ) { + this->updateCoeffCache(); + } + + // if any of the resolution parameters changed in this iteration + // we need to 
update the powers of the means and widths + if ( resoParamChanged_ ) { + this->updateMeanAndWidthPowersCache(); + } + + // if any of the physics or resolution parameters changed in this iteration + // we need to calculate new values of the K- and M- vectors + if ( physicsParamChanged_ or resoParamChanged_ ) { + this->updateKvecMvecCache(resoParamChanged_); + } + + if ( effParamChanged_ or physicsParamChanged_ or resoParamChanged_ ) { + const std::size_t nEvents { abscissaErrors_.size() }; + for ( std::size_t iEvt{0}; iEvt < nEvents; ++iEvt ) { + normTerms_[iEvt] = this->calcNormTerms( iEvt ); + } + } +} + +template +void LauSmearedSplineEfficiencyDecayTimeIntegrator::updateCoeffCache() +{ + const std::size_t nSplineSegments { effModel_.nSegments() }; + for ( std::size_t iSeg{0}; iSeg < nSplineSegments; ++iSeg ) { + coeffs_[iSeg] = effModel_.getCoefficients(iSeg); + } +} + +template +void LauSmearedSplineEfficiencyDecayTimeIntegrator::updateMeanAndWidthPowersCache() +{ + // Calculate powers of mean and width/sqrt(2) needed by all terms in the smeared spline normalisation + + const std::size_t nEvents { abscissaErrors_.size() }; + for ( std::size_t iEvt{0}; iEvt < nEvents; ++iEvt ) { + for ( std::size_t iGauss{0}; iGauss < nGauss_; ++iGauss ) { + meanPowerVals_[iEvt][iGauss] = this->generateMeanPowers( abscissaErrors_[iEvt], iGauss ); + widthPowerVals_[iEvt][iGauss] = this->generateWidthPowers( abscissaErrors_[iEvt], iGauss ); + } + } +} + + +template +typename LauSmearedSplineEfficiencyDecayTimeIntegrator::RealArray LauSmearedSplineEfficiencyDecayTimeIntegrator::generateMeanPowers(const std::array& abscissaError, const std::size_t iGauss) const +{ + const Double_t mean { meanVals_[iGauss] * abscissaError[scaleMeans_[iGauss]] }; + const Double_t meanSq { mean * mean }; + + RealArray meanPowers; + meanPowers[0] = 1.0; + meanPowers[1] = mean; + meanPowers[2] = meanSq; + meanPowers[3] = mean*meanSq; + if constexpr ( Order == LauSplineOrder::Sixth ) { + meanPowers[4] = 
meanPowers[2]*meanPowers[2]; + meanPowers[5] = meanPowers[2]*meanPowers[3]; + meanPowers[6] = meanPowers[3]*meanPowers[3]; + } + + return meanPowers; +} + +template +typename LauSmearedSplineEfficiencyDecayTimeIntegrator::RealArray LauSmearedSplineEfficiencyDecayTimeIntegrator::generateWidthPowers(const std::array& abscissaError, const std::size_t iGauss) const +{ + const Double_t widthOverRoot2 { widthVals_[iGauss] * abscissaError[scaleWidths_[iGauss]] / LauConstants::root2 }; + const Double_t widthOverRoot2Sq { widthOverRoot2 * widthOverRoot2 }; + + RealArray widthPowers; + widthPowers[0] = widthOverRoot2; + widthPowers[1] = widthOverRoot2Sq; + widthPowers[2] = widthOverRoot2*widthOverRoot2Sq; + widthPowers[3] = widthOverRoot2Sq*widthOverRoot2Sq; + if constexpr ( Order == LauSplineOrder::Sixth ) { + widthPowers[4] = widthPowers[1]*widthPowers[2]; + widthPowers[5] = widthPowers[2]*widthPowers[2]; + widthPowers[6] = widthPowers[2]*widthPowers[3]; + } + + return widthPowers; +} + + +template +typename LauSmearedSplineEfficiencyDecayTimeIntegrator::ComplexArray LauSmearedSplineEfficiencyDecayTimeIntegrator::generateKvector(const std::complex& z) const +{ + const std::complex zr { 1.0/z }; + const std::complex zr2 { zr*zr }; + + ComplexArray K; + + K[0] = 0.5*zr; + K[1] = 0.5*zr2; + K[2] = zr*(1.0+zr2); + K[3] = 3.0*zr2*(1.0+zr2); + if constexpr ( Order == LauSplineOrder::Sixth ) { + K[4] = 6.0*zr*(2.0*zr2*(1.0+zr2)+1.0); + K[5] = 30.0*zr2*(2.0*zr2*(1.0+zr2)+1.0); + K[6] = 60.0*zr*(3.0*zr2*(2.0*zr2*(1.0+zr2)+1.0)+1.0); + } + + return K; +} + +template +typename LauSmearedSplineEfficiencyDecayTimeIntegrator::ComplexArray LauSmearedSplineEfficiencyDecayTimeIntegrator::generateMvector(const Double_t minAbs, const Double_t maxAbs, const std::complex& z, const Double_t widthOverRoot2, const Double_t mean) const +{ + ComplexArray minM { this->generateMvectorLimit( minAbs, z, widthOverRoot2, mean ) }; + ComplexArray maxM { this->generateMvectorLimit( maxAbs, z, 
widthOverRoot2, mean ) }; + + ComplexArray M; + for ( std::size_t i{0}; i < nCoeffs; ++i ) { + M[i] = maxM[i] - minM[i]; + } + + return M; +} + +template +typename LauSmearedSplineEfficiencyDecayTimeIntegrator::ComplexArray LauSmearedSplineEfficiencyDecayTimeIntegrator::generateMvectorLimit(const Double_t abs, const std::complex& z, const Double_t widthOverRoot2, const Double_t mean) const +{ + using namespace std::complex_literals; + + const Double_t x { (abs - mean) / (2.0 * widthOverRoot2) }; + const Double_t x2 { x * x }; + const Double_t ex2 { TMath::Exp(-(x2)) }; + + const std::complex arg { 1i * (z - x) }; + + //fad = the faddeeva term times the ex2 value (done in different ways depending on the domain) + std::complex fad; + + if ( arg.imag() < -5.0 ) { + fad = std::exp(-(x2) - (arg*arg)) * RooMath::erfc(-1i * arg); + } else { + fad = ex2 * RooMath::faddeeva(arg); + } + + const Double_t invRootPi { 1.0 / LauConstants::rootPi }; + + ComplexArray M; + M[0] = RooMath::erf(x) - fad; + M[1] = -2.0 * (invRootPi*ex2 + x*fad); + M[2] = -2.0 * (2.0*x*invRootPi*ex2 + (2.0*x2 - 1.0)*fad); + M[3] = -4.0 * ((2.0*x2 - 1.0)*invRootPi*ex2 + x*(2.0*x2-3.0)*fad); + if constexpr ( Order == LauSplineOrder::Sixth ) { + const Double_t x3 { x * x2 }; + M[4] = -4.0 * ((2.0*x2 - 3.0)*2.0*x*ex2*invRootPi + (3.0+4.0*x2*(x2-3.0))*fad); + M[5] = -8.0 * ((3.0 + (x2 - 3.0)*4.0*x2)*ex2*invRootPi + (15.0 + (x2-5.0)*4.0*x2)*x*fad); + M[6] = -8.0 * ((15.0 + (x2 - 5.0)*4.0*x2)*2.0*x*ex2*invRootPi + (8.0*x3*x3 - 60.0*x2*x2 + 90.0*x2 - 15.0)*fad); + } + + return M; +} + +template +void LauSmearedSplineEfficiencyDecayTimeIntegrator::updateKvecMvecCache( const bool forceUpdate ) +{ + const std::size_t nEvents { abscissaErrors_.size() }; + const std::size_t nSplineSegments { effModel_.nSegments() }; + const std::vector& tVals { effModel_.knotPositions() }; + + std::complex z { 0.0, 0.0 }; + + if ( tauChanged_ or forceUpdate ) { + for ( std::size_t iEvt{0}; iEvt < nEvents; ++iEvt ) { + for ( 
std::size_t iGauss{0}; iGauss < nGauss_; ++iGauss ) { + const Double_t widthOverRoot2 { widthPowerVals_[iEvt][iGauss][0] }; + const Double_t mean { meanPowerVals_[iEvt][iGauss][1] }; + + z = gammaVal_ * widthOverRoot2; + + expTermKvecVals_[iEvt][iGauss] = this->generateKvector(z); + + for ( std::size_t iSeg{0}; iSeg < nSplineSegments; ++iSeg ) { + const Double_t minAbs { tVals[iSeg] }; + const Double_t maxAbs { tVals[iSeg+1] }; + expTermMvecVals_[iEvt][iSeg][iGauss] = this->generateMvector(minAbs, maxAbs, z, widthOverRoot2, mean); + } + } + } + } + + const LauDecayTime::FuncType funcType { physModel_.getFunctionType() }; + if ( funcType == LauDecayTime::FuncType::ExpTrig or funcType == LauDecayTime::FuncType::ExpHypTrig ) { + if ( tauChanged_ or deltaMChanged_ or forceUpdate ) { + for ( std::size_t iEvt{0}; iEvt < nEvents; ++iEvt ) { + for ( std::size_t iGauss{0}; iGauss < nGauss_; ++iGauss ) { + const Double_t widthOverRoot2 { widthPowerVals_[iEvt][iGauss][0] }; + const Double_t mean { meanPowerVals_[iEvt][iGauss][1] }; + + z.real( gammaVal_ * widthOverRoot2 ); + z.imag( -deltaMVal_ * widthOverRoot2 ); + + trigTermKvecVals_[iEvt][iGauss] = this->generateKvector(z); + + for ( std::size_t iSeg{0}; iSeg < nSplineSegments; ++iSeg ) { + const Double_t minAbs { tVals[iSeg] }; + const Double_t maxAbs { tVals[iSeg+1] }; + trigTermMvecVals_[iEvt][iSeg][iGauss] = this->generateMvector(minAbs, maxAbs, z, widthOverRoot2, mean); + } + } + } + } + + if ( funcType == LauDecayTime::FuncType::ExpHypTrig ) { + if ( tauChanged_ or deltaGammaChanged_ or forceUpdate ) { + for ( std::size_t iEvt{0}; iEvt < nEvents; ++iEvt ) { + for ( std::size_t iGauss{0}; iGauss < nGauss_; ++iGauss ) { + const Double_t widthOverRoot2 { widthPowerVals_[iEvt][iGauss][0] }; + const Double_t mean { meanPowerVals_[iEvt][iGauss][1] }; + + z.real( ( gammaVal_ - 0.5 * deltaGammaVal_ ) * widthOverRoot2 );; + z.imag( 0.0 ); + hypHTermKvecVals_[iEvt][iGauss] = this->generateKvector(z); + + for ( std::size_t 
iSeg{0}; iSeg < nSplineSegments; ++iSeg ) { + const Double_t minAbs { tVals[iSeg] }; + const Double_t maxAbs { tVals[iSeg+1] }; + hypHTermMvecVals_[iEvt][iSeg][iGauss] = this->generateMvector(minAbs, maxAbs, z, widthOverRoot2, mean); + } + + z.real( ( gammaVal_ + 0.5 * deltaGammaVal_ ) * widthOverRoot2 ); + z.imag( 0.0 ); + hypLTermKvecVals_[iEvt][iGauss] = this->generateKvector(z); + + for ( std::size_t iSeg{0}; iSeg < nSplineSegments; ++iSeg ) { + const Double_t minAbs { tVals[iSeg] }; + const Double_t maxAbs { tVals[iSeg+1] }; + hypLTermMvecVals_[iEvt][iSeg][iGauss] = this->generateMvector(minAbs, maxAbs, z, widthOverRoot2, mean); + } + } + } + } + } + } +} + +template +LauDecayTimeNormTerms LauSmearedSplineEfficiencyDecayTimeIntegrator::calcNormTerms( const std::array& abscissaError ) const +{ + LauDecayTimeNormTerms normTerms; + + const LauDecayTime::FuncType funcType { physModel_.getFunctionType() }; + const std::size_t nSplineSegments { effModel_.nSegments() }; + const std::vector& tVals { effModel_.knotPositions() }; + + for ( std::size_t iGauss{0}; iGauss < nGauss_; ++iGauss ) { + + const RealArray meanPowers { this->generateMeanPowers( abscissaError, iGauss ) }; + const RealArray widthPowers { this->generateWidthPowers( abscissaError, iGauss ) }; + + const Double_t mean { meanPowers[1] }; + const Double_t widthOverRoot2 { widthPowers[0] }; + const Double_t fraction { fractionVals_[iGauss] }; + + const std::complex zExp { gammaVal_ * widthOverRoot2 }; + const std::complex zTrig { gammaVal_ * widthOverRoot2, -deltaMVal_ * widthOverRoot2 }; + const std::complex zHypH { ( gammaVal_ - 0.5 * deltaGammaVal_ ) * widthOverRoot2 }; + const std::complex zHypL { ( gammaVal_ + 0.5 * deltaGammaVal_ ) * widthOverRoot2 }; + + ComplexArray expTermKvec { this->generateKvector(zExp) }; + ComplexArray trigTermKvec; + ComplexArray hypHTermKvec; + ComplexArray hypLTermKvec; + if ( funcType == LauDecayTime::FuncType::ExpTrig or funcType == LauDecayTime::FuncType::ExpHypTrig ) { 
+ trigTermKvec = this->generateKvector(zTrig); + if ( funcType == LauDecayTime::FuncType::ExpHypTrig ) { + hypHTermKvec = this->generateKvector(zHypH); + hypLTermKvec = this->generateKvector(zHypL); + } + } + + for ( std::size_t iSeg{0}; iSeg < nSplineSegments; ++iSeg ) { + const Double_t minAbs { tVals[iSeg] }; + const Double_t maxAbs { tVals[iSeg+1] }; + + ComplexArray expTermMvec { this->generateMvector(minAbs, maxAbs, zExp, widthOverRoot2, mean) }; + ComplexArray trigTermMvec; + ComplexArray hypHTermMvec; + ComplexArray hypLTermMvec; + if ( funcType == LauDecayTime::FuncType::ExpTrig or funcType == LauDecayTime::FuncType::ExpHypTrig ) { + trigTermMvec = this->generateMvector(minAbs, maxAbs, zTrig, widthOverRoot2, mean); + if ( funcType == LauDecayTime::FuncType::ExpHypTrig ) { + hypHTermMvec = this->generateMvector(minAbs, maxAbs, zHypH, widthOverRoot2, mean); + hypLTermMvec = this->generateMvector(minAbs, maxAbs, zHypL, widthOverRoot2, mean); + } + } + + const Double_t expIntegral { this->sumIkTerms(coeffs_[iSeg], expTermKvec, expTermMvec, widthPowers, meanPowers).real() }; + normTerms.expTerm += fraction * expIntegral; + + if ( funcType == LauDecayTime::FuncType::ExpTrig or funcType == LauDecayTime::FuncType::ExpHypTrig ) { + const std::complex trigIntegral { this->sumIkTerms(coeffs_[iSeg], trigTermKvec, trigTermMvec, widthPowers, meanPowers) }; + normTerms.cosTerm += fraction * trigIntegral.real(); + normTerms.sinTerm += fraction * trigIntegral.imag(); + + if ( funcType == LauDecayTime::FuncType::ExpTrig ) { + + normTerms.coshTerm += fraction * expIntegral; + + } else { + const Double_t integralH { this->sumIkTerms(coeffs_[iSeg], hypHTermKvec, hypHTermMvec, widthPowers, meanPowers).real() }; + const Double_t integralL { this->sumIkTerms(coeffs_[iSeg], hypLTermKvec, hypLTermMvec, widthPowers, meanPowers).real() }; + + const Double_t coshIntegral { 0.5 * (integralH + integralL) }; + const Double_t sinhIntegral { 0.5 * (integralH - integralL) }; + + 
normTerms.coshTerm += fraction * coshIntegral; + normTerms.sinhTerm += fraction * sinhIntegral; + } + } + + } + } + + return normTerms; +} + +template +LauDecayTimeNormTerms LauSmearedSplineEfficiencyDecayTimeIntegrator::calcNormTerms( const std::size_t iEvt ) const +{ + LauDecayTimeNormTerms normTerms; + + // Sum the integral for each spline segment and each Gaussian + const std::size_t nSplineSegments { effModel_.nSegments() }; + for ( std::size_t iSeg{0}; iSeg < nSplineSegments; ++iSeg ) { + for ( std::size_t iGauss{0}; iGauss < nGauss_; ++iGauss ) { + + normTerms += this->sumIntegrals( iEvt, iSeg, iGauss ); + } + } + + return normTerms; +} + +template +LauDecayTimeNormTerms LauSmearedSplineEfficiencyDecayTimeIntegrator::sumIntegrals( const std::size_t iEvt, const std::size_t iSeg, const std::size_t iGauss ) const +{ + LauDecayTimeNormTerms normTerms; + + const Double_t fraction { fractionVals_[iGauss] }; + + const Double_t expIntegral { this->sumIkTerms(coeffs_[iSeg], expTermKvecVals_[iEvt][iGauss], expTermMvecVals_[iEvt][iSeg][iGauss], widthPowerVals_[iEvt][iGauss], meanPowerVals_[iEvt][iGauss]).real() }; + + normTerms.expTerm = fraction * expIntegral; + + const LauDecayTime::FuncType funcType { physModel_.getFunctionType() }; + if ( funcType == LauDecayTime::FuncType::ExpTrig or funcType == LauDecayTime::FuncType::ExpHypTrig ) { + + const std::complex trigIntegral { this->sumIkTerms(coeffs_[iSeg], trigTermKvecVals_[iEvt][iGauss], trigTermMvecVals_[iEvt][iSeg][iGauss], widthPowerVals_[iEvt][iGauss], meanPowerVals_[iEvt][iGauss]) }; + + normTerms.cosTerm = fraction * trigIntegral.real(); + normTerms.sinTerm = fraction * trigIntegral.imag(); + + if ( funcType == LauDecayTime::FuncType::ExpTrig ) { + + normTerms.coshTerm = normTerms.expTerm; + + } else { + + const Double_t integralH { this->sumIkTerms(coeffs_[iSeg], hypHTermKvecVals_[iEvt][iGauss], hypHTermMvecVals_[iEvt][iSeg][iGauss], widthPowerVals_[iEvt][iGauss], meanPowerVals_[iEvt][iGauss]).real() }; + 
const Double_t integralL { this->sumIkTerms(coeffs_[iSeg], hypLTermKvecVals_[iEvt][iGauss], hypLTermMvecVals_[iEvt][iSeg][iGauss], widthPowerVals_[iEvt][iGauss], meanPowerVals_[iEvt][iGauss]).real() }; + + const Double_t coshIntegral { 0.5 * (integralH + integralL) }; + const Double_t sinhIntegral { 0.5 * (integralH - integralL) }; + + normTerms.coshTerm = fraction * coshIntegral; + normTerms.sinhTerm = fraction * sinhIntegral; + } + } + + return normTerms; +} + +template +std::complex LauSmearedSplineEfficiencyDecayTimeIntegrator::sumIkTerms( const RealArray& coeffs, const ComplexArray& kVec, const ComplexArray& mVec, const RealArray& widthPowers, const RealArray& meanPowers ) const +{ + // Triple sum to get N (eqn 31 and 29 in https://arxiv.org/pdf/1407.0748.pdf) + std::complex integral { 0.0 }; + + for ( std::size_t k{0}; k < nCoeffs; ++k ) { + for ( std::size_t n{0}; n <= k; ++n ) { + for ( std::size_t i{0}; i <= n; ++i ) { + //The binomial coefficient terms + const Double_t b { binomial_[n][i]*binomial_[k][n] }; + integral += widthPowers[n]*coeffs[k]*meanPowers[k-n]*kVec[i]*mVec[n-i]*b; + }}} + + return integral; +} + +#endif diff --git a/inc/LauSmearedUniformEfficiencyDecayTimeIntegrator.hh b/inc/LauSmearedUniformEfficiencyDecayTimeIntegrator.hh new file mode 100644 index 0000000..3f1507c --- /dev/null +++ b/inc/LauSmearedUniformEfficiencyDecayTimeIntegrator.hh @@ -0,0 +1,144 @@ + +/* +Copyright 2021 University of Warwick + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +/* +Laura++ package authors: +John Back +Paul Harrison +Thomas Latham +*/ + +/*! \file LauSmearedUniformEfficiencyDecayTimeIntegrator.hh + \brief File containing declaration of LauSmearedUniformEfficiencyDecayTimeIntegrator class. +*/ + +#ifndef LAU_SMEARED_UNIFORMEFFICIENCY_DECAYTIME_INTEGRATOR +#define LAU_SMEARED_UNIFORMEFFICIENCY_DECAYTIME_INTEGRATOR + +#include +#include + +#include "Rtypes.h" + +#include "LauAbsDecayTimeIntegrator.hh" +#include "LauDecayTimePhysicsModel.hh" +#include "LauDecayTimeResolution.hh" +#include "LauUniformDecayTimeEfficiency.hh" + +class LauParameter; + +/*! \class LauSmearedUniformEfficiencyDecayTimeIntegrator + \brief Class for defining the decay time integrator for the case of no resolution and uniform efficiency +*/ + +class LauSmearedUniformEfficiencyDecayTimeIntegrator : public LauAbsDecayTimeIntegrator { + + public: + //! Constructor + LauSmearedUniformEfficiencyDecayTimeIntegrator( const Double_t minAbscissaVal, const Double_t maxAbscissaVal, const LauDecayTimePhysicsModel& physModel, const LauUniformDecayTimeEfficiency& effModel, const LauDecayTimeResolution& resolModel ); + + //! Cache information from data + /*! + \param [in] abscissaErrors the values of the per-event decay time error + */ + void cacheInfo( const std::vector& abscissaErrors ) override; + + //! Propagate any updates to parameters and recalculate information as neeeded + void propagateParUpdates() override; + + //! Retrieve the normalisation terms (optionally for a given event) + /*! + \param [in] iEvt the event index + */ + LauDecayTimeNormTerms getNormTerms( const std::size_t iEvt ) const override { return normTerms_[iEvt]; } + + //! Calculate the normalisation terms for a given value of the decay time error + /*! 
+ \param [in] abscissaError the decay time error value (actually {1.0,err} to allow branchless control of scaling) + */ + LauDecayTimeNormTerms calcNormTerms( const std::array& abscissaError ) const override; + + private: + LauDecayTimeNormTerms calcIntegrals( const std::array& abscissaError, const std::size_t iGauss ) const; + std::complex smearedGeneralIntegral(const std::complex& z, const Double_t sigmaOverRoot2, const Double_t mu) const; + + //! The minimum value of the decay time + const Double_t minAbscissa_; + //! The maximum value of the decay time + const Double_t maxAbscissa_; + + //! The physics model + const LauDecayTimePhysicsModel& physModel_; + + //! The efficiency model + const LauUniformDecayTimeEfficiency& effModel_; + + //! The resolution model + const LauDecayTimeResolution& resolModel_; + + //! Cached value of lifetime + const Double_t& tauVal_; + + //! Cached value of 1/lifetime + const Double_t& gammaVal_; + + //! Cached value of mass difference + const Double_t& deltaMVal_; + + //! Cached value of width difference + const Double_t& deltaGammaVal_; + + //! Cached value of prompt fraction + const Double_t& fracPromptVal_; + + //! Cached value of the number of Gaussians in the resolution model + const std::size_t& nGauss_; + + //! Cached value of the Gaussian fractions + const std::vector& fractionVals_; + + //! Cached value of the Gaussian means + const std::vector& meanVals_; + + //! Cached value of the Gaussian widths + const std::vector& widthVals_; + + //! Cached value of the mean scaling flags + const std::vector& scaleMeans_; + + //! Cached value of the width scaling flags + const std::vector& scaleWidths_; + + //! Have any of the physics parameters changed in the latest fit iteration + const bool& physicsParamChanged_; + + //! Have any of the resolution parameters changed in the latest fit iteration + const bool& resoParamChanged_; + + //! Cache of the decay time errors for each event + /*! 
+ The inner array contains { 1.0, error } to allow un-branched control of whether or not to scale + */ + std::vector> abscissaErrors_; + + //! Cache of the normalisation terms for each event + std::vector normTerms_; + + ClassDefOverride(LauSmearedUniformEfficiencyDecayTimeIntegrator, 0) +}; + +#endif diff --git a/inc/LauSplineDecayTimeEfficiency.hh b/inc/LauSplineDecayTimeEfficiency.hh new file mode 100644 index 0000000..4cde051 --- /dev/null +++ b/inc/LauSplineDecayTimeEfficiency.hh @@ -0,0 +1,152 @@ + +/* +Copyright 2021 University of Warwick + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Laura++ package authors: +John Back +Paul Harrison +Thomas Latham +*/ + +/*! \file LauSplineDecayTimeEfficiency.hh + \brief File containing declaration of LauSplineDecayTimeEfficiency class. +*/ + +/*! \class LauSplineDecayTimeEfficiency + \brief Class for defining a spline-interpolated model for decay time efficiency +*/ + +#ifndef LAU_SPLINE_DECAYTIME_EFFICIENCY +#define LAU_SPLINE_DECAYTIME_EFFICIENCY + +#include +#include +#include + +#include "Rtypes.h" + +#include "Lau1DCubicSpline.hh" +#include "LauAbsDecayTimeEfficiency.hh" +#include "LauParameter.hh" + +//! Defines the allowed orders of spline polynomials +enum class LauSplineOrder : std::size_t { + Cubic = 3, //!< 3rd order + Sixth = 6 //!< 6th order +}; + +template +class LauAbsSplineDecayTimeEfficiency : public LauAbsDecayTimeEfficiency { + public: + //! 
The number of coefficients of each spline segment + static constexpr std::size_t nCoeffs { static_cast>(Order) + 1 }; + + private: + ClassDefOverride(LauAbsSplineDecayTimeEfficiency, 0) +}; + +template +class LauSplineDecayTimeEfficiency : public LauAbsSplineDecayTimeEfficiency +{}; + +template <> +class LauSplineDecayTimeEfficiency : public LauAbsSplineDecayTimeEfficiency { + + public: + //! Constructor + LauSplineDecayTimeEfficiency( std::unique_ptr effSpline ); + + //! Retrieve the efficiency for a given value of the decay time + /*! + \param abscissa the value of the decay time + \return the efficiency + */ + Double_t getEfficiency( const Double_t abscissa ) const override; + + //! Retrieve the parameters of the efficiency model so that they can be loaded into a fit + /*! + \return the parameters of the efficiency model + */ + std::vector getParameters() override { return params_; } + + //! Initialise the parameter cache + /*! + Must be called prior to starting fitting or generation + */ + void initialise() override; + + //! Propagate any updates to parameters and recalculate information as neeeded + /*! + Should be called at each fit iteration + */ + void propagateParUpdates() override; + + //! Retrieve the number of segments in the spline + std::size_t nSegments() const { return effSpline_->getnKnots() - 1; } + + //! Retrieve the positions of the spline knots + const std::vector& knotPositions() const { return effSpline_->getXValues(); } + + //! Fix all knots + void fixKnots(); + + //! Float all knots + void floatKnots(); + + //! Fix or float a specific knot + /*! + \param knotIndex the index of the knot to fix/float + \param fixed true if knot to be fixed, false if knot to be floated + */ + void fixKnot( const std::size_t knotIndex, const bool fixed ); + + //! Retrieve the polynomial coefficients for the given spline segment + /*! 
+ \param segmentIndex the index of the spline segment + \return the polynomial coefficients + */ + std::array getCoefficients( const std::size_t segmentIndex ) const { return effSpline_->getCoefficients(segmentIndex); } + + //! Retrieve whether any of the parameters have changed in the latest fit iteration + const bool& anythingChanged() const override { return anyKnotChanged_; } + + private: + //! Update the cached parameter values + void updateParameterCache(); + + //! The spline + std::unique_ptr effSpline_; + + //! The spline parameters + std::vector> ownedParams_; + + //! The spline parameters (for giving access) + std::vector params_; + + //! The spline values + std::vector values_; + + //! Are any of the knot parameters floating in the fit? + bool anyKnotFloating_{false}; + + //! Have any of the knot parameters changed in the lastest fit iteration? + bool anyKnotChanged_{false}; + + ClassDefOverride(LauSplineDecayTimeEfficiency, 0) +}; + +#endif diff --git a/inc/LauTimeDepFitModel.hh b/inc/LauTimeDepFitModel.hh index 555186f..96f3912 100644 --- a/inc/LauTimeDepFitModel.hh +++ b/inc/LauTimeDepFitModel.hh @@ -1,733 +1,731 @@ /* Copyright 2006 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauTimeDepFitModel.hh \brief File containing declaration of LauTimeDepFitModel class. */ /*! \class LauTimeDepFitModel \brief Class for defining a time-dependent fit model. 
LauTimeDepFitModel is a class that allows the user to define a three-body Dalitz plot according to the isobar model, i.e. defining a set of resonances that have complex amplitudes that can interfere with each other. It extends the LauSimpleFitModel and LauCPFitModel models in that it allows the fitting of time-dependent particle/antiparticle decays to flavour-conjugate Dalitz plots, including their interference through mixing. */ #ifndef LAU_TIMEDEP_FIT_MODEL #define LAU_TIMEDEP_FIT_MODEL #include #include #include #include #include "TString.h" #include "TStopwatch.h" #include "TSystem.h" #include "LauAbsFitModel.hh" +#include "LauComplex.hh" #include "LauConstants.hh" #include "LauEmbeddedData.hh" +#include "LauFlavTag.hh" #include "LauParameter.hh" -#include "LauFlavTag.hh" -#include "LauCategoryFlavTag.hh" +//#include "LauCategoryFlavTag.hh" class LauAbsBkgndDPModel; class LauAbsCoeffSet; class LauAbsPdf; class LauDecayTimePdf; class LauIsobarDynamics; class LauKinematics; class LauScfMap; class LauTimeDepFitModel : public LauAbsFitModel { public: //! Possible CP eigenvalues (the intrinsic CP of the final state particles) enum CPEigenvalue { CPOdd = -1, /*!< CP odd final state */ QFS = 0, /*!< Quasi Flavour Specific final state */ CPEven = 1 /*!< CP even final state */ }; //! Constructor /*! \param [in] modelB0bar DP model for the antiparticle \param [in] modelB0 DP model for the particle \param [in] flavTag flavour tagging information */ LauTimeDepFitModel(LauIsobarDynamics* modelB0bar, LauIsobarDynamics* modelB0, LauFlavTag* flavTag); //! Destructor virtual ~LauTimeDepFitModel(); //! Set the signal event yield /*! \param [in] nSigEvents contains the signal yield and option to fix it */ virtual void setNSigEvents(LauParameter* nSigEvents); //! Set the signal event yield and asymmetry /*! 
\param [in] nSigEvents contains the signal yield and option to fix it \param [in] sigAsym contains the signal asymmetry and option to fix it */ virtual void setNSigEvents(LauParameter* nSigEvents, LauParameter* sigAsym); //! Set the number of background events /*! The name of the parameter must be that of the corresponding background category (so that it can be correctly assigned) \param [in] nBkgndEvents contains the name, yield and option to fix the yield of the background */ virtual void setNBkgndEvents(LauAbsRValue* nBkgndEvents); //! Set the background event yield and asymmetry /*! \param [in] nBkgEvents contains the background yield and option to fix it \param [in] BkgAsym contains the background asymmetry and option to fix it */ virtual void setNBkgndEvents(LauAbsRValue* nBkgndEvents, LauAbsRValue* bkgndAsym); //! Set the background DP models (null pointer for BbarModel implies same model for both) /*! \param [in] bkgndClass the name of the background class \param [in] BModel the DP model of the background for B (particle) decays \param [in] BbarModel the DP model of the background for Bbar (anti-particle) decays */ void setBkgndDPModels(const TString& bkgndClass, LauAbsBkgndDPModel* BModel, LauAbsBkgndDPModel* BbarModel); //! Switch on/off storage of amplitude info in generated ntuple void storeGenAmpInfo(Bool_t storeInfo) { storeGenAmpInfo_ = storeInfo; } //! Set CP eigenvalue /*! The CP eigenvalue can be supplied on an event-by-event basis, e.g. if the data contains daughters that are D0 mesons that can decay to either K+ K- (CP even) or KS pi0 (CP odd). This method allows you to set the default value that should be used if the data does not contain this information as well as the name of the variable in the data that will specify this information. If completely unspecified all events will be assumed to be CP even. 
\param defaultCPEV the default for the eigenvalue \param evVarName the variable name in the data tree that specifies the CP eigenvalue */ inline void setCPEigenvalue( const CPEigenvalue defaultCPEV, const TString& cpevVarName = "" ) { cpEigenValue_ = defaultCPEV; cpevVarName_ = cpevVarName; } //! Set the DP amplitude coefficients /*! \param [in] coeffSet the set of coefficients */ void setAmpCoeffSet(LauAbsCoeffSet* coeffSet); //! Set the decay time PDFs /*! \param [in] position the tagger position in the vectors \param [in] pdf the signal decay time PDF */ void setSignalDtPdf(LauDecayTimePdf* pdf); //! Set the decay time PDFs /*! \param [in] tagCat the tagging category for which the PDF should be used \param [in] pdf the background decay time PDF */ void setBkgndDtPdf(const TString& bkgndClass, LauDecayTimePdf* pdf); //! Set the signal PDF for a given variable /*! \param [in] tagCat the tagging category for which the PDF should be used \param [in] pdf the PDF to be added to the signal model */ void setSignalPdfs(LauAbsPdf* pdf); //! Set the background PDF /*! \param [in] bkgndClass the name of the background class \param [in] pdf the PDF to be added to the background model */ void setBkgndPdf(const TString& bkgndClass, LauAbsPdf* pdf); void setSignalFlavTagPdfs( const Int_t tagCat, LauAbsPdf* pdf); void setBkgdFlavTagPdfs( const TString name, LauAbsPdf* pdf); //! Embed full simulation events for the signal, rather than generating toy from the PDFs /*! 
\param [in] tagCat the tagging category for which the file should be used \param [in] fileName the name of the file containing the events \param [in] treeName the name of the tree \param [in] reuseEventsWithinEnsemble \param [in] reuseEventsWithinExperiment \param [in] useReweighting */ void embedSignal(const TString& fileName, const TString& treeName, const Bool_t reuseEventsWithinEnsemble, const Bool_t reuseEventsWithinExperiment = kFALSE); void embedBkgnd(const TString& bkgndClass, const TString& fileName, const TString& treeName, Bool_t reuseEventsWithinEnsemble, Bool_t reuseEventsWithinExperiment = kFALSE); //! Set the value of the mixing phase /*! \param [in] phiMix the value of the mixing phase \param [in] fixPhiMix whether the value should be fixed or floated \param [in] useSinCos whether to use the sine and cosine as separate parameters or to just use the mixing phase itself */ void setPhiMix(const Double_t phiMix, const Bool_t fixPhiMix, const Bool_t useSinCos = kFALSE); //! Initialise the fit virtual void initialise(); //! Initialise the signal DP model virtual void initialiseDPModels(); //! Recalculate Normalization the signal DP models virtual void recalculateNormalisation(); //! Update the coefficients virtual void updateCoeffs(); // Toy MC generation and fitting overloaded functions virtual Bool_t genExpt(); //! Set the maximum value of A squared to be used in the accept/reject /*! \param [in] value the new value */ inline void setASqMaxValue(const Double_t value) {aSqMaxSet_ = value;} //! Weight events based on the DP model /*! \param [in] dataFileName the name of the data file \param [in] dataTreeName the name of the data tree */ virtual void weightEvents( const TString& dataFileName, const TString& dataTreeName ); //! Calculate things that depend on the fit parameters after they have been updated by Minuit virtual void propagateParUpdates(); //! Read in the input fit data variables, e.g. m13Sq and m23Sq virtual void cacheInputFitVars(); //! 
Check the initial fit parameters virtual void checkInitFitParams(); //! Get the fit results and store them /*! \param [in] tablePrefixName prefix for the name of the output file */ virtual void finaliseFitResults(const TString& tablePrefixName); //! Save the pdf Plots for all the resonances of experiment number fitExp /*! TODO - not working in this model!! \param [in] label prefix for the file name to be saved */ virtual void savePDFPlots(const TString& label); //! Save the pdf Plots for the sum of ressonances correspondint to "sin" of experiment number fitExp /*! TODO - not working in this model!! \param [in] label prefix for the file name to be saved \param [in] spin spin of the wave to be saved */ virtual void savePDFPlotsWave(const TString& label, const Int_t& spin); //! Print the fit fractions, total DP rate and mean efficiency /*! \param [out] output the stream to which to print */ virtual void printFitFractions(std::ostream& output); //! Print the asymmetries /*! \param [out] output the stream to which to print */ virtual void printAsymmetries(std::ostream& output); //! Write the fit results in latex table format /*! \param [in] outputFile the name of the output file */ virtual void writeOutTable(const TString& outputFile); //! Store the per event likelihood values virtual void storePerEvtLlhds(); // Methods to do with calculating the likelihood functions // and manipulating the fitting parameters. //! Get the total likelihood for each event /*! \param [in] iEvt the event number */ virtual Double_t getTotEvtLikelihood(const UInt_t iEvt); //! Calculate the signal and background likelihoods for the DP for a given event /*! \param [in] iEvt the event number */ virtual void getEvtDPDtLikelihood(const UInt_t iEvt); //! Determine the signal and background likelihood for the extra variables for a given event /*! \param [in] iEvt the event number */ virtual void getEvtExtraLikelihoods(const UInt_t iEvt); virtual void getEvtFlavTagLikelihood(const UInt_t iEvt); //! 
Get the total number of events /*! \return the total number of events */ virtual Double_t getEventSum() const; //! Set the fit parameters for the DP model void setSignalDPParameters(); //! Set the fit parameters for the decay time PDFs void setDecayTimeParameters(); //! Set the fit parameters for the extra PDFs void setExtraPdfParameters(); //! Set the initial yields void setFitNEvents(); //! Set the calibration parameters void setCalibParams(); //! Set the tagging efficiency parameters void setTagEffParams(); - //! Set the efficiency parameters - void setEffiParams(); - //! Set the asymmetry parameters void setAsymParams(); //! Set the tagging asymmetry parameters void setFlavTagAsymParams(); //! Set-up other parameters that are derived from the fit results, e.g. fit fractions void setExtraNtupleVars(); //! Set production and detections asymmetries /*! \param [in] AProd is the production asymmetry definied as (N_BqBar-N_Bq)/(N_BqBar+N_Bq) */ void setAsymmetries(const Double_t AProd, const Bool_t AProdFix); //! Set production and detections asymmetries /*! \param [in] AProd is the production asymmetry definied as (N_BqBar-N_Bq)/(N_BqBar+N_Bq) */ void setBkgndAsymmetries(const TString& bkgndClass, const Double_t AProd, const Bool_t AProdFix); //! Randomise the initial fit parameters void randomiseInitFitPars(); //! Method to set up the storage for background-related quantities called by setBkgndClassNames virtual void setupBkgndVectors(); //! Calculate the CP asymmetries /*! \param [in] initValues is this before or after the fit */ void calcAsymmetries(const Bool_t initValues = kFALSE); //! Finalise value of mixing phase void checkMixingPhase(); //! Return the map of signal decay time PDFs typedef std::map< Int_t, LauDecayTimePdf*> LauTagCatDtPdfMap; LauDecayTimePdf* getSignalDecayTimePdf(){return signalDecayTimePdf_;} //! 
Return the map of background decay time PDFs std::vector getBackgroundDecayTimePdfs(){return BkgndDecayTimePdfs_;} protected: typedef std::map< Int_t, LauParameter> LauTagCatParamMap; typedef std::map< Int_t, LauPdfList > LauTagCatPdfListMap; typedef std::map< Int_t, LauAbsPdf* > LauTagCatPdfMap; typedef std::map< TString, LauAbsPdf* > LauBkgdPdfMap; typedef std::map< Int_t, Int_t > LauTagCatEventsMap; typedef std::map< Int_t, LauEmbeddedData* > LauTagCatEmbDataMap; //typedef std::map< Int_t, std::pair > LauTaggerGenInfo; //typedef std::map< std::pair, LauTaggerGenInfo > LauGenInfo; typedef std::map< TString, std::pair > LauGenInfo; typedef std::vector LauTagCatEmbDataMapList; typedef std::vector LauBkgndDPModelList; typedef std::vector LauBkgndPdfsList; typedef std::vector LauBkgndYieldList; typedef std::vector LauBkgndReuseEventsList; //! Determine the number of events to generate for each hypothesis LauGenInfo eventsToGenerate(); //! Generate signal event Bool_t generateSignalEvent(); //! Generate background event /*! \param [in] bgID ID number of the background class */ Bool_t generateBkgndEvent(UInt_t bgID); //! Setup the required ntuple branches void setupGenNtupleBranches(); //! Store all of the DP and decay time information void setDPDtBranchValues(); //! Generate from the extra PDFs /*! \param [in] extraPdfs the list of extra PDFs \param [in] embeddedData the embedded data sample */ void generateExtraPdfValues(LauPdfList& extraPdfs, LauEmbeddedData* embeddedData); //! Add sPlot branches for the extra PDFs /*! \param [in] extraPdfs the list of extra PDFs \param [in] prefix the list of prefixes for the branch names */ void addSPlotNtupleBranches(const LauPdfList& extraPdfs, const TString& prefix); //! Set the branches for the sPlot ntuple with extra PDFs /*! 
\param [in] extraPdfs the list of extra PDFs \param [in] prefix the list of prefixes for the branch names \param [in] iEvt the event number */ Double_t setSPlotNtupleBranchValues(LauPdfList& extraPdfs, const TString& prefix, const UInt_t iEvt); //! Update the signal events after Minuit sets background parameters void updateSigEvents(); //! Add branches to store experiment number and the event number within the experiment virtual void setupSPlotNtupleBranches(); //! Returns the names of all variables in the fit virtual LauSPlot::NameSet variableNames() const; //! Returns the names and yields of species that are free in the fit virtual LauSPlot::NumbMap freeSpeciesNames() const; //! Returns the names and yields of species that are fixed in the fit virtual LauSPlot::NumbMap fixdSpeciesNames() const; //! Returns the species and variables for all 2D PDFs in the fit virtual LauSPlot::TwoDMap twodimPDFs() const; //! Check if the signal is split into well-reconstructed and mis-reconstructed types virtual Bool_t splitSignal() const { return kFALSE; } //! Check if the mis-reconstructed signal is to be smeared in the DP virtual Bool_t scfDPSmear() const { return kFALSE; } //! Add the parameters from each PDF into the fit /*! \param [in] theMap the container of PDFs */ UInt_t addParametersToFitList(LauPdfList* theList); //! Add the parameters from each decay time PDF into the fit /*! \param [in] theMap the container of PDFs */ UInt_t addParametersToFitList(std::vector theVector); //! Calculate the component integrals of the interference term void calcInterferenceTermIntegrals(); //! Calculate the total integral of the interference term void calcInterTermNorm(); private: //! Dalitz plot PDF for the antiparticle LauIsobarDynamics* sigModelB0bar_; //! Dalitz plot PDF for the particle LauIsobarDynamics* sigModelB0_; //! Kinematics object for antiparticle LauKinematics* kinematicsB0bar_; //! Kinematics object for particle LauKinematics* kinematicsB0_; //! 
The background Dalitz plot models for particles LauBkgndDPModelList BkgndDPModelsB_; //! The background Dalitz plot models for anti-particles LauBkgndDPModelList BkgndDPModelsBbar_; //! The background PDFs LauBkgndPdfsList BkgndPdfs_; //! Background boolean Bool_t usingBkgnd_; //! LauFlavTag object for flavour tagging LauFlavTag* flavTag_; //! Flavour tag for current event std::vector curEvtTagFlv_; //! Per event mistag for current event std::vector curEvtMistag_; //! Per event transformed mistag for current event std::vector curEvtMistagPrime_; //! True tag flavour (i.e. flavour at production) for the current event (only relevant for toy generation) LauFlavTag::Flavour curEvtTrueTagFlv_; //! Flavour at decay for the current event (only relevant for QFS) LauFlavTag::Flavour curEvtDecayFlv_; //! Number of signal components UInt_t nSigComp_; //! Number of signal DP parameters UInt_t nSigDPPar_; //! Number of decay time PDF parameters UInt_t nDecayTimePar_; //! Number of extra PDF parameters UInt_t nExtraPdfPar_; //! Number of normalisation parameters (yields, asymmetries) UInt_t nNormPar_; //! Number of calibration parameters (p0, p1) UInt_t nCalibPar_; //! Number of tagging efficneyc parameters UInt_t nTagEffPar_; //! Number of efficiency parameters (p0, p1) UInt_t nEffiPar_; //! Number of asymmetry parameters UInt_t nAsymPar_; //! Number of tagging asymmetry parameters UInt_t nTagAsymPar_; //! The complex coefficients for antiparticle std::vector coeffsB0bar_; //! The complex coefficients for particle std::vector coeffsB0_; //! Magnitudes and Phases or Real and Imaginary Parts std::vector coeffPars_; //! The integrals of the efficiency corrected amplitude cross terms for each pair of amplitude components /*! Calculated as the sum of A* x Abar x efficiency */ std::vector< std::vector > fifjEffSum_; //! The normalisation for the term 2.0*Re(A*Abar*phiMix) Double_t interTermReNorm_; //! 
The normalisation for the term 2.0*Im(A*Abar*phiMix) Double_t interTermImNorm_; //! The antiparticle fit fractions LauParArray fitFracB0bar_; //! The particle fit fractions LauParArray fitFracB0_; //! The fit fraction asymmetries std::vector fitFracAsymm_; //! A_CP parameter std::vector acp_; //! The mean efficiency for the antiparticle LauParameter meanEffB0bar_; //! The mean efficiency for the particle LauParameter meanEffB0_; //! The average DP rate for the antiparticle LauParameter DPRateB0bar_; //! The average DP rate for the particle LauParameter DPRateB0_; //! Signal yields LauParameter* signalEvents_; //! Signal asymmetry LauParameter* signalAsym_; //! Background yield(s) LauBkgndYieldList bkgndEvents_; //! Background asymmetries(s) LauBkgndYieldList bkgndAsym_; //! CP eigenvalue variable name TString cpevVarName_; //! CP eigenvalue for current event CPEigenvalue cpEigenValue_; //! Vector to store event CP eigenvalues std::vector evtCPEigenVals_; //! The mass difference between the neutral mass eigenstates LauParameter deltaM_; //! The width difference between the neutral mass eigenstates LauParameter deltaGamma_; //! The lifetime LauParameter tau_; //! The mixing phase LauParameter phiMix_; //! The sine of the mixing phase LauParameter sinPhiMix_; //! The cosine of the mixing phase LauParameter cosPhiMix_; //! Flag whether to use the sine and cosine of the mixing phase or the phase itself as the fit parameters Bool_t useSinCos_; //! e^{-i*phiMix} LauComplex phiMixComplex_; //! Signal Decay time PDFs (one per tagging category) LauDecayTimePdf* signalDecayTimePdf_; //! Background types std::vector BkgndTypes_; //! Background Decay time PDFs (one per tagging category) std::vector BkgndDecayTimePdfs_; //! Decay time for the current event Double_t curEvtDecayTime_; //! Decay time error for the current event Double_t curEvtDecayTimeErr_; //! PDFs for other variables LauPdfList sigExtraPdf_; //! eta PDFs for each TagCat LauPdfList sigFlavTagPdf_; //! 
eta PDFs for each background LauBkgdPdfMap bkgdFlavTagPdf_; //! Production asymmetry between B0 and B0bar LauParameter AProd_; //! Production asymmetry between B0 and B0bar for bkgnds std::vector AProdBkgnd_; // Toy generation stuff //! The maximum allowed number of attempts when generating an event Int_t iterationsMax_; //! The number of unsucessful attempts to generate an event so far Int_t nGenLoop_; //! The value of A squared for the current event Double_t ASq_; //! The maximum value of A squared that has been seen so far while generating Double_t aSqMaxVar_; //! The maximum allowed value of A squared Double_t aSqMaxSet_; //! Flag for storage of amplitude info in generated ntuple Bool_t storeGenAmpInfo_; //! The signal event tree for embedding fully simulated events LauEmbeddedData* signalTree_; //! The background event tree for embedding fully simulated events std::vector bkgndTree_; //! Boolean to control reuse of embedded signal events Bool_t reuseSignal_; //! Vector of booleans to reuse background events LauBkgndReuseEventsList reuseBkgnd_; // Likelihood values //! Signal DP likelihood value Double_t sigDPLike_; //! Signal likelihood from extra PDFs Double_t sigExtraLike_; Double_t sigFlavTagLike_; Double_t bkgdFlavTagLike_; //! Total signal likelihood Double_t sigTotalLike_; //! Background DP likelihood value(s) std::vector bkgndDPLike_; //! Background likelihood value(s) from extra PDFs std::vector bkgndExtraLike_; //! Total background likelihood(s) std::vector bkgndTotalLike_; ClassDef(LauTimeDepFitModel,0) // Time-dependent neutral model }; #endif diff --git a/inc/LauUniformDecayTimeEfficiency.hh b/inc/LauUniformDecayTimeEfficiency.hh new file mode 100644 index 0000000..0ffeee4 --- /dev/null +++ b/inc/LauUniformDecayTimeEfficiency.hh @@ -0,0 +1,92 @@ + +/* +Copyright 2021 University of Warwick + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Laura++ package authors: +John Back +Paul Harrison +Thomas Latham +*/ + +/*! \file LauUniformDecayTimeEfficiency.hh + \brief File containing declaration of LauUniformDecayTimeEfficiency class. +*/ + +/*! \class LauUniformDecayTimeEfficiency + \brief Class for defining a uniform model for decay time efficiency +*/ + +#ifndef LAU_UNIFORM_DECAYTIME_EFFICIENCY +#define LAU_UNIFORM_DECAYTIME_EFFICIENCY + +#include + +#include "Rtypes.h" + +#include "LauAbsDecayTimeEfficiency.hh" + +class LauAbsRValue; + + +class LauUniformDecayTimeEfficiency : public LauAbsDecayTimeEfficiency { + + public: + //! Constructor + LauUniformDecayTimeEfficiency() = default; + + //! Constructor + explicit LauUniformDecayTimeEfficiency( const Double_t efficiency ) : efficiency_{efficiency} {} + + //! Retrieve the efficiency for a given value of the decay time + /*! + \param abscissa the value of the decay time + \return the efficiency + */ + Double_t getEfficiency( [[maybe_unused]] const Double_t abscissa ) const override { return efficiency_; } + + //! Initialise the parameter cache + /*! + Must be called prior to starting fitting or generation + */ + void initialise() override {} + + //! Propagate any updates to parameters and recalculate information as neeeded + /*! + Should be called at each fit iteration + */ + void propagateParUpdates() override {} + + //! Retrieve whether any of the parameters have changed in the latest fit iteration + const bool& anythingChanged() const override { return anythingChanged_; } + + //! 
Retrieve the parameters of the efficiency model so that they can be loaded into a fit + /*! + \return the parameters of the efficiency model + */ + std::vector getParameters() override { return {}; } + + private: + //! The uniform value of the efficiency + const Double_t efficiency_{1.0}; + + //! Nothing will change but we need to be able to return that information as a reference + const bool anythingChanged_{false}; + + ClassDefOverride(LauUniformDecayTimeEfficiency, 0) +}; + +#endif diff --git a/inc/Laura++_LinkDef.h b/inc/Laura++_LinkDef.h index 9e76f0e..cd9715f 100644 --- a/inc/Laura++_LinkDef.h +++ b/inc/Laura++_LinkDef.h @@ -1,163 +1,187 @@ /* Copyright 2013 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ #ifdef __CINT__ #pragma link off all globals; #pragma link off all classes; #pragma link off all functions; #pragma link C++ class Lau1DCubicSpline+; #pragma link C++ class Lau1DHistPdf+; #pragma link C++ class Lau2DAbsDP+; #pragma link C++ class Lau2DAbsDPPdf+; #pragma link C++ class Lau2DAbsHistDP+; #pragma link C++ class Lau2DAbsHistDPPdf+; #pragma link C++ class Lau2DCubicSpline+; #pragma link C++ class Lau2DHistDP+; #pragma link C++ class Lau2DHistDPPdf+; #pragma link C++ class Lau2DHistPdf+; #pragma link C++ class Lau2DSplineDP+; #pragma link C++ class Lau2DSplineDPPdf+; #pragma link C++ class LauAbsBkgndDPModel+; #pragma link C++ class LauAbsCoeffSet+; #pragma link C++ class LauAbsEffModel+; #pragma link C++ class LauAbsFitter+; #pragma link C++ class LauAbsFitModel+; #pragma link C++ class LauAbsIncohRes+; #pragma link C++ class LauAbsModIndPartWave+; #pragma link C++ class LauAbsPdf+; #pragma link C++ class LauAbsResonance+; #pragma link C++ class LauAbsRValue+; #pragma link C++ class LauArgusPdf+; #pragma link C++ class LauAsymmCalc+; #pragma link C++ class LauBelleCPCoeffSet+; #pragma link C++ class LauBelleNR+; #pragma link C++ class LauBelleSymNR+; #pragma link C++ class LauBifurcatedGaussPdf+; #pragma link C++ class LauBkgndDPModel+; #pragma link C++ class LauBlattWeisskopfFactor+; #pragma link C++ class LauBlind+; #pragma link C++ class LauBreitWignerRes+; #pragma link C++ class LauBsCartesianCPCoeffSet+; #pragma link C++ class LauBsCPFitModel+; #pragma link C++ class LauCacheData+; #pragma link C++ class LauCalcChiSq+; #pragma link C++ class LauCartesianCPCoeffSet+; #pragma link C++ class LauCartesianGammaCPCoeffSet+; #pragma link C++ class LauCategoryFlavTag+; #pragma link C++ class LauChebychevPdf+; #pragma link C++ class LauCleoCPCoeffSet+; #pragma link C++ class LauComplex+; #pragma link C++ class LauCPFitModel+; #pragma link C++ class LauCruijffPdf+; #pragma link C++ class 
LauCrystalBallPdf+; #pragma link C++ class LauDabbaRes+; #pragma link C++ class LauDatabasePDG+; #pragma link C++ class LauDaughters+; #pragma link C++ class LauDecayTimePdf+; #pragma link C++ class LauDPDepBifurGaussPdf+; #pragma link C++ class LauDPDepCruijffPdf+; #pragma link C++ class LauDPDepGaussPdf+; #pragma link C++ class LauDPDepMapPdf+; #pragma link C++ class LauDPDepSumPdf+; #pragma link C++ class LauEffModel+; #pragma link C++ class LauEFKLLMRes+; #pragma link C++ class LauEmbeddedData+; #pragma link C++ class LauExponentialPdf+; #pragma link C++ class LauFitDataTree+; #pragma link C++ class LauFitNtuple+; #pragma link C++ class LauFitter+; #pragma link C++ class LauFitObject+; #pragma link C++ class LauFlatteRes+; #pragma link C++ class LauFlatNR+; #pragma link C++ class LauFlavTag+; #pragma link C++ class LauFormulaPar+; #pragma link C++ class LauGaussIncohRes+; #pragma link C++ class LauGaussPdf+; #pragma link C++ class LauGenNtuple+; #pragma link C++ class LauGounarisSakuraiRes+; #pragma link C++ class LauIntegrals+; #pragma link C++ class LauDPPartialIntegralInfo+; #pragma link C++ class LauIsobarDynamics+; #pragma link C++ class LauKappaRes+; #pragma link C++ class LauKinematics+; #pragma link C++ class LauKMatrixProdPole+; #pragma link C++ class LauKMatrixProdSVP+; #pragma link C++ class LauKMatrixPropagator+; #pragma link C++ class LauKMatrixPropFactory+; #pragma link C++ class LauLASSBWRes+; #pragma link C++ class LauLASSNRRes+; #pragma link C++ class LauLASSRes+; #pragma link C++ class LauLinearPdf+; #pragma link C++ class LauMagPhaseCoeffSet+; #pragma link C++ class LauMagPhaseCPCoeffSet+; #pragma link C++ class LauMergeDataFiles+; #pragma link C++ class LauMinuit+; #pragma link C++ class LauModIndPartWaveMagPhase+; #pragma link C++ class LauModIndPartWaveRealImag+; #pragma link C++ class LauNovosibirskPdf+; #pragma link C++ class LauNRAmplitude+; #pragma link C++ class LauParameter+; #pragma link C++ class LauParametricStepFuncPdf+; #pragma 
link C++ class LauParamFixed+; #pragma link C++ class LauParticlePDG+; #pragma link C++ class LauPolNR+; #pragma link C++ class LauPoleRes+; #pragma link C++ class LauPolarFormFactorNR+; #pragma link C++ class LauPolarFormFactorSymNR+; #pragma link C++ class LauPolarGammaCPCoeffSet+; #pragma link C++ class LauPrint+; #pragma link C++ class LauRealImagCoeffSet+; #pragma link C++ class LauRealImagCPCoeffSet+; #pragma link C++ class LauRealImagGammaCPCoeffSet+; #pragma link C++ class LauRelBreitWignerRes+; #pragma link C++ class LauResonanceInfo+; #pragma link C++ class LauRescatteringRes+; #pragma link C++ class LauRescattering2Res+; #pragma link C++ class LauResonanceMaker+; #pragma link C++ class LauResultsExtractor+; #pragma link C++ class LauRhoOmegaMix+; #ifdef DOLAUROOFITTASK #pragma link C++ class LauRooFitTask+; #endif #pragma link C++ class LauScfMap+; #pragma link C++ class LauSigmaRes+; #pragma link C++ class LauSigmoidPdf+; #pragma link C++ class LauSimpleFitModel+; #pragma link C++ class LauSimFitCoordinator+; #pragma link C++ class LauSimFitTask+; #pragma link C++ class LauSPlot+; #pragma link C++ class LauString+; #pragma link C++ class LauSumPdf+; #pragma link C++ class LauTextFileParser+; #pragma link C++ class LauTimeDepFlavModel+; #pragma link C++ class LauTimeDepFitModel+; #pragma link C++ class LauTimeDepNonFlavModel+; #pragma link C++ class LauVetoes+; #pragma link C++ class LauWeightedSumEffModel+; + +#pragma link C++ class LauAbsDecayTimeCalculator+; +#pragma link C++ class LauAbsDecayTimeEfficiency+; +#pragma link C++ class LauAbsDecayTimeIntegrator+; +#pragma link C++ class LauAbsSplineDecayTimeEfficiency+; +//#pragma link C++ class LauAbsSplineDecayTimeEfficiency+; +#pragma link C++ class LauBinnedDecayTimeEfficiency+; +#pragma link C++ class LauDecayTimePhysicsModel+; +#pragma link C++ class LauDecayTimeResolution+; +#pragma link C++ class LauNonSmearedDecayTimeCalculator+; +#pragma link C++ class 
LauNonSmearedBinnedEfficiencyDecayTimeIntegrator+; +#pragma link C++ class LauNonSmearedSplineEfficiencyDecayTimeIntegrator+; +//#pragma link C++ class LauNonSmearedSplineEfficiencyDecayTimeIntegrator+; +#pragma link C++ class LauNonSmearedUniformEfficiencyDecayTimeIntegrator+; +#pragma link C++ class LauSmearedDecayTimeCalculator+; +#pragma link C++ class LauSmearedBinnedEfficiencyDecayTimeIntegrator+; +#pragma link C++ class LauSmearedSplineEfficiencyDecayTimeIntegrator+; +//#pragma link C++ class LauSmearedSplineEfficiencyDecayTimeIntegrator+; +#pragma link C++ class LauSmearedUniformEfficiencyDecayTimeIntegrator+; +#pragma link C++ class LauSplineDecayTimeEfficiency+; +//#pragma link C++ class LauSplineDecayTimeEfficiency+; +#pragma link C++ class LauUniformDecayTimeEfficiency+; + #pragma link C++ namespace LauConstants+; +#pragma link C++ namespace LauDecayTime+; #pragma link C++ namespace LauRandom+; #endif diff --git a/src/Lau1DCubicSpline.cc b/src/Lau1DCubicSpline.cc index dd4964f..beb65d6 100644 --- a/src/Lau1DCubicSpline.cc +++ b/src/Lau1DCubicSpline.cc @@ -1,528 +1,536 @@ /* Copyright 2015 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file Lau1DCubicSpline.cc \brief File containing implementation of Lau1DCubicSpline class. 
*/ #include #include #include #include #include #include "Lau1DCubicSpline.hh" ClassImp(Lau1DCubicSpline) Lau1DCubicSpline::Lau1DCubicSpline(const std::vector& xs, const std::vector& ys, LauSplineType type, LauSplineBoundaryType leftBound, LauSplineBoundaryType rightBound, Double_t dydx0, Double_t dydxn) : nKnots_(xs.size()), x_(xs), y_(ys), type_(type), leftBound_(leftBound), rightBound_(rightBound), dydx0_(dydx0), dydxn_(dydxn) { init(); } Lau1DCubicSpline::~Lau1DCubicSpline() { } Double_t Lau1DCubicSpline::evaluate(Double_t x) const { // do not attempt to extrapolate the spline if( xx_[nKnots_-1] ) { std::cerr << "WARNING in Lau1DCubicSpline::evaluate : function is only defined between " << x_[0] << " and " << x_[nKnots_-1] << std::endl; std::cerr << " value at " << x << " returned as 0" << std::endl; return 0.; } // first determine which 'cell' of the spline x is in // cell i runs from knot i to knot i+1 Int_t cell(0); while( x > x_[cell+1] ) { ++cell; } // obtain x- and y-values of the neighbouring knots Double_t xLow = x_[cell]; Double_t xHigh = x_[cell+1]; Double_t yLow = y_[cell]; Double_t yHigh = y_[cell+1]; if(type_ == Lau1DCubicSpline::LinearInterpolation) { return yHigh*(x-xLow)/(xHigh-xLow) + yLow*(xHigh-x)/(xHigh-xLow); } // obtain t, the normalised x-coordinate within the cell, // and the coefficients a and b, which are defined in cell i as: // // a_i = k_i *(x_i+1 - x_i) - (y_i+1 - y_i), // b_i = -k_i+1*(x_i+1 - x_i) + (y_i+1 - y_i) // // where k_i is (by construction) the first derivative at knot i Double_t t = (x - xLow) / (xHigh - xLow); Double_t a = dydx_[cell] * (xHigh - xLow) - (yHigh - yLow); Double_t b = -1.*dydx_[cell+1] * (xHigh - xLow) + (yHigh - yLow); Double_t retVal = (1 - t) * yLow + t * yHigh + t * (1 - t) * ( a * (1 - t) + b * t ); return retVal; } void Lau1DCubicSpline::updateYValues(const std::vector& ys) { y_ = ys; this->calcDerivatives(); } void Lau1DCubicSpline::updateYValues(const std::vector& ys) { for (UInt_t i=0; 
iunblindValue(); } this->calcDerivatives(); } +void Lau1DCubicSpline::updateYValues(const std::vector& ys) +{ + for (UInt_t i=0; iunblindValue(); + } + this->calcDerivatives(); +} + void Lau1DCubicSpline::updateType(LauSplineType type) { if(type_ != type) { type_ = type; this->calcDerivatives(); } } void Lau1DCubicSpline::updateBoundaryConditions(LauSplineBoundaryType leftBound, LauSplineBoundaryType rightBound, Double_t dydx0, Double_t dydxn) { Bool_t updateDerivatives(kFALSE); if(leftBound_ != leftBound || rightBound_ != rightBound) { leftBound_ = leftBound; rightBound_ = rightBound; updateDerivatives = kTRUE; } if(dydx0_ != dydx0) { dydx0_ = dydx0; if(leftBound_ == Lau1DCubicSpline::Clamped) updateDerivatives = kTRUE; } if(dydxn_ != dydxn) { dydxn_ = dydxn; if(rightBound_ == Lau1DCubicSpline::Clamped) updateDerivatives = kTRUE; } if(updateDerivatives) { this->calcDerivatives(); } } std::array Lau1DCubicSpline::getCoefficients(const UInt_t i, const bool normalise) const { std::array result = {0.,0.,0.,0.}; if(i >= nKnots_-1) { std::cerr << "ERROR in Lau1DCubicSpline::getCoefficients requested for too high a knot value" << std::endl; return result; } Double_t xL = x_[i] , xH = x_[i+1]; Double_t yL = y_[i] , yH = y_[i+1]; Double_t h = xH-xL; //This number comes up a lot switch(type_) { case Lau1DCubicSpline::StandardSpline: case Lau1DCubicSpline::AkimaSpline: { Double_t kL = dydx_[i], kH = dydx_[i+1]; //a and b based on definitions from https://en.wikipedia.org/wiki/Spline_interpolation#Algorithm_to_find_the_interpolating_cubic_spline Double_t a = kL*h-(yH-yL); Double_t b =-kH*h+(yH-yL); Double_t denom = -h*h*h;//The terms have a common demoninator result[0] = -b*xL*xL*xH + a*xL*xH*xH + h*h*(xL*yH - xH*yL); result[1] = -a*xH*(2*xL+xH) + b*xL*(xL+2*xH) + h*h*(yL-yH); result[2] = -b*(2*xL+xH) + a*(xL+2*xH); result[3] = -a+b; for(auto& res : result){res /= denom;} break; } /* case Lau1DCubicSpline::AkimaSpline: // Double check the Akima description of splines (in 
evaluate) right now they're the same except for the first derivatives { //using fomulae from https://asmquantmacro.com/2015/09/01/akima-spline-interpolation-in-excel/ std::function m = [&](Int_t j) //formula to get the straight line gradient { if(j < 0){return 2*m(j+1)-m(j+2);} if(j >= nKnots_){return 2*m(j-1)-m(j-2);} return (y_[j+1]-y_[j]) / (x_[j+1]-x_[j]); }; auto t = [&](Int_t j) { Double_t res = 0.; //originally res was called 't' but that produced a shadow warning Double_t denom = TMath::Abs(m(j+1)-m(j)) + TMath::Abs(m(j-1)-m(j-2)); if(denom == 0){res = (m(j)-m(j-1))/2;} //Special case for when denom = 0 else { res = TMath::Abs(m(j+1)-m(j))*m(j-1) + TMath::Abs(m(j-1)-m(j-2))*m(j); res /= denom; } return res; }; //These are the p's to get the spline in the form p_k(x-xL)^k Double_t pDenom = x_[i+1]/x_[i]; //a denominator used for p[2] and p[3] std::array p = {y_[i],t(i),0.,0.}; //we'll do the last 2 below p[2] = 3*m(i)-2*t(i)-t(i+1); p[2]/= pDenom; p[3] = t(i)+t(i+1)-2*m(i); p[3]/= pDenom*pDenom; //Now finally rearranging the p's into the desired results result[0] = p[0]-p[1]*xL+p[2]*xL*xL-p[3]*xL*xL*xL; result[1] = p[1]-2*p[2]*xL+3*p[3]*xL*xL; result[2] = p[2]-3*p[3]*xL; result[3] = p[3]; break; }*/ case Lau1DCubicSpline::LinearInterpolation: { result[0] = xH*yL-xL*yH; result[1] = yH-yL; for(auto& res : result){res /= h;} break; } } if(normalise) { Double_t integral = this->integral(); for(auto& res : result){res /= integral;} } return result; } Double_t Lau1DCubicSpline::integral() const { Double_t integral = 0.; for(UInt_t iKnot = 0; iKnot < nKnots_ -1; ++iKnot) { Double_t minAbs = x_[iKnot]; Double_t maxAbs = x_[iKnot+1]; std::array coeffs = this -> getCoefficients(iKnot, false); auto integralFunc = [&coeffs](Double_t x){return coeffs[0]*x + coeffs[1]*x*x/2 + coeffs[2]*x*x*x/3 + coeffs[3]*x*x*x*x/4;}; integral += integralFunc(maxAbs); integral -= integralFunc(minAbs); } return integral; } TF1* Lau1DCubicSpline::makeTF1(const bool normalise) const { 
TString functionString = ""; //make a long piecewise construction of all the spline pieces for(UInt_t i = 0; i < nKnots_-1; ++i) { functionString += Form("(x>%f && x<= %f)*",x_[i],x_[i+1]);//get the bounds of this piece std::array coeffs = this->getCoefficients(i,normalise); functionString += Form("(%f + %f*x + %f*x^2 + %f*x^3)",coeffs[0],coeffs[1],coeffs[2],coeffs[3]); if(i < nKnots_ -2){functionString += " + \n";}//add to all lines except the last } TF1* func = new TF1("SplineFunction", functionString, x_.front(), x_.back()); return func; } TF1* Lau1DCubicSpline::normaliseToTH1(TH1* hist) const { //first define the fit function auto fitf = [this](Double_t* x, Double_t* par) {//there is only 1 x (the abscissa) and 1 par (a scaling of the entire thing) return this->evaluate( x[0] ) * par[0]; }; //Make the function TF1* FittedFunc = new TF1("FittedFunc",fitf,x_.front(),x_.back(),1); //Set the parameter name FittedFunc -> SetParNames("Constant"); //Set the parameter limits and default value FittedFunc -> SetParLimits(0,0.,10.); FittedFunc -> SetParameter(0,1.); hist->Fit("FittedFunc","N O V"); return FittedFunc; } void Lau1DCubicSpline::fitToTH1(TH1* hist) { auto fitf = [this](Double_t* x, Double_t* par) { this -> updateYValues( std::vector(par, par + nKnots_) ); return this -> evaluate( x[0] ); }; //Make the function TF1 FittedFunc("FittedFunc",fitf,x_.front(),x_.back(),nKnots_); const Double_t knotMax = hist->GetMaximum() * 1.5; for(UInt_t knot = 0; knot <= nKnots_ ; ++knot) { FittedFunc.SetParName(knot, Form("Knot%u",knot)); FittedFunc.SetParLimits(knot, 0., knotMax); FittedFunc.SetParameter(knot, y_[knot]); } hist->Fit("FittedFunc","N O V"); return; } void Lau1DCubicSpline::init() { if( y_.size() != x_.size()) { std::cerr << "ERROR in Lau1DCubicSpline::init : The number of y-values given does not match the number of x-values" << std::endl; std::cerr << " Found " << y_.size() << ", expected " << x_.size() << std::endl; gSystem->Exit(EXIT_FAILURE); } if( nKnots_ < 
3 ) { std::cerr << "ERROR in Lau1DCubicSpline::init : The number of knots is too small" << std::endl; std::cerr << " Found " << nKnots_ << ", expected at least 3 (to have at least 1 internal knot)" << std::endl; gSystem->Exit(EXIT_FAILURE); } dydx_.assign(nKnots_,0.0); a_.assign(nKnots_,0.0); b_.assign(nKnots_,0.0); c_.assign(nKnots_,0.0); d_.assign(nKnots_,0.0); this->calcDerivatives(); } void Lau1DCubicSpline::calcDerivatives() { switch ( type_ ) { case Lau1DCubicSpline::StandardSpline : this->calcDerivativesStandard(); break; case Lau1DCubicSpline::AkimaSpline : this->calcDerivativesAkima(); break; case Lau1DCubicSpline::LinearInterpolation : //derivatives not needed for linear interpolation break; } } void Lau1DCubicSpline::calcDerivativesStandard() { // derivatives are determined such that the second derivative is continuous at internal knots // derivatives, k_i, are the solutions to a set of linear equations of the form: // a_i * k_i-1 + b_i * k+i + c_i * k_i+1 = d_i with a_0 = 0, c_n-1 = 0 // this is solved using the tridiagonal matrix algorithm as on en.wikipedia.org/wiki/Tridiagonal_matrix_algorithm // first and last equations give boundary conditions // - for natural boundary, require f''(x) = 0 at end knot // - for 'not a knot' boundary, require f'''(x) continuous at second knot // - for clamped boundary, require predefined value of f'(x) at end knot // non-zero values of a_0 and c_n-1 would give cyclic boundary conditions a_[0] = 0.; c_[nKnots_-1] = 0.; // set left boundary condition if(leftBound_ == Lau1DCubicSpline::Natural) { b_[0] = 2./(x_[1]-x_[0]); c_[0] = 1./(x_[1]-x_[0]); d_[0] = 3.*(y_[1]-y_[0])/((x_[1]-x_[0])*(x_[1]-x_[0])); } else if(leftBound_ == Lau1DCubicSpline::NotAKnot) { // define the width, h, and the 'slope', delta, of the first cell Double_t h1(x_[1]-x_[0]), h2(x_[2]-x_[0]); Double_t delta1((y_[1]-y_[0])/h1), delta2((y_[2]-y_[1])/h2); // these coefficients can be determined by requiring f'''_0(x_1) = f'''_1(x_1) // the requirement 
f''_0(x_1) = f''_1(x_1) has been used to remove the dependence on k_2 b_[0] = h2; c_[0] = h1+h2; d_[0] = delta1*(2.*h2*h2 + 3.*h1*h2)/(h1+h2) + delta2*5.*h1*h1/(h1+h2); } else { //Clamped b_[0] = 1.; c_[0] = 0.; d_[0] = dydx0_; } // set right boundary condition if(rightBound_ == Lau1DCubicSpline::Natural) { a_[nKnots_-1] = 1./(x_[nKnots_-1]-x_[nKnots_-2]); b_[nKnots_-1] = 2./(x_[nKnots_-1]-x_[nKnots_-2]); d_[nKnots_-1] = 3.*(y_[nKnots_-1]-y_[nKnots_-2])/((x_[nKnots_-1]-x_[nKnots_-2])*(x_[nKnots_-1]-x_[nKnots_-2])); } else if(rightBound_ == Lau1DCubicSpline::NotAKnot) { // define the width, h, and the 'slope', delta, of the last cell Double_t hnm1(x_[nKnots_-1]-x_[nKnots_-2]), hnm2(x_[nKnots_-2]-x_[nKnots_-3]); Double_t deltanm1((y_[nKnots_-1]-y_[nKnots_-2])/hnm1), deltanm2((y_[nKnots_-2]-y_[nKnots_-3])/hnm2); // these coefficients can be determined by requiring f'''_n-3(x_n-2) = f'''_n-2(x_n-2) // the requirement f''_n-3(x_n-2) = f''_n-2(x_n-2) has been used to remove // the dependence on k_n-3 a_[nKnots_-1] = hnm2 + hnm1; b_[nKnots_-1] = hnm1; d_[nKnots_-1] = deltanm2*hnm1*hnm1/(hnm2+hnm1) + deltanm1*(2.*hnm2*hnm2 + 3.*hnm2*hnm1)/(hnm2+hnm1); } else { //Clamped a_[nKnots_-1] = 0.; b_[nKnots_-1] = 1.; d_[nKnots_-1] = dydxn_; } // the remaining equations ensure that f_i-1''(x_i) = f''_i(x_i) for all internal knots for(UInt_t i=1; i=0; --i) { dydx_[i] = d_[i] - c_[i]*dydx_[i+1]; } } void Lau1DCubicSpline::calcDerivativesAkima() { //derivatives are calculated according to the Akima method // J.ACM vol. 17 no. 4 pp 589-602 Double_t am1(0.), an(0.), anp1(0.); // a[i] is the slope of the segment from point i-1 to point i // // n.b. 
segment 0 is before point 0 and segment n is after point n-1 // internal segments are numbered 1 - n-1 for(UInt_t i=1; i +#include + +#include "TH1.h" + +#include "LauBinnedDecayTimeEfficiency.hh" + +ClassImp(LauBinnedDecayTimeEfficiency); + + +LauBinnedDecayTimeEfficiency::LauBinnedDecayTimeEfficiency( const TH1& effHist ) : effHist_{ dynamic_cast( effHist.Clone() ) } +{ + // Make sure ROOT won't try to manage the histogram lifetime + effHist_->SetDirectory(nullptr); + + // Normalise the hist if the (relative) efficiencies have very large values + if ( effHist_->GetMaximum() > 1.0 ) { + std::cout << "INFO in LauBinnedDecayTimeEfficiency::LauBinnedDecayTimeEfficiency : Supplied histogram for decay time acceptance has very large values: normalising..." << std::endl; + effHist_->Scale( 1.0 / effHist_->Integral() ); + } +} + +Double_t LauBinnedDecayTimeEfficiency::getEfficiency( const Double_t abscissa ) const +{ + return effHist_->GetBinContent(effHist_->FindFixBin(abscissa)); +} + +std::vector LauBinnedDecayTimeEfficiency::getBinningInfo() const +{ + const Int_t nBins { effHist_->GetNbinsX() }; + std::vector bins; + bins.reserve(nBins); + for ( Int_t bin{1}; bin <= nBins; ++bin ) { + const Double_t loEdge {effHist_->GetBinLowEdge(bin)}; + const Double_t hiEdge {loEdge + effHist_->GetBinWidth(bin)}; + const Double_t effVal {effHist_->GetBinContent(bin)}; + bins.emplace_back( loEdge, hiEdge, effVal ); + } + return bins; +} diff --git a/src/LauDecayTimePdf.cc b/src/LauDecayTimePdf.cc index 7dc5edb..850b164 100644 --- a/src/LauDecayTimePdf.cc +++ b/src/LauDecayTimePdf.cc @@ -1,1724 +1,613 @@ /* Copyright 2006 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauDecayTimePdf.cc \brief File containing implementation of LauDecayTimePdf class. */ #include #include #include #include #include #include #include "TMath.h" #include "TRandom.h" #include "TSystem.h" #include "TH1.h" #include "RooMath.h" #include "Lau1DCubicSpline.hh" #include "Lau1DHistPdf.hh" #include "LauConstants.hh" #include "LauComplex.hh" #include "LauDecayTimePdf.hh" #include "LauFitDataTree.hh" #include "LauParameter.hh" #include "LauParamFixed.hh" #include "LauRandom.hh" +#include "LauNonSmearedDecayTimeCalculator.hh" +#include "LauNonSmearedUniformEfficiencyDecayTimeIntegrator.hh" +#include "LauNonSmearedBinnedEfficiencyDecayTimeIntegrator.hh" +#include "LauNonSmearedSplineEfficiencyDecayTimeIntegrator.hh" +#include "LauSmearedDecayTimeCalculator.hh" +#include "LauSmearedUniformEfficiencyDecayTimeIntegrator.hh" +#include "LauSmearedBinnedEfficiencyDecayTimeIntegrator.hh" +#include "LauSmearedSplineEfficiencyDecayTimeIntegrator.hh" ClassImp(LauDecayTimePdf) -LauDecayTimePdf::LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector& params, - Double_t minAbscissaVal, Double_t maxAbscissaVal, - Double_t minAbscissaErr, Double_t maxAbscissaErr, - FuncType type, UInt_t nGauss, const std::vector& scale, const TimeMeasurementMethod method, const EfficiencyMethod effMethod) : - varName_(theVarName), - varErrName_(theVarErrName), - params_(params), - smear_(kTRUE), - minAbscissa_(minAbscissaVal), - maxAbscissa_(maxAbscissaVal), - minAbscissaError_(minAbscissaErr), - 
maxAbscissaError_(maxAbscissaErr), - abscissaError_(0.0), - abscissaErrorGenerated_(kFALSE), - errorDistMPV_(0.230), // for signal 0.234, for qqbar 0.286 - errorDistSigma_(0.075), // for signal 0.073, for qqbar 0.102 - nGauss_(nGauss), - tau_(nullptr), - deltaM_(nullptr), - deltaGamma_(nullptr), - fracPrompt_(nullptr), - type_(type), - method_(method), - effMethod_(effMethod), - scaleMeans_(scale), - scaleWidths_(scale), - scaleWithPerEventError_( std::accumulate( scale.begin(), scale.end(), kFALSE, std::logical_or() ) ), - expTerm_(0.0), - cosTerm_(0.0), - sinTerm_(0.0), - coshTerm_(0.0), - sinhTerm_(0.0), - normTermExp_(0.0), - normTermCosh_(0.0), - normTermSinh_(0.0), - errTerm_(0.0), - effiTerm_(0.0), - pdfTerm_(0.0), - maxHeight_(0.0), - heightUpToDate_(kFALSE), - errHist_(nullptr), - pdfHist_(nullptr), - effiFun_(nullptr), - effiHist_(nullptr), - effiPars_(0) -{ - if (nGauss > 0) - { - frac_.assign(nGauss_-1,nullptr); - mean_.assign(nGauss_,nullptr); - sigma_.assign(nGauss_,nullptr); - meanVals_.assign(nGauss_,0.0); - sigmaVals_.assign(nGauss_,0.0); - fracVals_.assign(nGauss_,0.0); - } -} - -LauDecayTimePdf::LauDecayTimePdf(const TString& theVarName, const TString& theVarErrName, const std::vector& params, - Double_t minAbscissaVal, Double_t maxAbscissaVal, - Double_t minAbscissaErr, Double_t maxAbscissaErr, - FuncType type, UInt_t nGauss, const std::vector& scaleMeans, const std::vector& scaleWidths, const TimeMeasurementMethod method, const EfficiencyMethod effMethod) : - varName_(theVarName), - varErrName_(theVarErrName), - params_(params), - smear_(kTRUE), - minAbscissa_(minAbscissaVal), - maxAbscissa_(maxAbscissaVal), - minAbscissaError_(minAbscissaErr), - maxAbscissaError_(maxAbscissaErr), - abscissaError_(0.0), - abscissaErrorGenerated_(kFALSE), - errorDistMPV_(0.230), // for signal 0.234, for qqbar 0.286 - errorDistSigma_(0.075), // for signal 0.073, for qqbar 0.102 - nGauss_(nGauss), - tau_(nullptr), - deltaM_(nullptr), - deltaGamma_(nullptr), - 
fracPrompt_(nullptr), - type_(type), - method_(method), - effMethod_(effMethod), - scaleMeans_(scaleMeans), - scaleWidths_(scaleWidths), - scaleWithPerEventError_( std::accumulate( scaleMeans.begin(), scaleMeans.end(), kFALSE, std::logical_or() ) || std::accumulate( scaleWidths.begin(), scaleWidths.end(), kFALSE, std::logical_or() ) ), - expTerm_(0.0), - cosTerm_(0.0), - sinTerm_(0.0), - coshTerm_(0.0), - sinhTerm_(0.0), - normTermExp_(0.0), - normTermCosh_(0.0), - normTermSinh_(0.0), - errTerm_(0.0), - effiTerm_(0.0), - pdfTerm_(0.0), - maxHeight_(0.0), - heightUpToDate_(kFALSE), - errHist_(nullptr), - pdfHist_(nullptr), - effiFun_(nullptr), - effiHist_(nullptr), - effiPars_(0) -{ - if (nGauss > 0) - { - frac_.assign(nGauss_-1,nullptr); - mean_.assign(nGauss_,nullptr); - sigma_.assign(nGauss_,nullptr); - meanVals_.assign(nGauss_,0.0); - sigmaVals_.assign(nGauss_,0.0); - fracVals_.assign(nGauss_,0.0); - } -} - -LauDecayTimePdf::~LauDecayTimePdf() -{ - // Destructor - delete errHist_; errHist_ = nullptr; - delete pdfHist_; pdfHist_ = nullptr; - delete effiFun_; effiFun_ = nullptr; - delete effiHist_; effiHist_ = nullptr; - for( auto& par : effiPars_ ){ delete par; par = nullptr; } - effiPars_.clear(); +LauDecayTimePdf::LauDecayTimePdf(const TString& theVarName, const Double_t minAbscissaVal, const Double_t maxAbscissaVal, + const TH1* dtHist, + const LauDecayTime::TimeMeasurementMethod method) : + type_{LauDecayTime::FuncType::Hist}, + method_{method}, + varName_{theVarName}, + minAbscissa_{minAbscissaVal}, + maxAbscissa_{maxAbscissaVal}, + pdfHist_{ (dtHist==nullptr) ? 
nullptr : std::make_unique( varName_, dtHist, minAbscissa_, maxAbscissa_ ) } + // TODO - check this, once we've consolidated all the data members +{ +} + +LauDecayTimePdf::LauDecayTimePdf(const TString& theVarName, const Double_t minAbscissaVal, const Double_t maxAbscissaVal, + const TString& theVarErrName, const Double_t minAbscissaErr, const Double_t maxAbscissaErr, + const TH1* dtHist, + const TH1* dtErrHist, + const LauDecayTime::TimeMeasurementMethod method) : + type_{LauDecayTime::FuncType::Hist}, + method_{method}, + varName_{theVarName}, + varErrName_{theVarErrName}, + minAbscissa_{minAbscissaVal}, + maxAbscissa_{maxAbscissaVal}, + minAbscissaError_{minAbscissaErr}, + maxAbscissaError_{maxAbscissaErr}, + pdfHist_{ (dtHist==nullptr) ? nullptr : std::make_unique( varName_, dtHist, minAbscissa_, maxAbscissa_ ) }, + errHist_{ (dtErrHist==nullptr) ? nullptr : std::make_unique( varErrName_, dtErrHist, minAbscissaError_, maxAbscissaError_ ) } + // TODO - check this, once we've consolidated all the data members +{ +} + +LauDecayTimePdf::LauDecayTimePdf(const TString& theVarName, const Double_t minAbscissaVal, const Double_t maxAbscissaVal, + std::unique_ptr physicsModel, + const LauDecayTime::TimeMeasurementMethod method) : + type_{physicsModel->getFunctionType()}, + method_{method}, + varName_{theVarName}, + minAbscissa_{minAbscissaVal}, + maxAbscissa_{maxAbscissaVal}, + physicsModel_{std::move(physicsModel)} + // TODO - check this, once we've consolidated all the data members +{ + auto effModel = std::make_unique(1.0); + calculator_ = std::make_unique( minAbscissa_, maxAbscissa_, *physicsModel_ ); + integrator_ = std::make_unique( minAbscissa_, maxAbscissa_, *physicsModel_, *effModel ); + efficiencyModel_ = std::move( effModel ); +} + +LauDecayTimePdf::LauDecayTimePdf(const TString& theVarName, const Double_t minAbscissaVal, const Double_t maxAbscissaVal, + std::unique_ptr physicsModel, + std::unique_ptr resolutionModel, + const 
LauDecayTime::TimeMeasurementMethod method) : + type_{physicsModel->getFunctionType()}, + method_{method}, + varName_{theVarName}, + minAbscissa_{minAbscissaVal}, + maxAbscissa_{maxAbscissaVal}, + physicsModel_{std::move(physicsModel)}, + resolutionModel_{std::move(resolutionModel)}, + smear_{true} + // TODO - check this, once we've consolidated all the data members +{ + auto effModel = std::make_unique(1.0); + calculator_ = std::make_unique( minAbscissa_, maxAbscissa_, *physicsModel_, *resolutionModel_ ); + integrator_ = std::make_unique( minAbscissa_, maxAbscissa_, *physicsModel_, *effModel, *resolutionModel_ ); + efficiencyModel_ = std::move( effModel ); +} + +LauDecayTimePdf::LauDecayTimePdf(const TString& theVarName, const Double_t minAbscissaVal, const Double_t maxAbscissaVal, + const TString& theVarErrName, const Double_t minAbscissaErr, const Double_t maxAbscissaErr, + std::unique_ptr physicsModel, + std::unique_ptr resolutionModel, + const TH1* dtErrHist, + const LauDecayTime::TimeMeasurementMethod method) : + type_{physicsModel->getFunctionType()}, + method_{method}, + varName_{theVarName}, + varErrName_{theVarErrName}, + minAbscissa_{minAbscissaVal}, + maxAbscissa_{maxAbscissaVal}, + minAbscissaError_{minAbscissaErr}, + maxAbscissaError_{maxAbscissaErr}, + physicsModel_{std::move(physicsModel)}, + resolutionModel_{std::move(resolutionModel)}, + smear_{true}, + scaleWithPerEventError_{true}, + errHist_{ (dtErrHist==nullptr) ? 
nullptr : std::make_unique( varErrName_, dtErrHist, minAbscissaError_, maxAbscissaError_ ) } + // TODO - check this, once we've consolidated all the data members +{ + auto effModel = std::make_unique(1.0); + calculator_ = std::make_unique( minAbscissa_, maxAbscissa_, *physicsModel_, *resolutionModel_ ); + integrator_ = std::make_unique( minAbscissa_, maxAbscissa_, *physicsModel_, *effModel, *resolutionModel_ ); + efficiencyModel_ = std::move( effModel ); } void LauDecayTimePdf::initialise() { - // The parameters are: - // - the mean and the sigma (bias and spread in resolution) of the gaussian(s) - // - the mean lifetime, denoted tau, of the exponential decay - // - the frequency of oscillation, denoted Delta m, of the cosine and sine terms - // - the decay width difference, denoted Delta Gamma, of the hyperbolic cosine and sine terms - // - // The next two arguments specify the range in which the PDF is defined, - // and the PDF will be normalised w.r.t. these limits. - // - // The final three arguments define the type of Delta t PDF (Delta, Exp, ExpTrig or ExpHypTrig ), the number of gaussians - // and whether or not the gaussian parameters should be scaled by the per-event errors on Delta t - - if (type_ == FuncType::Hist) { - if (nGauss_ != 0){ - std::cerr<<"ERROR in LauDecayTimePdf::initialise : Hist PDF should not have a resolution function"<Exit(EXIT_FAILURE); - } - if (this->nParameters() != 0){ - std::cerr<<"ERROR in LauDecayTimePdf::initialise : Hist PDF should have 0 parameters"<Exit(EXIT_FAILURE); - } - if ( pdfHist_ == nullptr ) { - std::cerr<<"ERROR in LauDecayTimePdf::initialise : Hist PDF should have a histogram PDF supplied"<Exit(EXIT_FAILURE); } } - // Check whether the scale vector is nGauss in size - if (nGauss_ != scaleMeans_.size() || nGauss_ != scaleWidths_.size()) { - std::cerr<<"ERROR in LauDecayTimePdf::initialise : scale vector size not the same as nGauss."<Exit(EXIT_FAILURE); - } - - TString meanName("mean_"); - TString 
sigmaName("sigma_"); - TString fracName("frac_"); - Bool_t foundParams(kTRUE); - - for (UInt_t i(0); ifindParameter(tempName); - foundParams &= (mean_[i] != nullptr); - sigma_[i] = this->findParameter(tempName2); - foundParams &= (sigma_[i] != nullptr); - if (i!=0) { - frac_[i-1] = this->findParameter(tempName3); - foundParams &= (frac_[i-1] != nullptr); + if ( smear_ and scaleWithPerEventError_ ) { + if ( not errHist_ ) { + std::cerr << "ERROR in LauDecayTimePdf::initialise : scaling with per-event error but no error distribution supplied" << std::endl; + gSystem->Exit(EXIT_FAILURE); } } - switch ( type_ ) { - case FuncType::Hist : - // Nothing to check! - break; - case FuncType::Delta : - if ((this->nParameters() != (3*nGauss_-1)) || (!foundParams)) { - std::cerr<<"ERROR in LauDecayTimePdf::initialise : Delta type PDF requires:"<Exit(EXIT_FAILURE); - } - break; - case FuncType::Exp : - tau_ = this->findParameter("tau"); - foundParams &= (tau_ != nullptr); - if ((this->nParameters() != (3*nGauss_-1+1)) || (!foundParams)) { - std::cerr<<"ERROR in LauDecayTimePdf::initialise : Exp type PDF requires:"<Exit(EXIT_FAILURE); - } - break; - case FuncType::DeltaExp : - tau_ = this->findParameter("tau"); - fracPrompt_ = this->findParameter("frac_prompt"); - foundParams &= (tau_ != nullptr); - foundParams &= (fracPrompt_ != nullptr); - if ((this->nParameters() != (3*nGauss_-1+2)) || (!foundParams)) { - std::cerr<<"ERROR in LauDecayTimePdf::initialise : DeltaExp type PDF requires:"<Exit(EXIT_FAILURE); - } - break; - case FuncType::ExpTrig : - tau_ = this->findParameter("tau"); - deltaM_ = this->findParameter("deltaM"); - foundParams &= (tau_ != nullptr); - foundParams &= (deltaM_ != nullptr); - if ((this->nParameters() != (3*nGauss_-1+2)) || (!foundParams)) { - std::cerr<<"ERROR in LauDecayTimePdf::initialise : ExpTrig type PDF requires:"<Exit(EXIT_FAILURE); - } - break; - case FuncType::ExpHypTrig : - tau_ = this->findParameter("tau"); - deltaM_ = 
this->findParameter("deltaM"); - deltaGamma_ = this->findParameter("deltaGamma"); - foundParams &= (tau_ != nullptr); - foundParams &= (deltaM_ != nullptr); - foundParams &= (deltaGamma_ != nullptr); - if ((this->nParameters() != (3*nGauss_-1+3)) || (!foundParams)) { - std::cerr<<"ERROR in LauDecayTimePdf::initialise : ExpHypTrig type PDF requires:"<Exit(EXIT_FAILURE); - } - break; - } + // TODO other consistency checks? - // Setup the normalisation caches - normTermsExp_.clear(); normTermsExp_.resize(1); - normTermsCos_.clear(); normTermsCos_.resize(1); - normTermsSin_.clear(); normTermsSin_.resize(1); - normTermsCosh_.clear(); normTermsCosh_.resize(1); - normTermsSinh_.clear(); normTermsSinh_.resize(1); - if ( effMethod_ == EfficiencyMethod::Spline ) { - const UInt_t nSplineSegments { effiFun_->getnKnots() - 1 }; - if ( not this->doSmearing() ) { - expTermIkVals_.clear(); expTermIkVals_.resize(nSplineSegments); - trigTermIkVals_.clear(); trigTermIkVals_.resize(nSplineSegments); - hypHTermIkVals_.clear(); hypHTermIkVals_.resize(nSplineSegments); - hypLTermIkVals_.clear(); hypLTermIkVals_.resize(nSplineSegments); - } else { - // Set outer vectors to size 1 (will be resized to nEvents in cacheInfo if necessary) - meanPowerVals_.clear(); meanPowerVals_.resize(1); meanPowerVals_.front().resize(nGauss_); - sigmaPowerVals_.clear(); sigmaPowerVals_.resize(1); sigmaPowerVals_.front().resize(nGauss_); - - expTermKvecVals_.clear(); expTermKvecVals_.resize(1); expTermKvecVals_.front().resize(nGauss_); - trigTermKvecVals_.clear(); trigTermKvecVals_.resize(1); trigTermKvecVals_.front().resize(nGauss_); - hypHTermKvecVals_.clear(); hypHTermKvecVals_.resize(1); hypHTermKvecVals_.front().resize(nGauss_); - hypLTermKvecVals_.clear(); hypLTermKvecVals_.resize(1); hypLTermKvecVals_.front().resize(nGauss_); - - expTermMvecVals_.clear(); expTermMvecVals_.resize(1); expTermMvecVals_.front().resize(nSplineSegments); - for ( auto& innerVec : expTermMvecVals_.front() ) { 
innerVec.resize(nGauss_); } - trigTermMvecVals_.clear(); trigTermMvecVals_.resize(1); trigTermMvecVals_.front().resize(nSplineSegments); - for ( auto& innerVec : trigTermMvecVals_.front() ) { innerVec.resize(nGauss_); } - hypHTermMvecVals_.clear(); hypHTermMvecVals_.resize(1); hypHTermMvecVals_.front().resize(nSplineSegments); - for ( auto& innerVec : hypHTermMvecVals_.front() ) { innerVec.resize(nGauss_); } - hypLTermMvecVals_.clear(); hypLTermMvecVals_.resize(1); hypLTermMvecVals_.front().resize(nSplineSegments); - for ( auto& innerVec : hypLTermMvecVals_.front() ) { innerVec.resize(nGauss_); } - } - } + // Initialise the physics model + physicsModel_->initialise(); - // Force calculation of all relevant info by faking that all parameter values have changed - nothingFloating_ = nothingChanged_ = kFALSE; - anyKnotFloating_ = anyKnotChanged_ = not effiPars_.empty(); - nonKnotFloating_ = nonKnotChanged_ = kTRUE; - physicsParFloating_ = physicsParChanged_ = kTRUE; - tauFloating_ = tauChanged_ = ( tau_ != nullptr ); - deltaMFloating_ = deltaMChanged_ = ( deltaM_ != nullptr ); - deltaGammaFloating_ = deltaGammaChanged_ = ( deltaGamma_ != nullptr ); - resoParFloating_ = resoParChanged_ = kTRUE; -} + // Initialise the resolution model + if ( resolutionModel_ ) { + resolutionModel_->initialise(); + } -Double_t LauDecayTimePdf::effectiveResolution() const -{ - Double_t dilution = 0.; + // Initialise the efficiency model + efficiencyModel_->initialise(); - Double_t dMSq = deltaM_->unblindValue() * deltaM_->unblindValue(); + // Get all the parameters and consolidate them so we can pass them to the fit model + std::vector physicsPars { physicsModel_->getParameters() }; + physicsParFloating_ = this->anyParFloating( physicsPars ); + this->addParams( physicsPars ); - // Might be cleaner to just append this to the vector in the init step, - // the the consistency can also be checked + if ( resolutionModel_ ) { + std::vector resoPars { resolutionModel_->getParameters() }; + 
resoParFloating_ = this->anyParFloating( resoPars ); + this->addParams( resoPars ); + } - Double_t fracSum = 0; - for (auto f : frac_) fracSum += f->unblindValue(); + std::vector effiPars { efficiencyModel_->getParameters() }; + effiParFloating_ = this->anyParFloating( effiPars ); + this->addParams( effiPars ); - Double_t lastFrac = 1. - fracSum; + anythingFloating_ = physicsParFloating_ or resoParFloating_ or effiParFloating_; +} - for (size_t i = 0; i < sigma_.size(); i++) { +void LauDecayTimePdf::cacheInfo(const LauFitDataTree& inputData) +{ + const std::size_t nEvents { inputData.nEvents() }; - Double_t sigSq = sigma_[i]->unblindValue() * sigma_[i]->unblindValue(); + // If we're a histogram form then there's not so much to do + if ( type_ == LauDecayTime::FuncType::Hist ) { + // Pass the data to the decay-time PDF for caching + pdfHist_->cacheInfo(inputData); - Double_t thisFrac = lastFrac; - if (i < sigma_.size() - 1) thisFrac = frac_[i]->unblindValue(); + // Pass the data to the decay-time error PDF for caching + if ( errHist_ ) { + errHist_->cacheInfo(inputData); + } - dilution += thisFrac * TMath::Exp(-dMSq * 0.5 * sigSq); + // Make cache of effiTerms + // Efficiency will always be 1 by definition + effiTerms_.assign(nEvents,1.0); + return; } - return TMath::Sqrt(-2. * TMath::Log(dilution)) / deltaM_->unblindValue(); -} - + // Otherwise... 
-void LauDecayTimePdf::cacheInfo(const LauFitDataTree& inputData) -{ // Check that the input data contains the decay time variable - Bool_t hasBranch = inputData.haveBranch(this->varName()); + bool hasBranch { inputData.haveBranch( varName_ ) }; if (!hasBranch) { - std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Input data does not contain variable \""<varName()<<"\"."<Exit(EXIT_FAILURE); } // If we're scaling by the per-event error, also check that the input data contains the decay time error variable - const Bool_t needPerEventNormTerms { this->doSmearing() and scaleWithPerEventError_ }; + const bool needPerEventNormTerms { smear_ and scaleWithPerEventError_ }; if ( needPerEventNormTerms ) { - hasBranch = inputData.haveBranch(this->varErrName()); + hasBranch = inputData.haveBranch( varErrName_ ); if (!hasBranch) { - std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Input data does not contain variable \""<varErrName()<<"\"."<Exit(EXIT_FAILURE); } // Pass the data to the decay-time error PDF for caching - if ( errHist_ ) { - errHist_->cacheInfo(inputData); - } + errHist_->cacheInfo(inputData); } - if (type_ == Hist) { - // Pass the data to the decay-time PDF for caching - pdfHist_->cacheInfo(inputData); - // Make cache of effiTerms - const UInt_t nEvents = inputData.nEvents(); - // Efficiency will always be 1 (assumedly) - effiTerms_.assign(nEvents,1.); - } else { - // Clear the vectors and reserve enough space in the caches of the terms - const UInt_t nEvents = inputData.nEvents(); - - abscissas_.clear(); abscissas_.resize(nEvents); - abscissaErrors_.clear(); abscissaErrors_.resize(nEvents); - expTerms_.clear(); expTerms_.resize(nEvents); - cosTerms_.clear(); cosTerms_.resize(nEvents); - sinTerms_.clear(); sinTerms_.resize(nEvents); - coshTerms_.clear(); coshTerms_.resize(nEvents); - sinhTerms_.clear(); sinhTerms_.resize(nEvents); - effiTerms_.clear(); effiTerms_.resize(nEvents); - - // Also resize the normalisation cache elements if we're doing per-event 
resolution - if ( needPerEventNormTerms ) { - - normTermsExp_.clear(); normTermsExp_.resize(nEvents); - normTermsCos_.clear(); normTermsCos_.resize(nEvents); - normTermsSin_.clear(); normTermsSin_.resize(nEvents); - normTermsCosh_.clear(); normTermsCosh_.resize(nEvents); - normTermsSinh_.clear(); normTermsSinh_.resize(nEvents); - - if ( effMethod_ == EfficiencyMethod::Spline ) { - meanPowerVals_.resize(nEvents); - for ( auto& innerVec : meanPowerVals_ ) { innerVec.resize(nGauss_); } - sigmaPowerVals_.resize(nEvents); - for ( auto& innerVec : sigmaPowerVals_ ) { innerVec.resize(nGauss_); } - - expTermKvecVals_.resize(nEvents); - for ( auto& innerVec : expTermKvecVals_ ) { innerVec.resize(nGauss_); } - trigTermKvecVals_.resize(nEvents); - for ( auto& innerVec : trigTermKvecVals_ ) { innerVec.resize(nGauss_); } - hypHTermKvecVals_.resize(nEvents); - for ( auto& innerVec : hypHTermKvecVals_ ) { innerVec.resize(nGauss_); } - hypLTermKvecVals_.resize(nEvents); - for ( auto& innerVec : hypLTermKvecVals_ ) { innerVec.resize(nGauss_); } - - const UInt_t nSplineSegments { effiFun_->getnKnots() - 1 }; - - expTermMvecVals_.resize(nEvents); - for ( auto& middleVec : expTermMvecVals_) { - middleVec.resize(nSplineSegments); - for ( auto& innerVec : middleVec ) { - innerVec.resize(nGauss_); - } - } - trigTermMvecVals_.resize(nEvents); - for ( auto& middleVec : trigTermMvecVals_) { - middleVec.resize(nSplineSegments); - for ( auto& innerVec : middleVec ) { - innerVec.resize(nGauss_); - } - } - hypHTermMvecVals_.resize(nEvents); - for ( auto& middleVec : hypHTermMvecVals_) { - middleVec.resize(nSplineSegments); - for ( auto& innerVec : middleVec ) { - innerVec.resize(nGauss_); - } - } - hypLTermMvecVals_.resize(nEvents); - for ( auto& middleVec : hypLTermMvecVals_) { - middleVec.resize(nSplineSegments); - for ( auto& innerVec : middleVec ) { - innerVec.resize(nGauss_); - } - } - } - } - - // Determine the abscissa and abscissa error values for each event - for (UInt_t iEvt {0}; iEvt 
< nEvents; iEvt++) { + // Clear the vectors and reserve enough space in the caches of the terms + abscissas_.clear(); abscissas_.resize(nEvents); + abscissaErrors_.clear(); abscissaErrors_.resize(nEvents); + termsStore_.clear(); termsStore_.resize(nEvents); + effiTerms_.clear(); effiTerms_.resize(nEvents); - const LauFitData& dataValues = inputData.getData(iEvt); - - const Double_t abscissa { dataValues.at(this->varName()) }; - - if (abscissa > this->maxAbscissa() || abscissa < this->minAbscissa()) { - std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Given value of the decay time: "<minAbscissa()<<","<maxAbscissa()<<"]."<Exit(EXIT_FAILURE); - } + // Correctly size the normalisation cache elements + // depending on whether we're doing per-event resolution + normTermsStore_.clear(); + if ( needPerEventNormTerms ) { + normTermsStore_.resize(nEvents); + } else { + normTermsStore_.resize(1); + } - abscissas_[iEvt] = abscissa ; + // Determine the abscissa and abscissa error values for each event + for ( std::size_t iEvt {0}; iEvt < nEvents; iEvt++ ) { - const Double_t abscissaErr { needPerEventNormTerms ? 
dataValues.at(this->varErrName()) : 0.0 }; + const LauFitData& dataValues { inputData.getData(iEvt) }; - if ( needPerEventNormTerms and ( abscissaErr > this->maxAbscissaError() or abscissaErr < this->minAbscissaError() ) ) { - std::cerr<<"ERROR in LauDecayTimePdf::cacheInfo : Given value of the decay-time error: "<minAbscissaError()<<","<maxAbscissaError()<<"]."<Exit(EXIT_FAILURE); - } + const Double_t abscissa { dataValues.at( varName_ ) }; - abscissaErrors_[iEvt] = abscissaErr; + if ( abscissa > maxAbscissa_ or abscissa < minAbscissa_ ) { + std::cerr << "ERROR in LauDecayTimePdf::cacheInfo : Given value of the decay time: " << abscissa + << " is outside allowed range: [" << minAbscissa_ << "," << maxAbscissa_ << "]" << std::endl; + gSystem->Exit(EXIT_FAILURE); } - // Force calculation of all info by faking that all parameter values have changed - nothingFloating_ = nothingChanged_ = kFALSE; - anyKnotFloating_ = anyKnotChanged_ = not effiPars_.empty(); - nonKnotFloating_ = nonKnotChanged_ = kTRUE; - physicsParFloating_ = physicsParChanged_ = kTRUE; - tauFloating_ = tauChanged_ = ( tau_ != nullptr ); - deltaMFloating_ = deltaMChanged_ = ( deltaM_ != nullptr ); - deltaGammaFloating_ = deltaGammaChanged_ = ( deltaGamma_ != nullptr ); - resoParFloating_ = resoParChanged_ = kTRUE; - - // Fill the rest of the cache - this->updateCache(); - - // Set the various "parameter-is-floating" flags, used to bookkeep the cache in propagateParUpdates - LauParamFixed isFixed; - nonKnotFloating_ = not std::all_of(params_.begin(), params_.end(), isFixed); - anyKnotFloating_ = not std::all_of(effiPars_.begin(), effiPars_.end(), isFixed); - nothingFloating_ = not (nonKnotFloating_ or anyKnotFloating_); + abscissas_[iEvt] = abscissa; - std::cout << "INFO in LauDecayTimePdf::cacheInfo : nothing floating set to: " << (nothingFloating_ ? "True" : "False") << std::endl; - std::cout << "INFO in LauDecayTimePdf::cacheInfo : any knot floating set to: " << (anyKnotFloating_ ? 
"True" : "False") << std::endl; - std::cout << "INFO in LauDecayTimePdf::cacheInfo : non-knot floating set to: " << (nonKnotFloating_ ? "True" : "False") << std::endl; + const Double_t abscissaErr { needPerEventNormTerms ? dataValues.at( varErrName_ ) : 0.0 }; - tauFloating_ = tau_ ? not tau_->fixed() : kFALSE; - deltaMFloating_ = deltaM_ ? not deltaM_->fixed() : kFALSE; - deltaGammaFloating_ = deltaGamma_ ? not deltaGamma_->fixed() : kFALSE; - - physicsParFloating_ = ( tauFloating_ or deltaMFloating_ or deltaGammaFloating_ ); - - std::cout << "INFO in LauDecayTimePdf::cacheInfo : tau floating set to: " << (tauFloating_ ? "True" : "False") << std::endl; - std::cout << "INFO in LauDecayTimePdf::cacheInfo : deltaM floating set to: " << (deltaMFloating_ ? "True" : "False") << std::endl; - std::cout << "INFO in LauDecayTimePdf::cacheInfo : deltaGamma floating set to: " << (deltaGammaFloating_ ? "True" : "False") << std::endl; + if ( errHist_ and ( abscissaErr > maxAbscissaError_ or abscissaErr < minAbscissaError_ ) ) { + std::cerr << "ERROR in LauDecayTimePdf::cacheInfo : Given value of the decay-time error: " << abscissaErr + << " is outside allowed range: [" << minAbscissaError_ << "," << maxAbscissaError_ << "]." << std::endl; + gSystem->Exit(EXIT_FAILURE); + } - resoParFloating_ = kFALSE; - for ( UInt_t i{0}; i < nGauss_; ++i ) - { - const Bool_t meanFloating { not mean_[i]->fixed() }; - const Bool_t sigmaFloating { not sigma_[i]->fixed() }; + abscissaErrors_[iEvt] = abscissaErr; + } - resoParFloating_ |= (meanFloating or sigmaFloating); + // Cache the numerator terms + calculator_->cacheInfo( abscissas_, abscissaErrors_ ); - std::cout << "INFO in LauDecayTimePdf::cacheInfo : mean[" << i << "] floating set to: " << (meanFloating ? "True" : "False") << std::endl; - std::cout << "INFO in LauDecayTimePdf::cacheInfo : sigma[" << i << "] floating set to: " << (sigmaFloating ? 
"True" : "False") << std::endl; + // Cache the normalisations + integrator_->cacheInfo( abscissaErrors_ ); - if ( i < (nGauss_ - 1) ) { - const Bool_t fracFloating { not frac_[i]->fixed() }; + // Force storing of all info this first time around + physicsParChanged_ = true; + resoParChanged_ = true; + effiParChanged_ = true; + this->updateCache(); +} - resoParFloating_ |= fracFloating; +bool LauDecayTimePdf::anyParFloating( const std::vector& pars ) const +{ + LauParamFixed isFixed; + return not std::all_of( pars.begin(), pars.end(), isFixed ); +} - std::cout << "INFO in LauDecayTimePdf::cacheInfo : frac[" << i << "] floating set to: " << (fracFloating ? "True" : "False") << std::endl; - } - } +void LauDecayTimePdf::addParams( std::vector& pars ) +{ + for ( auto& par : pars ) { + params_.push_back( par ); } } void LauDecayTimePdf::updateCache() { - // Get the updated values of all parameters - static auto assignValue = [](const LauAbsRValue* par){return par->unblindValue();}; - - if ( anyKnotChanged_ ) { - std::transform( effiPars_.begin(), effiPars_.end(), effiParVals_.begin(), assignValue ); - } - - if ( tauChanged_ ) { - tauVal_ = tau_->unblindValue(); - gammaVal_ = 1.0 / tauVal_; - } - - if ( deltaMChanged_ ) { - deltaMVal_ = deltaM_->unblindValue(); - } - - if ( deltaGammaChanged_ ) { - deltaGammaVal_ = deltaGamma_->unblindValue(); - } - - if ( resoParChanged_ ) { - std::transform( mean_.begin(), mean_.end(), meanVals_.begin(), assignValue ); - std::transform( sigma_.begin(), sigma_.end(), sigmaVals_.begin(), assignValue ); - std::transform( frac_.begin(), frac_.end(), fracVals_.begin()+1, assignValue ); - fracVals_[0] = std::accumulate( fracVals_.begin()+1, fracVals_.end(), 1.0, std::minus{} ); - } - // Calculate the values of all terms for each event - // TODO - need to sort out UInt_t vs ULong_t everywhere!! 
- const UInt_t nEvents { static_cast(abscissas_.size()) }; + const std::size_t nEvents { abscissas_.size() }; // If any of the physics or resolution parameters have changed we need - // to update everything, otherwise we only need to recalculate the - // efficiency - if ( nonKnotChanged_ ) { - for (UInt_t iEvt {0}; iEvt < nEvents; iEvt++) { - const Double_t abscissa { abscissas_[iEvt] }; - const Double_t abscissaErr { abscissaErrors_[iEvt] }; - this->calcLikelihoodInfo(abscissa, abscissaErr); - expTerms_[iEvt] = expTerm_; - cosTerms_[iEvt] = cosTerm_; - sinTerms_[iEvt] = sinTerm_; - coshTerms_[iEvt] = coshTerm_; - sinhTerms_[iEvt] = sinhTerm_; - effiTerms_[iEvt] = effiTerm_; + // to update all numerator terms + if ( physicsParChanged_ or resoParChanged_ ) { + for (std::size_t iEvt {0}; iEvt < nEvents; iEvt++) { + termsStore_[iEvt] = calculator_->getTerms( iEvt ); } - } else { - for (UInt_t iEvt {0}; iEvt < nEvents; iEvt++) { - const Double_t abscissa { abscissas_[iEvt] }; - effiTerm_ = this->calcEffiTerm( abscissa ); - effiTerms_[iEvt] = effiTerm_; + } + + // If any of the efficiency parameters have changed we need to + // recalculate the efficiency + if ( effiParChanged_ ) { + for (std::size_t iEvt {0}; iEvt < nEvents; iEvt++) { + effiTerms_[iEvt] = this->calcEffiTerm( abscissas_[iEvt] ); } } // Calculate the normalisation terms - this->calcNorm(); - - // reset the "parameter-has-changed" flags - anyKnotChanged_ = kFALSE; - tauChanged_ = kFALSE; - deltaMChanged_ = kFALSE; - deltaGammaChanged_ = kFALSE; - physicsParChanged_ = kFALSE; - resoParChanged_ = kFALSE; - nonKnotChanged_ = kFALSE; + // If we're not doing per-event scaling, + // we only need to calculate the normalisations once + const std::array nNormEntries { 1, nEvents }; + const bool needPerEventNormTerms { smear_ and scaleWithPerEventError_ }; + for ( std::size_t iEvt{0}; iEvt < nNormEntries[needPerEventNormTerms]; ++iEvt ) { + normTermsStore_[iEvt] = integrator_->getNormTerms( iEvt ); + } } -void 
LauDecayTimePdf::calcLikelihoodInfo(const UInt_t iEvt) +void LauDecayTimePdf::calcLikelihoodInfo(const std::size_t iEvt) { // Extract all the terms and their normalisations - const Bool_t needPerEventNormTerms { this->doSmearing() and scaleWithPerEventError_ }; - const UInt_t normTermElement { needPerEventNormTerms * iEvt }; - if (type_ == Hist) { + if (type_ == LauDecayTime::FuncType::Hist) { pdfHist_->calcLikelihoodInfo(iEvt); - pdfTerm_ = pdfHist_->getLikelihood(); + histTerm_ = pdfHist_->getLikelihood(); } else { - expTerm_ = expTerms_[iEvt]; - cosTerm_ = cosTerms_[iEvt]; - sinTerm_ = sinTerms_[iEvt]; - coshTerm_ = coshTerms_[iEvt]; - sinhTerm_ = sinhTerms_[iEvt]; - normTermExp_ = normTermsExp_[normTermElement]; - normTermCos_ = normTermsCos_[normTermElement]; - normTermSin_ = normTermsSin_[normTermElement]; - normTermCosh_ = normTermsCosh_[normTermElement]; - normTermSinh_ = normTermsSinh_[normTermElement]; + terms_ = termsStore_[iEvt]; + + const bool needPerEventNormTerms { smear_ and scaleWithPerEventError_ }; + const std::size_t normTermElement { needPerEventNormTerms * iEvt }; + normTerms_ = normTermsStore_[normTermElement]; } // Extract the decay time error PDF value - if ( needPerEventNormTerms and errHist_ ) { + if ( errHist_ ) { errHist_->calcLikelihoodInfo(iEvt); errTerm_ = errHist_->getLikelihood(); } else { errTerm_ = 1.0; } // Extract the decay time efficiency effiTerm_ = effiTerms_[iEvt]; } -void LauDecayTimePdf::calcLikelihoodInfo(const Double_t abscissa) +void LauDecayTimePdf::calcLikelihoodInfo( const Double_t abscissa ) { // Check whether any of the gaussians should be scaled - if any of them should we need the per-event error - if (this->doSmearing() and scaleWithPerEventError_) { - std::cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Per-event error on decay time not provided, cannot calculate anything."<calcLikelihoodInfo(abscissa, 0.0); } Double_t LauDecayTimePdf::calcEffiTerm( const Double_t abscissa ) const { - Double_t 
effiTerm{1.0}; - - switch( effMethod_ ) - { - case EfficiencyMethod::Spline : effiTerm = effiFun_ ? effiFun_ -> evaluate(abscissa) : 1.0 ; break; - case EfficiencyMethod::Binned : effiTerm = effiHist_ ? effiHist_-> GetBinContent(effiHist_-> FindFixBin(abscissa)) : 1.0 ; break; - case EfficiencyMethod::Flat : effiTerm = 1.0 ; break; - } + Double_t effiTerm { efficiencyModel_->getEfficiency( abscissa ) }; // TODO print warning messages for these, but not every time + // - do we even want the upper limit imposed? if ( effiTerm > 1.0 ) { effiTerm = 1.0; } // else if ( effiTerm < 0.0 ) { effiTerm = 0.0; } else if ( effiTerm <= 0.0 ) { effiTerm = 1e-20; } return effiTerm; } void LauDecayTimePdf::calcLikelihoodInfo(const Double_t abscissa, const Double_t abscissaErr) { // Check that the decay time and the decay time error are in valid ranges - if (abscissa > this->maxAbscissa() || abscissa < this->minAbscissa()) { - std::cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Given value of the decay time: "<minAbscissa()<<","<maxAbscissa()<<"]."< maxAbscissa_ or abscissa < minAbscissa_ ) { + std::cerr << "ERROR in LauDecayTimePdf::calcLikelihoodInfo : Given value of the decay time: " << abscissa + << " is outside allowed range: [" << minAbscissa_ << "," << maxAbscissa_ << "]" << std::endl; gSystem->Exit(EXIT_FAILURE); } - if ( (this->doSmearing() and scaleWithPerEventError_) && ( abscissaErr > this->maxAbscissaError() || abscissaErr < this->minAbscissaError() ) ) { - std::cerr<<"ERROR in LauDecayTimePdf::calcLikelihoodInfo : Given value of Delta t error: "<minAbscissaError()<<","<maxAbscissaError()<<"]."< maxAbscissaError_ or abscissaErr < minAbscissaError_ ) ) { + std::cerr << "ERROR in LauDecayTimePdf::calcLikelihoodInfo : Given value of the decay-time error: " << abscissaErr + << " is outside allowed range: [" << minAbscissaError_ << "," << maxAbscissaError_ << "]." 
<< std::endl; gSystem->Exit(EXIT_FAILURE); } - // Determine the decay time efficiency - effiTerm_ = this->calcEffiTerm( abscissa ); - - // For the histogram PDF just calculate that term and return - if (type_ == Hist){ - pdfHist_->calcLikelihoodInfo(abscissa); - pdfTerm_ = pdfHist_->getLikelihood(); - return; - } - - // If we're not using the resolution function, calculate the simple terms and return - if (!this->doSmearing()) { - this->calcNonSmearedTerms(abscissa); - return; - } - - // Get all the up to date parameter values for the resolution function - std::vector mean { meanVals_ }; - std::vector sigma { sigmaVals_ }; - std::vector frac { fracVals_ }; - - // Scale the gaussian parameters by the per-event error on Delta t (if appropriate) - if ( scaleWithPerEventError_ ) { - for (UInt_t i{0}; i x(nGauss_); - for (UInt_t i{0}; iunblindValue(); - } - - Double_t value(0.0); - - if (type_ == Delta || type_ == DeltaExp) { - - // Calculate the gaussian function(s) - const Double_t xMax = this->maxAbscissa(); - const Double_t xMin = this->minAbscissa(); - - for (UInt_t i(0); i 1e-10) { - Double_t exponent(0.0); - Double_t norm(0.0); - Double_t scale = LauConstants::root2*sigma[i]; - Double_t scale2 = LauConstants::rootPiBy2*sigma[i]; - exponent = -x[i]*x[i]; - norm = scale2*(TMath::Erf((xMax - mean[i])/scale) - - TMath::Erf((xMin - mean[i])/scale)); - value += frac[i]*TMath::Exp(exponent)/norm; - } - } - } - - if (type_ != Delta) { - - // Reset values of terms - expTerm_ = 0.0; - cosTerm_ = 0.0; - sinTerm_ = 0.0; - coshTerm_ = 0.0; - sinhTerm_ = 0.0; - - // Calculate values of the PDF convoluted with each Gaussian for a given value of the abscsissa - for (UInt_t i(0); ismearedGeneralTerm( z, x[i] ).real() }; - expTerm_ += frac[i] * expTerm; - - if ( type_ == ExpTrig or type_ == ExpHypTrig ) { - - const std::complex zTrig { gammaVal_ * sigmaOverRoot2, -deltaMVal_ * sigmaOverRoot2 }; - const std::complex trigTerm { this->smearedGeneralTerm( zTrig, x[i] ) }; - - const 
Double_t cosTerm { trigTerm.real() }; - const Double_t sinTerm { trigTerm.imag() }; - - cosTerm_ += frac[i] * cosTerm; - sinTerm_ += frac[i] * sinTerm; - - if ( type_ == ExpTrig ) { - - coshTerm_ += frac[i] * expTerm; - - } else { - - const std::complex zH { (gammaVal_ - 0.5 * deltaGammaVal_) * sigmaOverRoot2 }; - const std::complex zL { (gammaVal_ + 0.5 * deltaGammaVal_) * sigmaOverRoot2 }; - - const Double_t termH { this->smearedGeneralTerm( zH, x[i] ).real() }; - const Double_t termL { this->smearedGeneralTerm( zL, x[i] ).real() }; - - const Double_t coshTerm { 0.5 * (termH + termL) }; - const Double_t sinhTerm { 0.5 * (termH - termL) }; - - coshTerm_ += frac[i] * coshTerm; - sinhTerm_ += frac[i] * sinhTerm; - } - } - - } - - if (type_ == DeltaExp) { - value *= fracPrompt; - value += (1.0-fracPrompt)*expTerm_; - } else { - value = expTerm_; - } - } - // Calculate the decay time error PDF value - if ( scaleWithPerEventError_ and errHist_ ) { + if ( errHist_ ) { const std::vector absErrVec {abscissaErr}; errHist_->calcLikelihoodInfo(absErrVec); errTerm_ = errHist_->getLikelihood(); } else { errTerm_ = 1.0; } -} -void LauDecayTimePdf::calcNonSmearedTerms(const Double_t abscissa) -{ - // Reset values of terms - errTerm_ = 1.0; - expTerm_ = 0.0; - cosTerm_ = 0.0; - sinTerm_ = 0.0; - coshTerm_ = 0.0; - sinhTerm_ = 0.0; - - if ( type_ == Hist || type_ == Delta ){ + // For the histogram PDF just calculate that term and return + if ( type_ == LauDecayTime::FuncType::Hist ){ + pdfHist_->calcLikelihoodInfo(abscissa); + histTerm_ = pdfHist_->getLikelihood(); + effiTerm_ = 1.0; return; } - if (method_ == DecayTime) { - expTerm_ = TMath::Exp(-abscissa*gammaVal_); - } else if (method_ == DecayTimeDiff) { - expTerm_ = TMath::Exp(-TMath::Abs(abscissa)*gammaVal_); - } - - // Calculate also the terms related to cosine and sine - if (type_ == ExpTrig) { - - coshTerm_ = expTerm_; - sinhTerm_ = 0.0; - cosTerm_ = TMath::Cos(deltaMVal_*abscissa)*expTerm_; - sinTerm_ = 
TMath::Sin(deltaMVal_*abscissa)*expTerm_; - - } - - // Calculate also the terms related to cosh, sinh, cosine, and sine - else if (type_ == ExpHypTrig) { - - coshTerm_ = TMath::CosH(0.5*deltaGammaVal_*abscissa)*expTerm_; - sinhTerm_ = TMath::SinH(0.5*deltaGammaVal_*abscissa)*expTerm_; - cosTerm_ = TMath::Cos(deltaMVal_*abscissa)*expTerm_; - sinTerm_ = TMath::Sin(deltaMVal_*abscissa)*expTerm_; - - } -} - -std::complex LauDecayTimePdf::smearedGeneralTerm( const std::complex& z, const Double_t x ) -{ - using namespace std::complex_literals; - - const std::complex arg1 { 1i * (z - x) }; - - const std::complex arg2 { -(x*x) - (arg1*arg1) }; - - const std::complex conv { ( arg1.imag() < -5.0 ) ? 0.5 * std::exp(arg2) * RooMath::erfc( -1i * arg1 ) : 0.5 * std::exp(-(x*x)) * RooMath::faddeeva(arg1) }; - - return conv; -} - -std::pair LauDecayTimePdf::nonSmearedCosSinIntegral(const Double_t minAbs, const Double_t maxAbs) -{ - // From 1407.0748, not clear whether complex is faster in this case - - const LauComplex denom { gammaVal_, -deltaMVal_ }; - const LauComplex exponent { -gammaVal_, deltaMVal_ }; - - const LauComplex num0 { -exponent.scale(minAbs).exp() }; - const LauComplex num1 { -exponent.scale(maxAbs).exp() }; - - const LauComplex integral { (num1 - num0) / denom }; - - return {integral.re(), integral.im()}; -} - -Double_t LauDecayTimePdf::nonSmearedExpIntegral(const Double_t minAbs, const Double_t maxAbs) -{ - return tauVal_ * ( TMath::Exp(-minAbs*gammaVal_) - TMath::Exp(-maxAbs*gammaVal_) ); -} - -std::pair LauDecayTimePdf::nonSmearedCoshSinhIntegral(const Double_t minAbs, const Double_t maxAbs) -{ - // Use exponential formualtion rather than cosh, sinh. - // Fewer terms (reused for each), but not guaranteed to be faster. 
- - const Double_t gammaH { gammaVal_ - 0.5 * deltaGammaVal_ }; - const Double_t gammaL { gammaVal_ + 0.5 * deltaGammaVal_ }; - - const Double_t tauH { 1.0 / gammaH }; - const Double_t tauL { 1.0 / gammaL }; - - const Double_t nL1 { -TMath::Exp(-gammaL * maxAbs) * tauL }; - const Double_t nH1 { -TMath::Exp(-gammaH * maxAbs) * tauH }; - const Double_t nL0 { -TMath::Exp(-gammaL * minAbs) * tauL }; - const Double_t nH0 { -TMath::Exp(-gammaH * minAbs) * tauH }; - - const Double_t coshIntegral { 0.5 * ( (nH1 + nL1) - (nH0 + nL0) ) }; - const Double_t sinhIntegral { 0.5 * ( (nH1 - nL1) - (nH0 - nL0) ) }; - - return {coshIntegral, sinhIntegral}; -} - -std::complex LauDecayTimePdf::smearedGeneralIntegral(const std::complex& z, const Double_t minAbs, const Double_t maxAbs, const Double_t sigmaOverRoot2, const Double_t mu) -{ - using namespace std::complex_literals; - - const Double_t x1 { (maxAbs - mu) / (2.0 * sigmaOverRoot2) }; - const Double_t x0 { (minAbs - mu) / (2.0 * sigmaOverRoot2) }; - - const std::complex arg1 { 1i * (z - x1) }; - const std::complex arg0 { 1i * (z - x0) }; - - std::complex integral = 0.0 + 0i; - - if(arg1.imag() < -5.0) - {integral = RooMath::erf(x1) - std::exp(-(x1*x1) - (arg1*arg1)) * RooMath::erfc(-1i * arg1);} - else - {integral = RooMath::erf(x1) - TMath::Exp(-(x1*x1)) * RooMath::faddeeva(arg1);} - - if(arg0.imag() < -5.0) - {integral -= RooMath::erf(x0) - std::exp(-(x0*x0) - (arg0*arg0)) * RooMath::erfc(-1i * arg0);} - else - {integral -= RooMath::erf(x0) - TMath::Exp(-(x0*x0)) * RooMath::faddeeva(arg0);} - - integral *= (sigmaOverRoot2 / (2.0 * z)); - - return integral; -} - -void LauDecayTimePdf::calcNorm() -{ - // If we're not doing per-event scaling then we only need to calculate things once - // TODO - need to sort out UInt_t vs ULong_t everywhere!! - const Bool_t needPerEventNormTerms { this->doSmearing() and scaleWithPerEventError_ }; - const UInt_t nEvents { needPerEventNormTerms ? 
static_cast(abscissaErrors_.size()) : 1 }; - - // Get all the up to date parameter values - std::vector means { meanVals_ }; - std::vector sigmas { sigmaVals_ }; - std::vector fracs { fracVals_ }; - - for ( UInt_t iEvt{0}; iEvt < nEvents; ++iEvt ) { - - // first reset integrals to zero - normTermExp_ = 0.0; - normTermCos_ = 0.0; - normTermSin_ = 0.0; - normTermCosh_ = 0.0; - normTermSinh_ = 0.0; - - // Scale the gaussian parameters by the per-event error on decay time (if appropriate) - if ( needPerEventNormTerms ) { - const Double_t abscissaErr { abscissaErrors_[iEvt] }; - for (UInt_t i{0}; i doSmearing() ) - {this->calcSmearedPartialIntegrals( minAbscissa_, maxAbscissa_ , uniformEffVal, means, sigmas, fracs );} - else - {this->calcNonSmearedPartialIntegrals( minAbscissa_, maxAbscissa_, uniformEffVal );} - break; - } - - case EfficiencyMethod::Binned : - { - // Efficiency varies as piecewise constant - // Total integral is sum of integrals in each bin, each weighted by efficiency in that bin - const Int_t nBins { effiHist_->GetNbinsX() }; - for ( Int_t bin{1}; bin <= nBins; ++bin ) { - const Double_t loEdge {effiHist_->GetBinLowEdge(bin)}; - const Double_t hiEdge {loEdge + effiHist_->GetBinWidth(bin)}; - const Double_t effVal {effiHist_->GetBinContent(bin)}; - if ( this -> doSmearing() ) - {this->calcSmearedPartialIntegrals( loEdge, hiEdge, effVal, means, sigmas, fracs );} - else - {this->calcNonSmearedPartialIntegrals( loEdge, hiEdge, effVal );} - } - break; - } - - case EfficiencyMethod::Spline : - { - // Efficiency varies as piecewise polynomial - // Use methods from https://arxiv.org/abs/1407.0748 section 4 to calculate - const UInt_t nSplineSegments { effiFun_->getnKnots() - 1 }; - if ( this->doSmearing() ) { - if ( nonKnotChanged_ ) { - if ( resoParChanged_ ) { - this->calcMeanAndSigmaPowers( iEvt, means, sigmas ); - } - this->calcKVectors( iEvt ); - } - for(UInt_t iSeg{0}; iSeg < nSplineSegments; ++iSeg) - { - this->calcSmearedSplinePartialIntegrals( iEvt, 
iSeg, means, sigmas, fracs ); - } - } else { - for(UInt_t iSeg{0}; iSeg < nSplineSegments; ++iSeg) - { - this->calcNonSmearedSplinePartialIntegrals( iSeg ); - } - } - break; - } - } - - normTermsExp_[iEvt] = normTermExp_; - normTermsCos_[iEvt] = normTermCos_; - normTermsSin_[iEvt] = normTermSin_; - normTermsCosh_[iEvt] = normTermCosh_; - normTermsSinh_[iEvt] = normTermSinh_; - } -} - -// TODO - Mildly concerned this is void rather than returning the integrals -// (but this would require refactoring for different return values). -// As long as it doesn't get called outside of calcNorm() it should be fine - DPO -// (TL: comment applies to all calc*PartialIntegrals functions.) -void LauDecayTimePdf::calcNonSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight) -{ - /* TODO - need to implement something for DecayTimeDiff everywhere - if (method_ == DecayTimeDiff) { - // TODO - there should be some TMath::Abs here surely? - normTermExp = weight * tauVal_ * (2.0 - TMath::Exp(-maxAbs*gammaVal_) - TMath::Exp(-minAbs*gammaVal_)); - } - */ - - const Double_t normTermExp { weight * this->nonSmearedExpIntegral(minAbs, maxAbs) }; - normTermExp_ += normTermExp; - - if ( type_ == ExpTrig or type_ == ExpHypTrig ) { - - auto [cosIntegral, sinIntegral] = this->nonSmearedCosSinIntegral(minAbs, maxAbs); - normTermCos_ += weight * cosIntegral; - normTermSin_ += weight * sinIntegral; - - if ( type_ == ExpTrig ) { - - normTermCosh_ += normTermExp; - - } else { - - auto [coshIntegral, sinhIntegral] = this->nonSmearedCoshSinhIntegral(minAbs, maxAbs); - normTermCosh_ += weight * coshIntegral; - normTermSinh_ += weight * sinhIntegral; - } - } -} - -void LauDecayTimePdf::calcSmearedPartialIntegrals(const Double_t minAbs, const Double_t maxAbs, const Double_t weight, const std::vector& means, const std::vector& sigmas, const std::vector& fractions) -{ - for (UInt_t i(0); ismearedGeneralIntegral( z, minAbs, maxAbs, sigmaOverRoot2, means[i] ) }; - const 
Double_t normTermExp { weight * integral.real() }; - - normTermExp_ += fractions[i] * normTermExp; - - if ( type_ == ExpTrig or type_ == ExpHypTrig ) { - - const std::complex zTrig { gammaVal_ * sigmaOverRoot2, -deltaMVal_ * sigmaOverRoot2 }; - const std::complex integralTrig { this->smearedGeneralIntegral( zTrig, minAbs, maxAbs, sigmaOverRoot2, means[i] ) }; - - const Double_t cosIntegral { integralTrig.real() }; - const Double_t sinIntegral { integralTrig.imag() }; - - normTermCos_ += fractions[i] * weight * cosIntegral; - normTermSin_ += fractions[i] * weight * sinIntegral; - - if ( type_ == ExpTrig ) { - - normTermCosh_ += fractions[i] * normTermExp; - - } else { - - // Heavy (H) eigenstate case - const std::complex zH { (gammaVal_ - 0.5 * deltaGammaVal_) * sigmaOverRoot2, 0.0 }; - const std::complex integralH { this->smearedGeneralIntegral( zH, minAbs, maxAbs, sigmaOverRoot2, means[i] ) }; - - // Light (L) eigenstate case - const std::complex zL { (gammaVal_ + 0.5 * deltaGammaVal_) * sigmaOverRoot2, 0.0 };; - const std::complex integralL { this->smearedGeneralIntegral( zL, minAbs, maxAbs, sigmaOverRoot2, means[i] ) }; - - const std::complex coshIntegral { 0.5 * (integralH + integralL) }; - const std::complex sinhIntegral { 0.5 * (integralH - integralL) }; - - normTermCosh_ += fractions[i] * weight * coshIntegral.real(); - normTermSinh_ += fractions[i] * weight * sinhIntegral.real(); - } - } - } -} - -void LauDecayTimePdf::calcMeanAndSigmaPowers( const UInt_t iEvt, const std::vector& means, const std::vector& sigmas ) -{ - // Calculate powers of mu and sigma/sqrt(2) needed by all terms in the smeared spline normalisation - - for (UInt_t i(0); i z; - - for (UInt_t i(0); igenerateKvector(z); - - if ( type_ == ExpTrig || type_ == ExpHypTrig ) { - - z.real( gammaVal_ * sigmaOverRoot2 ); - z.imag( -deltaMVal_ * sigmaOverRoot2 ); - - trigTermKvecVals_[iEvt][i] = this->generateKvector(z); - - if ( type_ == ExpHypTrig ) { - - z.real( ( gammaVal_ - 0.5 * deltaGammaVal_ 
) * sigmaOverRoot2 );; - z.imag( 0.0 ); - - hypHTermKvecVals_[iEvt][i] = this->generateKvector(z); - - z.real( ( gammaVal_ + 0.5 * deltaGammaVal_ ) * sigmaOverRoot2 ); - z.imag( 0.0 ); - - hypLTermKvecVals_[iEvt][i] = this->generateKvector(z); - - } - } - } -} - -void LauDecayTimePdf::calcSmearedSplinePartialIntegrals(const UInt_t iEvt, const UInt_t splineIndex, const std::vector& means, const std::vector& sigmas, const std::vector& fractions) -{ - using namespace std::complex_literals; - - const std::vector& xVals { effiFun_->getXValues() }; - const Double_t minAbs = xVals[splineIndex]; - const Double_t maxAbs = xVals[splineIndex+1]; - - const std::array coeffs { effiFun_->getCoefficients(splineIndex) }; - - std::complex z; - - for (UInt_t i(0); igenerateMvector(minAbs, maxAbs, z, sigmas[i], means[i]); - } - - const Double_t normTermExp { this->smearedSplineNormalise(coeffs, expTermKvecVals_[iEvt][i], expTermMvecVals_[iEvt][splineIndex][i], sigmaPowerVals_[iEvt][i], meanPowerVals_[iEvt][i]).real() }; - - normTermExp_ += fractions[i] * normTermExp; - - if ( type_ == ExpTrig or type_ == ExpHypTrig ) { - - z.real( gammaVal_ * sigmaOverRoot2 ); - z.imag( -deltaMVal_ * sigmaOverRoot2 ); - - if ( nonKnotChanged_ ) { - trigTermMvecVals_[iEvt][splineIndex][i] = this->generateMvector(minAbs, maxAbs, z, sigmas[i], means[i]); - } - - std::complex integral { this->smearedSplineNormalise(coeffs, trigTermKvecVals_[iEvt][i], trigTermMvecVals_[iEvt][splineIndex][i], sigmaPowerVals_[iEvt][i], meanPowerVals_[iEvt][i]) }; - - const Double_t cosIntegral { integral.real() }; - const Double_t sinIntegral { integral.imag() }; - - normTermCos_ += fractions[i] * cosIntegral; - normTermSin_ += fractions[i] * sinIntegral; - - if ( type_ == ExpTrig ) { - - normTermCosh_ += fractions[i] * normTermExp; - - } else { - - const std::complex zH { ( gammaVal_ - 0.5 * deltaGammaVal_ ) * sigmaOverRoot2 }; - const std::complex zL { ( gammaVal_ + 0.5 * deltaGammaVal_ ) * sigmaOverRoot2 }; - - if ( 
nonKnotChanged_ ) { - hypHTermMvecVals_[iEvt][splineIndex][i] = this->generateMvector(minAbs, maxAbs, zH, sigmas[i], means[i]); - hypLTermMvecVals_[iEvt][splineIndex][i] = this->generateMvector(minAbs, maxAbs, zL, sigmas[i], means[i]); - } - - const Double_t integralH { this->smearedSplineNormalise(coeffs, hypHTermKvecVals_[iEvt][i], hypHTermMvecVals_[iEvt][splineIndex][i], sigmaPowerVals_[iEvt][i], meanPowerVals_[iEvt][i]).real() }; - const Double_t integralL { this->smearedSplineNormalise(coeffs, hypLTermKvecVals_[iEvt][i], hypLTermMvecVals_[iEvt][splineIndex][i], sigmaPowerVals_[iEvt][i], meanPowerVals_[iEvt][i]).real() }; - - const Double_t coshIntegral { 0.5 * (integralH + integralL) }; - const Double_t sinhIntegral { 0.5 * (integralH - integralL) }; - - normTermCosh_ += fractions[i] * coshIntegral; - normTermSinh_ += fractions[i] * sinhIntegral; - } - } - } -} - -std::array,4> LauDecayTimePdf::generateKvector(const std::complex& z) -{ - const std::complex zr { 1.0/z }; - const std::complex zr2 { zr*zr }; - - std::array,4> K { - 0.5*zr, - 0.5*zr2, - zr*(1.0+zr2), - 3.0*zr2*(1.0+zr2) - }; - - return K; -} - -std::array,4> LauDecayTimePdf::generateMvector(const Double_t minAbs, const Double_t maxAbs, const std::complex& z, const Double_t sigma, const Double_t mean) -{ - using namespace std::complex_literals; - - std::array,4> M0 {0.,0.,0.,0.}; - std::array,4> M1 {0.,0.,0.,0.}; - std::array,4> M {0.,0.,0.,0.}; - - const Double_t x1 { (maxAbs - mean) / (LauConstants::root2 * sigma) }; - const Double_t x0 { (minAbs - mean) / (LauConstants::root2 * sigma) }; - - //Values used a lot - const Double_t ex2_1 { TMath::Exp(-(x1*x1)) }; - const Double_t ex2_0 { TMath::Exp(-(x0*x0)) }; - const Double_t sqrtPir { 1.0 / LauConstants::rootPi }; - - const std::complex arg1 { 1i * (z - x1) }; - const std::complex arg0 { 1i * (z - x0) }; - - //fad = the faddeeva term times the ex2 value (done in different ways depending on the domain) - std::complex fad1; - std::complex fad0; - - 
if(arg1.imag() < -5.0) - {fad1 = std::exp(-(x1*x1) - (arg1*arg1)) * RooMath::erfc(-1i * arg1);} - else - {fad1 = ex2_1*RooMath::faddeeva(arg1);} - - if(arg0.imag() < -5.0) - {fad0 = std::exp(-(x0*x0) - (arg0*arg0)) * RooMath::erfc(-1i * arg0);} - else - {fad0 = ex2_0*RooMath::faddeeva(arg0);} - - //doing the actual functions for x1 - M1[0] = RooMath::erf(x1) - fad1; - M1[1] = 2. * (-sqrtPir*ex2_1 - x1*fad1); - M1[2] = 2. * (-2*x1*sqrtPir*ex2_1 - (2*x1*x1 - 1)*fad1); - M1[3] = 4. * (-(2*x1*x1 - 1)*sqrtPir*ex2_1 - x1*(2*x1*x1-3)*fad1); - - //doing them again for x0 - M0[0] = RooMath::erf(x0) - fad0; - M0[1] = 2. * (-sqrtPir*ex2_0 - x0*fad0); - M0[2] = 2. * (-2*x0*sqrtPir*ex2_0 - (2*x0*x0 - 1)*fad0); - M0[3] = 4. * (-(2*x0*x0 - 1)*sqrtPir*ex2_0 - x0*(2*x0*x0-3)*fad0); - - for(Int_t i = 0; i < 4; ++i){M[i] = M1[i] - M0[i];} - - return M; -} - -std::complex LauDecayTimePdf::smearedSplineNormalise(const std::array& coeffs, const std::array,4>& K, const std::array,4>& M, const std::array& sigmaPowers, const std::array& meanPowers) const -{ - using namespace std::complex_literals; - - //Triple sum to get N (eqn 31 and 29 in https://arxiv.org/pdf/1407.0748.pdf) - std::complex N = 0. 
+ 0i; - - for(Int_t k = 0; k < 4; ++k){ - for(Int_t n = 0; n <=k; ++n){ - for(Int_t i = 0; i <=n; ++i){ - //The binomial coefficient terms - const Double_t b { binom_[n][i]*binom_[k][n] }; - N += sigmaPowers[n]*coeffs[k]*meanPowers[k-n]*K[i]*M[n-i]*b; - }}} - - return N; -} - -void LauDecayTimePdf::calcNonSmearedSplinePartialIntegrals(const UInt_t splineIndex) -{ - using namespace std::complex_literals; - - const std::complex u { gammaVal_ }; - const Double_t normTermExp { this->nonSmearedSplineNormalise(splineIndex, u, expTermIkVals_).real() }; - - normTermExp_ += normTermExp; - - if ( type_ == ExpTrig or type_ == ExpHypTrig ) { - - const std::complex uTrig { gammaVal_, -deltaMVal_ }; - const std::complex integral { this->nonSmearedSplineNormalise(splineIndex, uTrig, trigTermIkVals_) }; - - const Double_t cosIntegral { integral.real() }; - const Double_t sinIntegral { integral.imag() }; - - normTermCos_ += cosIntegral; - normTermSin_ += sinIntegral; - - if ( type_ == ExpTrig ) { - - normTermCosh_ += normTermExp; - - } else { - - const std::complex uH { gammaVal_ - 0.5 * deltaGammaVal_ }; - const std::complex uL { gammaVal_ + 0.5 * deltaGammaVal_ }; - - const Double_t integralH { this->nonSmearedSplineNormalise(splineIndex, uH, hypHTermIkVals_).real() }; - const Double_t integralL { this->nonSmearedSplineNormalise(splineIndex, uL, hypLTermIkVals_).real() }; - - const Double_t coshIntegral { 0.5 * (integralH + integralL) }; - const Double_t sinhIntegral { 0.5 * (integralH - integralL) }; - - normTermCosh_ += coshIntegral; - normTermSinh_ += sinhIntegral; - } - } -} - -std::complex LauDecayTimePdf::calcIk( const UInt_t k, const Double_t minAbs, const Double_t maxAbs, const std::complex& u ) -{ - //Taking mu = 0, this does not have to be the case in general - auto G = [&u](const Int_t n){return -TMath::Factorial(n)/std::pow(u,n+1);};//power of n+1 used rather than n, this is due to maths error in the paper - auto H = [&u](const Int_t n, const Double_t t){return 
std::pow(t,n)*std::exp(-u*t);}; - - std::complex ans { 0.0, 0.0 }; - for (UInt_t j = 0; j <= k; ++j) - {ans += binom_[k][j]*G(j)*( H( k-j, maxAbs ) - H( k-j, minAbs ) );} - - return ans; -} - -std::complex LauDecayTimePdf::nonSmearedSplineNormalise( const UInt_t splineIndex, const std::complex& u, std::vector,4>>& cache ) -{ - // u = Gamma - iDeltam in general - - using namespace std::complex_literals; - - const std::vector& xVals = effiFun_ -> getXValues(); - const Double_t minAbs = xVals[splineIndex]; - const Double_t maxAbs = xVals[splineIndex+1]; - - std::array coeffs = effiFun_->getCoefficients(splineIndex); + // Determine the decay time efficiency + effiTerm_ = this->calcEffiTerm( abscissa ); - //sum to get N (eqn 30 in https://arxiv.org/pdf/1407.0748.pdf, using I_k from Appendix B.1 with the corrected maths error) - std::complex N = 0. + 0i; - if ( nonKnotChanged_ ) { - for(UInt_t i = 0; i < 4; ++i) { - cache[splineIndex][i] = calcIk(i, minAbs, maxAbs, u); - N += cache[splineIndex][i] * coeffs[i]; - } - } else { - for(UInt_t i = 0; i < 4; ++i) { - N += cache[splineIndex][i] * coeffs[i]; - } - } + // Determine the other terms + terms_ = calculator_->calcTerms( abscissa, {1.0, abscissaErr} ); - return N; + // TODO - do we need to do this? 
+ //if ( smear_ and scaleWithPerEventError_ ) { + //normTerms_ = integrator_->calcNormTerms( {1.0, abscissaErr} ); + //} } -Double_t LauDecayTimePdf::generateError(Bool_t forceNew) +Double_t LauDecayTimePdf::generateError( bool forceNew ) { - if (errHist_ && (forceNew || !abscissaErrorGenerated_)) { - LauFitData errData = errHist_->generate(nullptr); - abscissaError_ = errData.at(this->varErrName()); - abscissaErrorGenerated_ = kTRUE; - } else { - while (forceNew || !abscissaErrorGenerated_) { - abscissaError_ = LauRandom::randomFun()->Landau(errorDistMPV_,errorDistSigma_); - if (abscissaError_ < maxAbscissaError_ && abscissaError_ > minAbscissaError_) { - abscissaErrorGenerated_ = kTRUE; - forceNew = kFALSE; - } - } + if ( errHist_ && ( forceNew or not abscissaErrorGenerated_ ) ) { + LauFitData errData { errHist_->generate(nullptr) }; + abscissaError_ = errData.at( varErrName_ ); + abscissaErrorGenerated_ = true; } return abscissaError_; } -Double_t LauDecayTimePdf::generate(const LauKinematics* kinematics) +Double_t LauDecayTimePdf::generate( const LauKinematics* kinematics ) { // generateError SHOULD have been called before this // function but will call it here just to make sure // (has no effect if has already been called) abscissaError_ = this->generateError(); Double_t abscissa{0.0}; switch ( type_ ) { - case FuncType::ExpTrig : - case FuncType::ExpHypTrig : + case LauDecayTime::FuncType::ExpTrig : + case LauDecayTime::FuncType::ExpHypTrig : std::cerr << "ERROR in LauDecayTimePdf::generate : direct generation does not make sense for ExpTrig and ExpHypTrig types" << std::endl; return abscissa; - case FuncType::Hist : + case LauDecayTime::FuncType::Hist : { const LauFitData genAbscissa { pdfHist_ -> generate( kinematics ) }; abscissa = genAbscissa.at(this->varName()); break; } - case FuncType::Exp : + case LauDecayTime::FuncType::Exp : { do { abscissa = LauRandom::randomFun()->Uniform(minAbscissa_,maxAbscissa_); this->calcLikelihoodInfo( abscissa, 
abscissaError_ ); const Double_t pdfVal { this->getExpTerm() * this->getEffiTerm() }; const Double_t maxVal { this->getMaxHeight(kinematics) }; if ( pdfVal > maxVal ) { std::cerr << "WARNING in LauDecayTimePdf::generate : PDF value = " << pdfVal << " is larger than the maximum PDF height " << maxVal << "\n"; std::cerr << " : This occurs for the abscissa = " << abscissa << "\n"; std::cerr << " : This really should not happen!!" << std::endl; } if ( LauRandom::randomFun()->Rndm() <= pdfVal/maxVal ) { break; } } while ( true ); break; } - case FuncType::Delta : + case LauDecayTime::FuncType::Delta : { // TODO std::cerr << "WARNING in LauDecayTimePdf::generate : generation of Delta case not currently implemented" << std::endl; break; } - case FuncType::DeltaExp : + case LauDecayTime::FuncType::DeltaExp : { // TODO std::cerr << "WARNING in LauDecayTimePdf::generate : generation of DeltaExp case not currently implemented" << std::endl; break; } } // mark that we need a new error to be generated next time - abscissaErrorGenerated_ = kFALSE; + abscissaErrorGenerated_ = false; return abscissa; } Double_t LauDecayTimePdf::getMaxHeight(const LauKinematics* /*kinematics*/) { if ( heightUpToDate_ ) { return maxHeight_; } // TODO this is brute force - can we do this in a more refined way? 
// Scan in small increments across the space to find the maximum height const std::size_t nPoints { 1000 }; const Double_t range { maxAbscissa_ - minAbscissa_ }; const Double_t delta { range / nPoints }; maxHeight_ = 0.0; for ( Double_t point {minAbscissa_}; point <= maxAbscissa_; point += delta ) { this->calcLikelihoodInfo(point, abscissaError_); Double_t heightAtPoint { 0.0 }; - if ( type_ == FuncType::Exp ) { + if ( type_ == LauDecayTime::FuncType::Exp ) { heightAtPoint = this->getExpTerm(); } else { // TODO - implement the Delta and ExpDelta cases std::cerr << "WARNING in LauDecayTimePdf::getMaxHeight : only Exp case currently implemented" << std::endl; } heightAtPoint *= this->getEffiTerm(); if ( heightAtPoint > maxHeight_ ) { maxHeight_ = heightAtPoint; } } // Mutliply by 120% to be on the safe side maxHeight_ *= 1.2; // Mark the height as being up to date // (unless we're scaling by per-event error) heightUpToDate_ = not ( smear_ and scaleWithPerEventError_ ); return maxHeight_; } -void LauDecayTimePdf::setErrorHisto(const TH1* hist) +void LauDecayTimePdf::setBinnedEfficiency( std::unique_ptr effModel ) { - if ( errHist_ != nullptr ) { - std::cerr<<"WARNING in LauDecayTimePdf::setErrorHisto : Error histogram already set, not doing it again."<varErrName(), hist, this->minAbscissaError(), this->maxAbscissaError()); -} -void LauDecayTimePdf::setHistoPdf(const TH1* hist) -{ - if ( pdfHist_ != nullptr ) { - std::cerr<<"WARNING in LauDecayTimePdf::setHistoPdf : PDF histogram already set, not doing it again."<varName(), hist, this->minAbscissa(), this->maxAbscissa()); -} - -void LauDecayTimePdf::setEffiHist(const TH1* hist) -{ - if ( effiHist_ != nullptr ) { - std::cerr << "WARNING in LauDecayTimePdf::setEffiHist : efficiency histogram already set, not doing it again." << std::endl; - return; - } - - if ( hist == nullptr ) { - std::cerr << "WARNING in LauDecayTimePdf::setEffiHist : supplied efficiency histogram pointer is null." 
<< std::endl; - return; - } - - // Check boundaries of histogram align with our abscissa's range - const Double_t axisMin {hist->GetXaxis()->GetXmin()}; - const Double_t axisMax {hist->GetXaxis()->GetXmax()}; - if ( TMath::Abs(minAbscissa_ - axisMin)>1e-6 || TMath::Abs(maxAbscissa_ - axisMax)>1e-6 ) { - std::cerr << "WARNING in LauDecayTimePdf::setEffiHist : mismatch in range between supplied histogram and abscissa\n" - << " : histogram range: " << axisMin << " - " << axisMax << "\n" - << " : abscissa range: " << minAbscissa_ << " - " << maxAbscissa_ << "\n" - << " : Disregarding this histogram." << std::endl; - return; - } - - effiHist_ = dynamic_cast( hist->Clone() ); - - //Normalise the hist if the (relative) efficiencies have very large values - if(effiHist_ -> GetMaximum() > 1.) - { - effiHist_ -> Scale( 1. / effiHist_->Integral() ); //Normalise - std::cout << "INFO in LauDecayTimePdf::setEffiHist : Supplied histogram for Decay Time Acceptance has values too large: normalising..." << std::endl; - } -} - - -void LauDecayTimePdf::setEffiSpline(Lau1DCubicSpline* spline) -{ - if ( effiFun_ != nullptr ) { - std::cerr<<"WARNING in LauDecayTimePdf::setEffiSpline : efficiency function already set, not doing it again."< effis = effiFun_->getYValues(); - - effiPars_.resize( effis.size() ); - effiParVals_.resize( effis.size() ); - size_t index = 0; - - for( Double_t& effi : effis ) - { - effiPars_[ index ] = new LauParameter( Form( "%s_Knot_%lu", varName_.Data() ,index ), effi, 0.0, 1.0, kTRUE ); - ++index; + // Create the approptiate integrator + // NB need to use effModel here since it needs to be of concrete type + if ( smear_ ) { + integrator_ = std::make_unique( minAbscissa_, maxAbscissa_, *physicsModel_, *effModel, *resolutionModel_ ); + } else { + integrator_ = std::make_unique( minAbscissa_, maxAbscissa_, *physicsModel_, *effModel ); } -} -LauAbsRValue* LauDecayTimePdf::findParameter(const TString& parName) -{ - for ( std::vector::iterator iter = params_.begin(); 
iter != params_.end(); ++iter ) { - if ((*iter)->name().Contains(parName)) { - return (*iter); - } - } - std::cerr << "ERROR in LauDecayTimePdf::findParameter : Parameter \"" << parName << "\" not found." << std::endl; - return nullptr; -} - -const LauAbsRValue* LauDecayTimePdf::findParameter(const TString& parName) const -{ - for ( std::vector::const_iterator iter = params_.begin(); iter != params_.end(); ++iter ) { - if ((*iter)->name().Contains(parName)) { - return (*iter); - } - } - std::cerr << "ERROR in LauDecayTimePdf::findParameter : Parameter \"" << parName << "\" not found." << std::endl; - return nullptr; + // Store the efficiency model (as a pointer to base) + efficiencyModel_ = std::move(effModel); } void LauDecayTimePdf::updatePulls() { for ( std::vector::iterator iter = params_.begin(); iter != params_.end(); ++iter ) { std::vector params = (*iter)->getPars(); for (std::vector::iterator params_iter = params.begin(); params_iter != params.end(); ++params_iter ) { if (!(*iter)->fixed()) { (*params_iter)->updatePull(); } } } } void LauDecayTimePdf::propagateParUpdates() { // If none of the parameters are floating there's nothing to do - if ( nothingFloating_ ) { + if ( not anythingFloating_ ) { return; } - // Otherwise, determine which of the floating parameters have changed (if any) and act accordingly - - static auto checkEquality = [](const LauAbsRValue* par, const Double_t cacheVal){return par->unblindValue() == cacheVal;}; - anyKnotChanged_ = anyKnotFloating_ and not std::equal(effiPars_.begin(), effiPars_.end(), effiParVals_.begin(), checkEquality); + physicsModel_->propagateParUpdates(); + physicsParChanged_ = physicsModel_->anythingChanged(); - // Update the acceptance spline if any of the knot values have changed - if ( anyKnotChanged_ ) { - effiFun_->updateYValues( effiPars_ ); + if ( resolutionModel_ ) { + resolutionModel_->propagateParUpdates(); + resoParChanged_ = resolutionModel_->anythingChanged(); } - // Check also the physics and 
resolution parameters - if ( nonKnotFloating_ ) { - if ( physicsParFloating_ ) { - tauChanged_ = tauFloating_ and not checkEquality(tau_, tauVal_); - deltaMChanged_ = deltaMFloating_ and not checkEquality(deltaM_, deltaMVal_); - deltaGammaChanged_ = deltaGammaFloating_ and not checkEquality(deltaGamma_, deltaGammaVal_); - - physicsParChanged_ = tauChanged_ || deltaMChanged_ || deltaGammaChanged_; - } - - if ( resoParFloating_ ) { - resoParChanged_ = kFALSE; - resoParChanged_ |= not std::equal( mean_.begin(), mean_.end(), meanVals_.begin(), checkEquality ); - resoParChanged_ |= not std::equal( sigma_.begin(), sigma_.end(), sigmaVals_.begin(), checkEquality ); - resoParChanged_ |= not std::equal( frac_.begin(), frac_.end(), fracVals_.begin()+1, checkEquality ); - } - - nonKnotChanged_ = physicsParChanged_ or resoParChanged_; - } + efficiencyModel_->propagateParUpdates(); + effiParChanged_ = efficiencyModel_->anythingChanged(); // If nothing has changed, there's nothing to do - if ( not ( anyKnotChanged_ or nonKnotChanged_ ) ) { + anythingChanged_ = physicsParChanged_ or resoParChanged_ or effiParChanged_; + if ( not anythingChanged_ ) { return; } + calculator_->propagateParUpdates(); + integrator_->propagateParUpdates(); + // Otherwise we need to update the cache this->updateCache(); } - -/* -void LauDecayTimePdf::updateEffiSpline(const std::vector& effiPars) -{ - if (effiPars.size() != effiFun_->getnKnots()){ - std::cerr<<"ERROR in LauDecayTimePdf::updateEffiSpline : number of efficiency parameters is not equal to the number of spline knots."<Exit(EXIT_FAILURE); - } - effiFun_->updateYValues(effiPars); -} -*/ diff --git a/src/LauDecayTimePhysicsModel.cc b/src/LauDecayTimePhysicsModel.cc new file mode 100644 index 0000000..c6db2fe --- /dev/null +++ b/src/LauDecayTimePhysicsModel.cc @@ -0,0 +1,196 @@ + +/* +Copyright 2021 University of Warwick + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the 
License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Laura++ package authors: +John Back +Paul Harrison +Thomas Latham +*/ + +/*! \file LauDecayTimePhysicsModel.cc + \brief File containing implementation of LauDecayTimePhysicsModel class. +*/ + +#include +#include + +#include "TSystem.h" + +#include "LauAbsRValue.hh" +#include "LauDecayTime.hh" +#include "LauDecayTimePhysicsModel.hh" + +ClassImp(LauDecayTimePhysicsModel); + + +LauDecayTimePhysicsModel::LauDecayTimePhysicsModel( const LauDecayTime::FuncType type, std::vector parameters ) : + type_{type}, + params_{std::move(parameters)} +{ + bool foundParams{true}; + switch ( type_ ) { + case LauDecayTime::FuncType::Hist : + std::cerr << "ERROR in LauDecayTimePhysicsModel::LauDecayTimePhysicsModel : Hist type needs no physics model" << std::endl; + gSystem->Exit(EXIT_FAILURE); + break; + case LauDecayTime::FuncType::Delta : + if ( ! 
params_.empty() ) { + std::cerr << "ERROR in LauDecayTimePhysicsModel::LauDecayTimePhysicsModel : Delta type model requires no parameters" << std::endl; + gSystem->Exit(EXIT_FAILURE); + } + break; + case LauDecayTime::FuncType::Exp : + tau_ = this->findParameter("tau"); + foundParams &= (tau_ != nullptr); + if ( params_.size() != 1 || (!foundParams)) { + std::cerr << "ERROR in LauDecayTimePhysicsModel::LauDecayTimePhysicsModel : Exp type model requires the following parameters:\n"; + std::cerr << " - the lifetime of the exponential decay: \"tau\"" << std::endl; + gSystem->Exit(EXIT_FAILURE); + } + break; + case LauDecayTime::FuncType::DeltaExp : + tau_ = this->findParameter("tau"); + fracPrompt_ = this->findParameter("frac_prompt"); + foundParams &= (tau_ != nullptr); + foundParams &= (fracPrompt_ != nullptr); + if ( params_.size() != 2 || (!foundParams)) { + std::cerr << "ERROR in LauDecayTimePhysicsModel::LauDecayTimePhysicsModel : DeltaExp type model requires the following parameters:\n"; + std::cerr << " - the lifetime of the exponential decay: \"tau\"\n"; + std::cerr << " - the fraction of the prompt part: \"frac_prompt\"" << std::endl; + gSystem->Exit(EXIT_FAILURE); + } + break; + case LauDecayTime::FuncType::ExpTrig : + tau_ = this->findParameter("tau"); + deltaM_ = this->findParameter("deltaM"); + foundParams &= (tau_ != nullptr); + foundParams &= (deltaM_ != nullptr); + if ( params_.size() != 2 || (!foundParams)) { + std::cerr << "ERROR in LauDecayTimePhysicsModel::LauDecayTimePhysicsModel : ExpTrig type model requires the following parameters:\n"; + std::cerr << " - the lifetime of the exponential decay: \"tau\"\n"; + std::cerr << " - the mass difference: \"deltaM\"" << std::endl; + gSystem->Exit(EXIT_FAILURE); + } + break; + case LauDecayTime::FuncType::ExpHypTrig : + tau_ = this->findParameter("tau"); + deltaM_ = this->findParameter("deltaM"); + deltaGamma_ = this->findParameter("deltaGamma"); + foundParams &= (tau_ != nullptr); + foundParams &= 
(deltaM_ != nullptr); + foundParams &= (deltaGamma_ != nullptr); + if ( params_.size() != 3 || (!foundParams)) { + std::cerr << "ERROR in LauDecayTimePhysicsModel::LauDecayTimePhysicsModel : ExpHypTrig type model requires the following parameters:\n"; + std::cerr << " - the lifetime of the exponential decay: \"tau\"\n"; + std::cerr << " - the mass difference: \"deltaM\"\n"; + std::cerr << " - the width difference: \"deltaGamma\"" << std::endl; + gSystem->Exit(EXIT_FAILURE); + } + break; + } +} + +LauAbsRValue* LauDecayTimePhysicsModel::findParameter(const TString& parName) +{ + for ( LauAbsRValue* par : params_ ) { + if ( par && par->name().Contains(parName) ) { + return par; + } + } + std::cerr << "ERROR in LauDecayTimePhysicsModel::findParameter : Parameter \"" << parName << "\" not found." << std::endl; + return nullptr; +} + +const LauAbsRValue* LauDecayTimePhysicsModel::findParameter(const TString& parName) const +{ + for ( const LauAbsRValue* par : params_ ) { + if ( par && par->name().Contains(parName) ) { + return par; + } + } + std::cerr << "ERROR in LauDecayTimePhysicsModel::findParameter : Parameter \"" << parName << "\" not found." 
<< std::endl; + return nullptr; +} + +void LauDecayTimePhysicsModel::initialise() +{ + tauFloating_ = tauChanged_ = ( tau_ != nullptr ); + deltaMFloating_ = deltaMChanged_ = ( deltaM_ != nullptr ); + deltaGammaFloating_ = deltaGammaChanged_ = ( deltaGamma_ != nullptr ); + fracPromptFloating_ = fracPromptChanged_ = ( fracPrompt_ != nullptr ); + + this->updateParameterCache(); + + tauFloating_ = ( tau_ != nullptr ) and not tau_->fixed(); + deltaMFloating_ = ( deltaM_ != nullptr ) and not deltaM_->fixed(); + deltaGammaFloating_ = ( deltaGamma_ != nullptr ) and not deltaGamma_->fixed(); + fracPromptFloating_ = ( fracPrompt_ != nullptr ) and not fracPrompt_->fixed(); + + anythingFloating_ = ( tauFloating_ or deltaMFloating_ or deltaGammaFloating_ or fracPromptFloating_ ); + + std::cout << "INFO in LauDecayTimePhysicsModel::initialise : tau floating set to: " << (tauFloating_ ? "True" : "False") << std::endl; + std::cout << "INFO in LauDecayTimePhysicsModel::initialise : deltaM floating set to: " << (deltaMFloating_ ? "True" : "False") << std::endl; + std::cout << "INFO in LauDecayTimePhysicsModel::initialise : deltaGamma floating set to: " << (deltaGammaFloating_ ? "True" : "False") << std::endl; + std::cout << "INFO in LauDecayTimePhysicsModel::initialise : fracPrompt floating set to: " << (fracPromptFloating_ ? 
"True" : "False") << std::endl; +} + +void LauDecayTimePhysicsModel::updateParameterCache() +{ + // Get the updated values of all parameters + + if ( tauChanged_ ) { + tauVal_ = tau_->unblindValue(); + gammaVal_ = 1.0 / tauVal_; + } + + if ( deltaMChanged_ ) { + deltaMVal_ = deltaM_->unblindValue(); + } + + if ( deltaGammaChanged_ ) { + deltaGammaVal_ = deltaGamma_->unblindValue(); + } + + if ( fracPromptChanged_ ) { + fracPromptVal_ = fracPrompt_->unblindValue(); + } +} + +void LauDecayTimePhysicsModel::propagateParUpdates() +{ + // If none of the parameters are floating there's nothing to do + if ( not anythingFloating_ ) { + return; + } + + // Otherwise, determine whether any of the floating parameters have changed and act accordingly + + static auto checkEquality = [](const LauAbsRValue* par, const Double_t cacheVal){return par->unblindValue() == cacheVal;}; + + tauChanged_ = tauFloating_ and not checkEquality(tau_, tauVal_); + deltaMChanged_ = deltaMFloating_ and not checkEquality(deltaM_, deltaMVal_); + deltaGammaChanged_ = deltaGammaFloating_ and not checkEquality(deltaGamma_, deltaGammaVal_); + fracPromptChanged_ = fracPromptFloating_ and not checkEquality(fracPrompt_, fracPromptVal_); + + anythingChanged_ = ( tauChanged_ or deltaMChanged_ or deltaGammaChanged_ or fracPromptChanged_ ); + + if ( anythingChanged_ ) { + this->updateParameterCache(); + } +} + diff --git a/src/LauDecayTimeResolution.cc b/src/LauDecayTimeResolution.cc new file mode 100644 index 0000000..d0525d7 --- /dev/null +++ b/src/LauDecayTimeResolution.cc @@ -0,0 +1,209 @@ + +/* +Copyright 2021 University of Warwick + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Laura++ package authors: +John Back +Paul Harrison +Thomas Latham +*/ + +/*! \file LauDecayTimeResolution.cc + \brief File containing implementation of LauDecayTimeResolution class. +*/ + +#include +#include +#include +#include + +#include "TString.h" +#include "TSystem.h" + +#include "LauAbsRValue.hh" +#include "LauDecayTimeResolution.hh" + +ClassImp(LauDecayTimeResolution); + + +LauDecayTimeResolution::LauDecayTimeResolution( const std::size_t nGauss, const std::vector& resolutionParams, const bool scale ) : + nGauss_{nGauss}, + scaleWithPerEventError_{scale}, + scaleMeans_(nGauss_,scale), + scaleWidths_(nGauss_,scale), + params_{resolutionParams} +{ + this->checkSetup(); +} + +LauDecayTimeResolution::LauDecayTimeResolution( const std::size_t nGauss, const std::vector& resolutionParams, const std::vector& scale ) : + nGauss_{nGauss}, + scaleWithPerEventError_( std::accumulate( scale.begin(), scale.end(), false, std::logical_or() ) ), + scaleMeans_{scale}, + scaleWidths_{scale}, + params_{resolutionParams} +{ + this->checkSetup(); +} + +LauDecayTimeResolution::LauDecayTimeResolution( const std::size_t nGauss, const std::vector& resolutionParams, const std::vector& scaleMeans, const std::vector& scaleWidths ) : + nGauss_{nGauss}, + scaleWithPerEventError_( std::accumulate( scaleMeans.begin(), scaleMeans.end(), false, std::logical_or() ) || std::accumulate( scaleWidths.begin(), scaleWidths.end(), kFALSE, std::logical_or() ) ), + scaleMeans_{scaleMeans}, + scaleWidths_{scaleWidths}, + params_{resolutionParams} +{ + this->checkSetup(); +} + +void 
LauDecayTimeResolution::resizeVectors() +{ + fractions_.assign( nGauss_-1, nullptr ); + means_.assign( nGauss_, nullptr ); + widths_.assign( nGauss_, nullptr ); + + fractionVals_.assign( nGauss_, 0.0 ); + meanVals_.assign( nGauss_, 0.0 ); + widthVals_.assign( nGauss_, 0.0 ); +} + +void LauDecayTimeResolution::checkSetup() +{ + if ( nGauss_ == 0 ) { + std::cerr << "ERROR in LauDecayTimeResolution::checkSetup : number of Gaussians is zero!" << std::endl; + gSystem->Exit(EXIT_FAILURE); + } + + if ( scaleWithPerEventError_ ) { + // if we're scaling by the per-event error, check that the scale vectors are the right length + if ( scaleMeans_.size() != nGauss_ || scaleWidths_.size() != nGauss_ ) { + std::cerr << "ERROR in LauDecayTimeResolution::checkSetup : number of Gaussians = " << nGauss_ << " but scale vectors are of length " << scaleMeans_.size() << " and " << scaleWidths_.size() << ", cannot continue!" << std::endl; + gSystem->Exit(EXIT_FAILURE); + } + } + + this->resizeVectors(); + + const TString meanNameBase{"mean_"}; + const TString sigmaNameBase{"sigma_"}; + const TString fracNameBase{"frac_"}; + + bool foundParams{true}; + + for (std::size_t i{0}; i < nGauss_; ++i) { + TString tempName{meanNameBase}; tempName += i; + TString tempName2{sigmaNameBase}; tempName2 += i; + TString tempName3{fracNameBase}; tempName3 += i; + + means_[i] = this->findParameter(tempName); + foundParams &= (means_[i] != nullptr); + + widths_[i] = this->findParameter(tempName2); + foundParams &= (widths_[i] != nullptr); + + if (i!=0) { + fractions_[i-1] = this->findParameter(tempName3); + foundParams &= (fractions_[i-1] != nullptr); + } + } + + if ( ! 
foundParams ) { + std::cerr << "ERROR in LauDecayTimeResolution::checkSetup : decay time resolution function with " << nGauss_ << " Gaussians requires:\n"; + std::cerr << " - 2 parameters per Gaussian (i): \"mean_i\" and \"sigma_i\"\n"; + std::cerr << " - " << nGauss_-1 << " fractions: \"frac_i\", where i!=0" << std::endl; + gSystem->Exit(EXIT_FAILURE); + } +} + +LauAbsRValue* LauDecayTimeResolution::findParameter(const TString& parName) +{ + for ( LauAbsRValue* par : params_ ) { + if ( par && par->name().Contains(parName) ) { + return par; + } + } + std::cerr << "ERROR in LauDecayTimeResolution::findParameter : Parameter \"" << parName << "\" not found." << std::endl; + return nullptr; +} + +const LauAbsRValue* LauDecayTimeResolution::findParameter(const TString& parName) const +{ + for ( const LauAbsRValue* par : params_ ) { + if ( par && par->name().Contains(parName) ) { + return par; + } + } + std::cerr << "ERROR in LauDecayTimeResolution::findParameter : Parameter \"" << parName << "\" not found." << std::endl; + return nullptr; +} + +void LauDecayTimeResolution::initialise() +{ + anythingFloating_ = false; + for ( std::size_t i{0}; i < nGauss_; ++i ) + { + const bool meanFloating { not means_[i]->fixed() }; + const bool widthFloating { not widths_[i]->fixed() }; + + anythingFloating_ |= (meanFloating or widthFloating); + + std::cout << "INFO in LauDecayTimeResolution::initialise : mean[" << i << "] floating set to: " << (meanFloating ? "True" : "False") << std::endl; + std::cout << "INFO in LauDecayTimeResolution::initialise : width[" << i << "] floating set to: " << (widthFloating ? "True" : "False") << std::endl; + + if ( i < (nGauss_ - 1) ) { + const bool fracFloating { not fractions_[i]->fixed() }; + + anythingFloating_ |= fracFloating; + + std::cout << "INFO in LauDecayTimeResolution::initialise : fraction[" << i << "] floating set to: " << (fracFloating ? 
"True" : "False") << std::endl; + } + } + + this->updateParameterCache(); +} + +void LauDecayTimeResolution::updateParameterCache() +{ + static auto assignValue = [](const LauAbsRValue* par){return par->unblindValue();}; + + std::transform( means_.begin(), means_.end(), meanVals_.begin(), assignValue ); + std::transform( widths_.begin(), widths_.end(), widthVals_.begin(), assignValue ); + std::transform( fractions_.begin(), fractions_.end(), fractionVals_.begin()+1, assignValue ); + fractionVals_[0] = std::accumulate( fractionVals_.begin()+1, fractionVals_.end(), 1.0, std::minus{} ); +} + +void LauDecayTimeResolution::propagateParUpdates() +{ + // If none of the parameters are floating there's nothing to do + if ( not anythingFloating_ ) { + return; + } + + // Otherwise, determine whether any of the floating parameters have changed and act accordingly + + static auto checkEquality = [](const LauAbsRValue* par, const Double_t cacheVal){return par->unblindValue() == cacheVal;}; + + anythingChanged_ = false; + anythingChanged_ |= not std::equal( means_.begin(), means_.end(), meanVals_.begin(), checkEquality ); + anythingChanged_ |= not std::equal( widths_.begin(), widths_.end(), widthVals_.begin(), checkEquality ); + anythingChanged_ |= not std::equal( fractions_.begin(), fractions_.end(), fractionVals_.begin()+1, checkEquality ); + + if ( anythingChanged_ ) { + this->updateParameterCache(); + } +} + diff --git a/src/LauNonSmearedBinnedEfficiencyDecayTimeIntegrator.cc b/src/LauNonSmearedBinnedEfficiencyDecayTimeIntegrator.cc new file mode 100644 index 0000000..150f0d6 --- /dev/null +++ b/src/LauNonSmearedBinnedEfficiencyDecayTimeIntegrator.cc @@ -0,0 +1,115 @@ + +/* +Copyright 2021 University of Warwick + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Laura++ package authors: +John Back +Paul Harrison +Thomas Latham +*/ + +/*! \file LauNonSmearedBinnedEfficiencyDecayTimeIntegrator.cc + \brief File containing implementation of LauNonSmearedBinnedEfficiencyDecayTimeIntegrator class. +*/ + +#include +#include + +#include "TSystem.h" + +#include "LauNonSmearedBinnedEfficiencyDecayTimeIntegrator.hh" + +ClassImp(LauNonSmearedBinnedEfficiencyDecayTimeIntegrator); + + +LauNonSmearedBinnedEfficiencyDecayTimeIntegrator::LauNonSmearedBinnedEfficiencyDecayTimeIntegrator( const Double_t minAbscissaVal, const Double_t maxAbscissaVal, const LauDecayTimePhysicsModel& physModel, const LauBinnedDecayTimeEfficiency& effModel ) +{ + switch ( physModel.getFunctionType() ) { + case LauDecayTime::FuncType::Hist : + case LauDecayTime::FuncType::Delta : + case LauDecayTime::FuncType::DeltaExp : + // These do not make sense for us: + // - Hist has no need of an integrator + // - Delta and DeltaExp require a resolution function + std::cerr << "ERROR in LauNonSmearedBinnedEfficiencyDecayTimeIntegrator::LauNonSmearedBinnedEfficiencyDecayTimeIntegrator : Unsupported function type in the physics model" << std::endl; + gSystem->Exit(EXIT_FAILURE); + break; + case LauDecayTime::FuncType::Exp : + case LauDecayTime::FuncType::ExpTrig : + case LauDecayTime::FuncType::ExpHypTrig : + // All fine, we can deal with these + break; + } + + // Extract the binning info from the efficiency model + const std::vector binInfo { effModel.getBinningInfo() }; + + // Check that the first low edge and the last high edge match the minAbscissaVal and 
maxAbscissaVal we've been given + if ( binInfo.front().loEdge != minAbscissaVal or binInfo.back().hiEdge != maxAbscissaVal ) { + std::cerr << "ERROR in LauNonSmearedBinnedEfficiencyDecayTimeIntegrator::LauNonSmearedBinnedEfficiencyDecayTimeIntegrator : binning info does not correspond to specified range\n"; + std::cerr << " : range: " << minAbscissaVal << " - " << maxAbscissaVal << "\n"; + for ( auto [ loEdge, hiEdge, efficiency ] : binInfo ) { + std::cerr << " : bin: " << loEdge << " - " << hiEdge << "\n"; + } + std::cerr << std::endl; + gSystem->Exit(EXIT_FAILURE); + } + + // If all is well, create uniform integrators for each bin + efficiencies_.reserve( binInfo.size() ); + integrators_.reserve( binInfo.size() ); + for ( auto [ loEdge, hiEdge, efficiency ] : binInfo ) { + efficiencies_.emplace_back( efficiency ); + integrators_.emplace_back( loEdge, hiEdge, physModel, efficiencies_.back() ); + } +} + +void LauNonSmearedBinnedEfficiencyDecayTimeIntegrator::cacheInfo( const std::vector& abscissaErrors ) +{ + for ( auto& integrator : integrators_ ) { + integrator.cacheInfo( abscissaErrors ); + } +} + +void LauNonSmearedBinnedEfficiencyDecayTimeIntegrator::propagateParUpdates() +{ + for ( auto& integrator : integrators_ ) { + integrator.propagateParUpdates(); + } +} + +LauDecayTimeNormTerms LauNonSmearedBinnedEfficiencyDecayTimeIntegrator::getNormTerms( const std::size_t iEvt ) const +{ + LauDecayTimeNormTerms normTerms; + + for ( const auto& integrator : integrators_ ) { + normTerms += integrator.getNormTerms( iEvt ); + } + + return normTerms; +} + +LauDecayTimeNormTerms LauNonSmearedBinnedEfficiencyDecayTimeIntegrator::calcNormTerms( const std::array& abscissaError ) const +{ + LauDecayTimeNormTerms normTerms; + + for ( const auto& integrator : integrators_ ) { + normTerms += integrator.calcNormTerms( abscissaError ); + } + + return normTerms; +} diff --git a/src/LauNonSmearedDecayTimeCalculator.cc b/src/LauNonSmearedDecayTimeCalculator.cc new file mode 100644 
index 0000000..ccf5be7 --- /dev/null +++ b/src/LauNonSmearedDecayTimeCalculator.cc @@ -0,0 +1,119 @@ + +/* +Copyright 2021 University of Warwick + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Laura++ package authors: +John Back +Paul Harrison +Thomas Latham +*/ + +/*! \file LauNonSmearedDecayTimeCalculator.cc + \brief File containing implementation of LauNonSmearedDecayTimeCalculator class. +*/ + +#include +#include + +#include "TMath.h" +#include "TSystem.h" + +#include "LauNonSmearedDecayTimeCalculator.hh" + +ClassImp(LauNonSmearedDecayTimeCalculator); + + +LauNonSmearedDecayTimeCalculator::LauNonSmearedDecayTimeCalculator( const Double_t minAbscissaVal, const Double_t maxAbscissaVal, const LauDecayTimePhysicsModel& physModel ) : + minAbscissa_{minAbscissaVal}, + maxAbscissa_{maxAbscissaVal}, + physModel_{physModel}, + tauVal_{physModel_.getLifetimeValue()}, + gammaVal_{physModel_.getGammaValue()}, + deltaMVal_{physModel_.getDeltaMValue()}, + deltaGammaVal_{physModel_.getDeltaGammaValue()}, + physicsParamChanged_{physModel_.anythingChanged()} +{ + switch ( physModel_.getFunctionType() ) { + case LauDecayTime::FuncType::Hist : + case LauDecayTime::FuncType::Delta : + case LauDecayTime::FuncType::DeltaExp : + // These do not make sense for us: + // - Hist has no need of an integrator + // - Delta and DeltaExp require a resolution function + std::cerr << "ERROR in LauNonSmearedDecayTimeCalculator::LauNonSmearedDecayTimeCalculator : Unsupported function type in the physics 
model" << std::endl; + gSystem->Exit(EXIT_FAILURE); + break; + case LauDecayTime::FuncType::Exp : + case LauDecayTime::FuncType::ExpTrig : + case LauDecayTime::FuncType::ExpHypTrig : + // All fine, we can deal with these + break; + } +} + +void LauNonSmearedDecayTimeCalculator::cacheInfo( const std::vector& abscissas, [[maybe_unused]] const std::vector& abscissaErrors ) +{ + abscissas_ = abscissas; + terms_.clear(); + terms_.resize( abscissas_.size() ); + this->updateCache(); +} + +void LauNonSmearedDecayTimeCalculator::updateCache() +{ + const std::size_t nEvents { abscissas_.size() }; + for ( std::size_t iEvt{0}; iEvt < nEvents; ++iEvt ) { + terms_[iEvt] = this->calcTerms( abscissas_[iEvt], {1.0,1.0} ); + } +} + +void LauNonSmearedDecayTimeCalculator::propagateParUpdates() +{ + if ( physicsParamChanged_ ) { + this->updateCache(); + } +} + +LauDecayTimeTerms LauNonSmearedDecayTimeCalculator::calcTerms( const Double_t abscissa, [[maybe_unused]] const std::array& abscissaError ) +{ + LauDecayTimeTerms terms; + + terms.expTerm = TMath::Exp( -abscissa * gammaVal_ ); + + // Calculate also the terms related to cosine and sine + const LauDecayTime::FuncType type { physModel_.getFunctionType() }; + if ( type == LauDecayTime::FuncType::ExpTrig ) { + + terms.coshTerm = terms.expTerm; + terms.sinhTerm = 0.0; + terms.cosTerm = TMath::Cos( deltaMVal_ * abscissa ) * terms.expTerm; + terms.sinTerm = TMath::Sin( deltaMVal_ * abscissa ) * terms.expTerm; + + } + + // Calculate also the terms related to cosh, sinh, cosine, and sine + else if ( type == LauDecayTime::FuncType::ExpHypTrig ) { + + terms.coshTerm = TMath::CosH( 0.5 * deltaGammaVal_ * abscissa ) * terms.expTerm; + terms.sinhTerm = TMath::SinH( 0.5 * deltaGammaVal_ * abscissa ) * terms.expTerm; + terms.cosTerm = TMath::Cos( deltaMVal_ * abscissa ) * terms.expTerm; + terms.sinTerm = TMath::Sin( deltaMVal_ * abscissa ) * terms.expTerm; + + } + + return terms; +} diff --git 
a/src/LauNonSmearedUniformEfficiencyDecayTimeIntegrator.cc b/src/LauNonSmearedUniformEfficiencyDecayTimeIntegrator.cc new file mode 100644 index 0000000..9c81603 --- /dev/null +++ b/src/LauNonSmearedUniformEfficiencyDecayTimeIntegrator.cc @@ -0,0 +1,156 @@ + +/* +Copyright 2021 University of Warwick + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Laura++ package authors: +John Back +Paul Harrison +Thomas Latham +*/ + +/*! \file LauNonSmearedUniformEfficiencyDecayTimeIntegrator.cc + \brief File containing implementation of LauNonSmearedUniformEfficiencyDecayTimeIntegrator class. 
+*/ + +#include +#include + +#include "TMath.h" +#include "TSystem.h" + +#include "LauComplex.hh" +#include "LauNonSmearedUniformEfficiencyDecayTimeIntegrator.hh" + +ClassImp(LauNonSmearedUniformEfficiencyDecayTimeIntegrator); + + +LauNonSmearedUniformEfficiencyDecayTimeIntegrator::LauNonSmearedUniformEfficiencyDecayTimeIntegrator( const Double_t minAbscissaVal, const Double_t maxAbscissaVal, const LauDecayTimePhysicsModel& physModel, const LauUniformDecayTimeEfficiency& effModel ) : + minAbscissa_{minAbscissaVal}, + maxAbscissa_{maxAbscissaVal}, + physModel_{physModel}, + effModel_{effModel}, + tauVal_{physModel_.getLifetimeValue()}, + gammaVal_{physModel_.getGammaValue()}, + deltaMVal_{physModel_.getDeltaMValue()}, + deltaGammaVal_{physModel_.getDeltaGammaValue()}, + physicsParamChanged_{physModel_.anythingChanged()} +{ + switch ( physModel_.getFunctionType() ) { + case LauDecayTime::FuncType::Hist : + case LauDecayTime::FuncType::Delta : + case LauDecayTime::FuncType::DeltaExp : + // These do not make sense for us: + // - Hist has no need of an integrator + // - Delta and DeltaExp require a resolution function + std::cerr << "ERROR in LauNonSmearedUniformEfficiencyDecayTimeIntegrator::LauNonSmearedUniformEfficiencyDecayTimeIntegrator : Unsupported function type in the physics model" << std::endl; + gSystem->Exit(EXIT_FAILURE); + break; + case LauDecayTime::FuncType::Exp : + case LauDecayTime::FuncType::ExpTrig : + case LauDecayTime::FuncType::ExpHypTrig : + // All fine, we can deal with these + break; + } +} + +void LauNonSmearedUniformEfficiencyDecayTimeIntegrator::cacheInfo( [[maybe_unused]] const std::vector& abscissaErrors ) +{ + normTerms_ = this->calcNormTerms(); +} + +void LauNonSmearedUniformEfficiencyDecayTimeIntegrator::propagateParUpdates() +{ + if ( physicsParamChanged_ ) { + normTerms_ = this->calcNormTerms(); + } +} + +LauDecayTimeNormTerms LauNonSmearedUniformEfficiencyDecayTimeIntegrator::calcNormTerms( [[maybe_unused]] const std::array& 
abscissaError ) const +{ + LauDecayTimeNormTerms normTerms; + + const Double_t eff { effModel_.getEfficiency( minAbscissa_ ) }; + + const Double_t expIntegral { this->calcExpIntegral() }; + normTerms.expTerm = eff * expIntegral; + + const LauDecayTime::FuncType funcType { physModel_.getFunctionType() }; + if ( funcType == LauDecayTime::FuncType::ExpTrig or funcType == LauDecayTime::FuncType::ExpHypTrig ) { + + const auto [cosIntegral, sinIntegral] { this->calcCosSinIntegral() }; + normTerms.cosTerm = eff * cosIntegral; + normTerms.sinTerm = eff * sinIntegral; + + if ( funcType == LauDecayTime::FuncType::ExpTrig ) { + + normTerms.coshTerm = normTerms.expTerm; + + } else { + + const auto [coshIntegral, sinhIntegral] { this->calcCoshSinhIntegral() }; + normTerms.coshTerm = eff * coshIntegral; + normTerms.sinhTerm = eff * sinhIntegral; + + } + + } + + return normTerms; +} + +Double_t LauNonSmearedUniformEfficiencyDecayTimeIntegrator::calcExpIntegral() const +{ + return tauVal_ * ( TMath::Exp(-minAbscissa_*gammaVal_) - TMath::Exp(-maxAbscissa_*gammaVal_) ); +} + +std::pair LauNonSmearedUniformEfficiencyDecayTimeIntegrator::calcCosSinIntegral() const +{ + // From 1407.0748, not clear whether complex is faster in this case + + const LauComplex denom { gammaVal_, -deltaMVal_ }; + const LauComplex exponent { -gammaVal_, deltaMVal_ }; + + const LauComplex num0 { -exponent.scale(minAbscissa_).exp() }; + const LauComplex num1 { -exponent.scale(maxAbscissa_).exp() }; + + const LauComplex integral { (num1 - num0) / denom }; + + return {integral.re(), integral.im()}; +} + +std::pair LauNonSmearedUniformEfficiencyDecayTimeIntegrator::calcCoshSinhIntegral() const +{ + // Use exponential formualtion rather than cosh, sinh. + // Fewer terms (reused for each), but not guaranteed to be faster. 
+ + const Double_t gammaH { gammaVal_ - 0.5 * deltaGammaVal_ }; + const Double_t gammaL { gammaVal_ + 0.5 * deltaGammaVal_ }; + + const Double_t tauH { 1.0 / gammaH }; + const Double_t tauL { 1.0 / gammaL }; + + const Double_t nL1 { -TMath::Exp(-gammaL * maxAbscissa_) * tauL }; + const Double_t nH1 { -TMath::Exp(-gammaH * maxAbscissa_) * tauH }; + const Double_t nL0 { -TMath::Exp(-gammaL * minAbscissa_) * tauL }; + const Double_t nH0 { -TMath::Exp(-gammaH * minAbscissa_) * tauH }; + + const Double_t coshIntegral { 0.5 * ( (nH1 + nL1) - (nH0 + nL0) ) }; + const Double_t sinhIntegral { 0.5 * ( (nH1 - nL1) - (nH0 - nL0) ) }; + + return {coshIntegral, sinhIntegral}; +} + diff --git a/src/LauSmearedBinnedEfficiencyDecayTimeIntegrator.cc b/src/LauSmearedBinnedEfficiencyDecayTimeIntegrator.cc new file mode 100644 index 0000000..5968493 --- /dev/null +++ b/src/LauSmearedBinnedEfficiencyDecayTimeIntegrator.cc @@ -0,0 +1,114 @@ + +/* +Copyright 2021 University of Warwick + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Laura++ package authors: +John Back +Paul Harrison +Thomas Latham +*/ + +/*! \file LauSmearedBinnedEfficiencyDecayTimeIntegrator.cc + \brief File containing implementation of LauSmearedBinnedEfficiencyDecayTimeIntegrator class. 
+*/ + +#include +#include + +#include "TSystem.h" + +#include "LauSmearedBinnedEfficiencyDecayTimeIntegrator.hh" + +ClassImp(LauSmearedBinnedEfficiencyDecayTimeIntegrator); + + +LauSmearedBinnedEfficiencyDecayTimeIntegrator::LauSmearedBinnedEfficiencyDecayTimeIntegrator( const Double_t minAbscissaVal, const Double_t maxAbscissaVal, const LauDecayTimePhysicsModel& physModel, const LauBinnedDecayTimeEfficiency& effModel, const LauDecayTimeResolution& resolModel ) +{ + switch ( physModel.getFunctionType() ) { + case LauDecayTime::FuncType::Hist : + // This do not make sense for us: + // - Hist has no need of an integrator + std::cerr << "ERROR in LauSmearedBinnedEfficiencyDecayTimeIntegrator::LauSmearedBinnedEfficiencyDecayTimeIntegrator : Unsupported function type in the physics model" << std::endl; + gSystem->Exit(EXIT_FAILURE); + break; + case LauDecayTime::FuncType::Delta : + case LauDecayTime::FuncType::DeltaExp : + case LauDecayTime::FuncType::Exp : + case LauDecayTime::FuncType::ExpTrig : + case LauDecayTime::FuncType::ExpHypTrig : + // All fine, we can deal with these + break; + } + + // Extract the binning info from the efficiency model + const std::vector binInfo { effModel.getBinningInfo() }; + + // Check that the first low edge and the last high edge match the minAbscissaVal and maxAbscissaVal we've been given + if ( binInfo.front().loEdge != minAbscissaVal or binInfo.back().hiEdge != maxAbscissaVal ) { + std::cerr << "ERROR in LauSmearedBinnedEfficiencyDecayTimeIntegrator::LauSmearedBinnedEfficiencyDecayTimeIntegrator : binning info does not correspond to specified range\n"; + std::cerr << " : range: " << minAbscissaVal << " - " << maxAbscissaVal << "\n"; + for ( auto [ loEdge, hiEdge, efficiency ] : binInfo ) { + std::cerr << " : bin: " << loEdge << " - " << hiEdge << "\n"; + } + std::cerr << std::endl; + gSystem->Exit(EXIT_FAILURE); + } + + // If all is well, create uniform integrators for each bin + efficiencies_.reserve( binInfo.size() ); + 
integrators_.reserve( binInfo.size() ); + for ( auto [ loEdge, hiEdge, efficiency ] : binInfo ) { + efficiencies_.emplace_back( efficiency ); + integrators_.emplace_back( loEdge, hiEdge, physModel, efficiencies_.back(), resolModel ); + } +} + +void LauSmearedBinnedEfficiencyDecayTimeIntegrator::cacheInfo( const std::vector& abscissaErrors ) +{ + for ( auto& integrator : integrators_ ) { + integrator.cacheInfo( abscissaErrors ); + } +} + +void LauSmearedBinnedEfficiencyDecayTimeIntegrator::propagateParUpdates() +{ + for ( auto& integrator : integrators_ ) { + integrator.propagateParUpdates(); + } +} + +LauDecayTimeNormTerms LauSmearedBinnedEfficiencyDecayTimeIntegrator::getNormTerms( const std::size_t iEvt ) const +{ + LauDecayTimeNormTerms normTerms; + + for ( const auto& integrator : integrators_ ) { + normTerms += integrator.getNormTerms( iEvt ); + } + + return normTerms; +} + +LauDecayTimeNormTerms LauSmearedBinnedEfficiencyDecayTimeIntegrator::calcNormTerms( const std::array& abscissaError ) const +{ + LauDecayTimeNormTerms normTerms; + + for ( const auto& integrator : integrators_ ) { + normTerms += integrator.calcNormTerms( abscissaError ); + } + + return normTerms; +} diff --git a/src/LauSmearedDecayTimeCalculator.cc b/src/LauSmearedDecayTimeCalculator.cc new file mode 100644 index 0000000..6f1ff1e --- /dev/null +++ b/src/LauSmearedDecayTimeCalculator.cc @@ -0,0 +1,222 @@ + +/* +Copyright 2021 University of Warwick + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +/* +Laura++ package authors: +John Back +Paul Harrison +Thomas Latham +*/ + +/*! \file LauSmearedDecayTimeCalculator.cc + \brief File containing implementation of LauSmearedDecayTimeCalculator class. +*/ + +#include +#include +#include + +#include "RooMath.h" +#include "TMath.h" +#include "TSystem.h" + +#include "LauConstants.hh" +#include "LauSmearedDecayTimeCalculator.hh" + +ClassImp(LauSmearedDecayTimeCalculator); + + +LauSmearedDecayTimeCalculator::LauSmearedDecayTimeCalculator( const Double_t minAbscissaVal, const Double_t maxAbscissaVal, const LauDecayTimePhysicsModel& physModel, const LauDecayTimeResolution& resolModel ) : + minAbscissa_{minAbscissaVal}, + maxAbscissa_{maxAbscissaVal}, + physModel_{physModel}, + resolModel_{resolModel}, + tauVal_{physModel_.getLifetimeValue()}, + gammaVal_{physModel_.getGammaValue()}, + deltaMVal_{physModel_.getDeltaMValue()}, + deltaGammaVal_{physModel_.getDeltaGammaValue()}, + fracPromptVal_{physModel_.getFracPromptValue()}, + nGauss_{resolModel_.nGauss()}, + fractionVals_{resolModel_.getFractionValues()}, + meanVals_{resolModel_.getMeanValues()}, + widthVals_{resolModel_.getWidthValues()}, + scaleMeans_{resolModel_.scaleMeans()}, + scaleWidths_{resolModel_.scaleWidths()}, + physicsParamChanged_{physModel_.anythingChanged()}, + resoParamChanged_{resolModel_.anythingChanged()} +{ + switch ( physModel_.getFunctionType() ) { + case LauDecayTime::FuncType::Hist : + // This does not make sense for us: + // - Hist has no need of an integrator + std::cerr << "ERROR in LauSmearedDecayTimeCalculator::LauSmearedDecayTimeCalculator : Unsupported function type in the physics model" << std::endl; + gSystem->Exit(EXIT_FAILURE); + break; + case LauDecayTime::FuncType::Delta : + case LauDecayTime::FuncType::DeltaExp : + // TODO These are not yet implemented + std::cerr << "ERROR in LauSmearedDecayTimeCalculator::LauSmearedDecayTimeCalculator : Function type not yet supported" << std::endl; + gSystem->Exit(EXIT_FAILURE); + break; + 
case LauDecayTime::FuncType::Exp : + case LauDecayTime::FuncType::ExpTrig : + case LauDecayTime::FuncType::ExpHypTrig : + // All fine, we can deal with these + break; + } +} + +void LauSmearedDecayTimeCalculator::cacheInfo( const std::vector& abscissas, const std::vector& abscissaErrors ) +{ + abscissas_ = abscissas; + terms_.clear(); + terms_.resize( abscissas_.size() ); + + abscissaErrors_.clear(); + if ( abscissaErrors.empty() ) { + if ( resolModel_.scaleWithPerEventError() ) { + std::cerr << "ERROR in LauSmearedDecayTimeCalculator::cacheInfo : No per-event decay time errors provided" << std::endl; + gSystem->Exit(EXIT_FAILURE); + } + abscissaErrors_.push_back( { 1.0, 1.0 } ); + } else { + if ( abscissaErrors.size() != abscissas_.size() ) { + std::cerr << "ERROR in LauSmearedDecayTimeCalculator::cacheInfo : Sizes of decay time and decay time error vectors do not match" << std::endl; + std::cerr << " : decay time: " << abscissas_.size() << std::endl; + std::cerr << " : decay time error: " << abscissaErrors.size() << std::endl; + gSystem->Exit(EXIT_FAILURE); + } + abscissaErrors_.reserve( abscissaErrors.size() ); + for ( auto error : abscissaErrors ) { + abscissaErrors_.push_back( { 1.0, error } ); + } + } + + this->updateCache(); +} + +void LauSmearedDecayTimeCalculator::updateCache() +{ + const std::size_t nEvents { abscissas_.size() }; + for ( std::size_t iEvt{0}; iEvt < nEvents; ++iEvt ) { + terms_[iEvt] = this->calcTerms( abscissas_[iEvt], abscissaErrors_[iEvt] ); + } +} + +void LauSmearedDecayTimeCalculator::propagateParUpdates() +{ + if ( physicsParamChanged_ or resoParamChanged_ ) { + this->updateCache(); + } +} + +LauDecayTimeTerms LauSmearedDecayTimeCalculator::calcTerms( const Double_t abscissa, const std::array& abscissaError ) +{ + /* + // TODO - this is how to calculate the Delta and DeltaExp cases - looks like they already include their normalisation + // - to be tidied up and integrated into the rest of the framework in due course + Double_t 
value(0.0); + + if (type == LauDecayTime::FuncType::Delta || type == LauDecayTime::FuncType::DeltaExp) { + + // Calculate the gaussian function(s) + for (std::size_t iGauss(0); iGauss 1e-10) { + Double_t scale = LauConstants::root2*sigma[iGauss]; + Double_t scale2 = LauConstants::rootPiBy2*sigma[iGauss]; + Double_t x = ( abscissa - mean[iGauss] ) / scale; + Double_t exponent = -x*x; + Double_t norm = scale2*(TMath::Erf((maxAbscissa_ - mean[iGauss])/scale) + - TMath::Erf((minAbscissa_ - mean[iGauss])/scale)); + value += frac[iGauss]*TMath::Exp(exponent)/norm; + } + } + + // TODO - calc exp term here + + if (type == LauDecayTime::FuncType::DeltaExp) { + value *= fracPrompt; + value += (1.0-fracPrompt)*expTerm_; + } else { + value = expTerm_; + } + } + */ + + LauDecayTimeTerms terms; + + const LauDecayTime::FuncType type { physModel_.getFunctionType() }; + + for ( std::size_t iGauss{0}; iGauss < nGauss_; ++iGauss ) { + + const Double_t mean { meanVals_[iGauss] * abscissaError[scaleMeans_[iGauss]] }; + const Double_t widthOverRoot2 { widthVals_[iGauss] * abscissaError[scaleWidths_[iGauss]] / LauConstants::root2 }; + const Double_t x { ( abscissa - mean ) / ( 2.0 * widthOverRoot2 ) }; + + const std::complex z { gammaVal_ * widthOverRoot2 }; + const Double_t expTerm { this->smearedGeneralTerm( z, x ).real() }; + terms.expTerm += fractionVals_[iGauss] * expTerm; + + if ( type == LauDecayTime::FuncType::ExpTrig or type == LauDecayTime::FuncType::ExpHypTrig ) { + + const std::complex zTrig { gammaVal_ * widthOverRoot2, -deltaMVal_ * widthOverRoot2 }; + const std::complex trigTerm { this->smearedGeneralTerm( zTrig, x ) }; + + const Double_t cosTerm { trigTerm.real() }; + const Double_t sinTerm { trigTerm.imag() }; + + terms.cosTerm += fractionVals_[iGauss] * cosTerm; + terms.sinTerm += fractionVals_[iGauss] * sinTerm; + + if ( type == LauDecayTime::FuncType::ExpTrig ) { + + terms.coshTerm += fractionVals_[iGauss] * expTerm; + + } else { + + const std::complex zH { (gammaVal_ 
- 0.5 * deltaGammaVal_) * widthOverRoot2 }; + const std::complex zL { (gammaVal_ + 0.5 * deltaGammaVal_) * widthOverRoot2 }; + + const Double_t termH { this->smearedGeneralTerm( zH, x ).real() }; + const Double_t termL { this->smearedGeneralTerm( zL, x ).real() }; + + const Double_t coshTerm { 0.5 * (termH + termL) }; + const Double_t sinhTerm { 0.5 * (termH - termL) }; + + terms.coshTerm += fractionVals_[iGauss] * coshTerm; + terms.sinhTerm += fractionVals_[iGauss] * sinhTerm; + } + } + } + + return terms; +} + +std::complex LauSmearedDecayTimeCalculator::smearedGeneralTerm( const std::complex& z, const Double_t x ) const +{ + using namespace std::complex_literals; + + const Double_t x2 { x * x }; + + const std::complex arg1 { 1i * (z - x) }; + + const std::complex arg2 { -(x2) - (arg1*arg1) }; + + const std::complex conv { ( arg1.imag() < -5.0 ) ? 0.5 * std::exp(arg2) * RooMath::erfc( -1i * arg1 ) : 0.5 * std::exp(-(x2)) * RooMath::faddeeva(arg1) }; + + return conv; +} diff --git a/src/LauSmearedUniformEfficiencyDecayTimeIntegrator.cc b/src/LauSmearedUniformEfficiencyDecayTimeIntegrator.cc new file mode 100644 index 0000000..36b3234 --- /dev/null +++ b/src/LauSmearedUniformEfficiencyDecayTimeIntegrator.cc @@ -0,0 +1,213 @@ + +/* +Copyright 2021 University of Warwick + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Laura++ package authors: +John Back +Paul Harrison +Thomas Latham +*/ + +/*! 
\file LauSmearedUniformEfficiencyDecayTimeIntegrator.cc + \brief File containing implementation of LauSmearedUniformEfficiencyDecayTimeIntegrator class. +*/ + +#include +#include + +#include "TSystem.h" +#include "RooMath.h" + +#include "LauConstants.hh" +#include "LauSmearedUniformEfficiencyDecayTimeIntegrator.hh" + +ClassImp(LauSmearedUniformEfficiencyDecayTimeIntegrator); + + +LauSmearedUniformEfficiencyDecayTimeIntegrator::LauSmearedUniformEfficiencyDecayTimeIntegrator( const Double_t minAbscissaVal, const Double_t maxAbscissaVal, const LauDecayTimePhysicsModel& physModel, const LauUniformDecayTimeEfficiency& effModel, const LauDecayTimeResolution& resolModel ) : + minAbscissa_{minAbscissaVal}, + maxAbscissa_{maxAbscissaVal}, + physModel_{physModel}, + effModel_{effModel}, + resolModel_{resolModel}, + tauVal_{physModel_.getLifetimeValue()}, + gammaVal_{physModel_.getGammaValue()}, + deltaMVal_{physModel_.getDeltaMValue()}, + deltaGammaVal_{physModel_.getDeltaGammaValue()}, + fracPromptVal_{physModel_.getFracPromptValue()}, + nGauss_{resolModel_.nGauss()}, + fractionVals_{resolModel_.getFractionValues()}, + meanVals_{resolModel_.getMeanValues()}, + widthVals_{resolModel_.getWidthValues()}, + scaleMeans_{resolModel_.scaleMeans()}, + scaleWidths_{resolModel_.scaleWidths()}, + physicsParamChanged_{physModel_.anythingChanged()}, + resoParamChanged_{resolModel_.anythingChanged()} +{ + switch ( physModel_.getFunctionType() ) { + case LauDecayTime::FuncType::Hist : + // This does not make sense for us: + // - Hist has no need of an integrator + std::cerr << "ERROR in LauSmearedUniformEfficiencyDecayTimeIntegrator::LauSmearedUniformEfficiencyDecayTimeIntegrator : Unsupported function type in the physics model" << std::endl; + gSystem->Exit(EXIT_FAILURE); + break; + case LauDecayTime::FuncType::Delta : + case LauDecayTime::FuncType::DeltaExp : + // TODO These are not yet implemented + std::cerr << "ERROR in 
LauSmearedUniformEfficiencyDecayTimeIntegrator::LauSmearedUniformEfficiencyDecayTimeIntegrator : Function type not yet supported" << std::endl; + gSystem->Exit(EXIT_FAILURE); + break; + case LauDecayTime::FuncType::Exp : + case LauDecayTime::FuncType::ExpTrig : + case LauDecayTime::FuncType::ExpHypTrig : + // All fine, we can deal with these + break; + } +} + +void LauSmearedUniformEfficiencyDecayTimeIntegrator::cacheInfo( const std::vector& abscissaErrors ) +{ + abscissaErrors_.clear(); + normTerms_.clear(); + if ( abscissaErrors.empty() ) { + if ( resolModel_.scaleWithPerEventError() ) { + std::cerr << "ERROR in LauSmearedUniformEfficiencyDecayTimeIntegrator::cacheInfo : No per-event decay time errors provided" << std::endl; + gSystem->Exit(EXIT_FAILURE); + } + const std::array abscissaError { 1.0, 1.0 }; + abscissaErrors_.push_back( abscissaError ); + normTerms_.push_back( this->calcNormTerms( abscissaError ) ); + } else { + const std::size_t nEvents { abscissaErrors.size() }; + abscissaErrors_.reserve( nEvents ); + normTerms_.reserve( nEvents ); + std::array abscissaError { 1.0, 1.0 }; + for ( auto error : abscissaErrors ) { + abscissaError = { 1.0, error }; + abscissaErrors_.push_back( abscissaError ); + normTerms_.push_back( this->calcNormTerms( abscissaError ) ); + } + } +} + +void LauSmearedUniformEfficiencyDecayTimeIntegrator::propagateParUpdates() +{ + if ( not ( physicsParamChanged_ or resoParamChanged_ ) ) { + return; + } + + const std::size_t nEvents { abscissaErrors_.size() }; + for ( std::size_t iEvt{0}; iEvt < nEvents; ++iEvt ) { + normTerms_[iEvt] = this->calcNormTerms( abscissaErrors_[iEvt] ); + } +} + +LauDecayTimeNormTerms LauSmearedUniformEfficiencyDecayTimeIntegrator::calcNormTerms( const std::array& abscissaError ) const +{ + LauDecayTimeNormTerms normTerms; + + // Sum the integral for each Gaussian + for ( std::size_t iGauss{0}; iGauss < nGauss_; ++iGauss ) { + normTerms += this->calcIntegrals( abscissaError, iGauss ); + } + + // Scale by 
the efficiency + const Double_t eff { effModel_.getEfficiency( minAbscissa_ ) }; + normTerms *= eff; + + return normTerms; +} + +LauDecayTimeNormTerms LauSmearedUniformEfficiencyDecayTimeIntegrator::calcIntegrals( const std::array& abscissaError, const std::size_t iGauss ) const +{ + LauDecayTimeNormTerms normTerms; + + const Double_t scaledMean { meanVals_[iGauss] * abscissaError[scaleMeans_[iGauss]] }; + const Double_t scaledWidth { widthVals_[iGauss] * abscissaError[scaleWidths_[iGauss]] }; + const Double_t fraction { fractionVals_[iGauss] }; + + const Double_t sigmaOverRoot2 { scaledWidth / LauConstants::root2 }; + + const std::complex z { gammaVal_ * sigmaOverRoot2, 0.0 }; + const std::complex integral { this->smearedGeneralIntegral( z, sigmaOverRoot2, scaledMean ) }; + const Double_t normTermExp { integral.real() }; + + normTerms.expTerm = fraction * normTermExp; + + const LauDecayTime::FuncType funcType { physModel_.getFunctionType() }; + if ( funcType == LauDecayTime::FuncType::ExpTrig or funcType == LauDecayTime::FuncType::ExpHypTrig ) { + + const std::complex zTrig { gammaVal_ * sigmaOverRoot2, -deltaMVal_ * sigmaOverRoot2 }; + const std::complex integralTrig { this->smearedGeneralIntegral( zTrig, sigmaOverRoot2, scaledMean ) }; + + const Double_t cosIntegral { integralTrig.real() }; + const Double_t sinIntegral { integralTrig.imag() }; + + normTerms.cosTerm = fraction * cosIntegral; + normTerms.sinTerm = fraction * sinIntegral; + + if ( funcType == LauDecayTime::FuncType::ExpTrig ) { + + normTerms.coshTerm = normTerms.expTerm; + + } else { + + // Heavy (H) eigenstate case + const std::complex zH { (gammaVal_ - 0.5 * deltaGammaVal_) * sigmaOverRoot2, 0.0 }; + const std::complex integralH { this->smearedGeneralIntegral( zH, sigmaOverRoot2, scaledMean ) }; + + // Light (L) eigenstate case + const std::complex zL { (gammaVal_ + 0.5 * deltaGammaVal_) * sigmaOverRoot2, 0.0 };; + const std::complex integralL { this->smearedGeneralIntegral( zL, sigmaOverRoot2, 
scaledMean ) }; + + const std::complex coshIntegral { 0.5 * (integralH + integralL) }; + const std::complex sinhIntegral { 0.5 * (integralH - integralL) }; + + normTerms.coshTerm = fraction * coshIntegral.real(); + normTerms.sinhTerm = fraction * sinhIntegral.real(); + } + } + + return normTerms; +} + +std::complex LauSmearedUniformEfficiencyDecayTimeIntegrator::smearedGeneralIntegral(const std::complex& z, const Double_t sigmaOverRoot2, const Double_t mu) const +{ + using namespace std::complex_literals; + + const Double_t x1 { (maxAbscissa_ - mu) / (2.0 * sigmaOverRoot2) }; + const Double_t x0 { (minAbscissa_ - mu) / (2.0 * sigmaOverRoot2) }; + + const std::complex arg1 { 1i * (z - x1) }; + const std::complex arg0 { 1i * (z - x0) }; + + std::complex integral = 0.0 + 0i; + + if(arg1.imag() < -5.0) + {integral = RooMath::erf(x1) - std::exp(-(x1*x1) - (arg1*arg1)) * RooMath::erfc(-1i * arg1);} + else + {integral = RooMath::erf(x1) - TMath::Exp(-(x1*x1)) * RooMath::faddeeva(arg1);} + + if(arg0.imag() < -5.0) + {integral -= RooMath::erf(x0) - std::exp(-(x0*x0) - (arg0*arg0)) * RooMath::erfc(-1i * arg0);} + else + {integral -= RooMath::erf(x0) - TMath::Exp(-(x0*x0)) * RooMath::faddeeva(arg0);} + + integral *= (sigmaOverRoot2 / (2.0 * z)); + + return integral; +} diff --git a/src/LauSplineDecayTimeEfficiency.cc b/src/LauSplineDecayTimeEfficiency.cc new file mode 100644 index 0000000..6b0d470 --- /dev/null +++ b/src/LauSplineDecayTimeEfficiency.cc @@ -0,0 +1,129 @@ + +/* +Copyright 2021 University of Warwick + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Laura++ package authors: +John Back +Paul Harrison +Thomas Latham +*/ + +/*! \file LauSplineDecayTimeEfficiency.cc + \brief File containing the implementation of the LauSplineDecayTimeEfficiency class. +*/ + +#include +#include + +#include "Rtypes.h" +#include "TSystem.h" + +#include "Lau1DCubicSpline.hh" +#include "LauSplineDecayTimeEfficiency.hh" +#include "LauParameter.hh" +#include "LauParamFixed.hh" + +ClassImp(LauSplineDecayTimeEfficiency); + +LauSplineDecayTimeEfficiency::LauSplineDecayTimeEfficiency( std::unique_ptr effSpline ) : effSpline_{std::move(effSpline)} +{ + if ( not effSpline_ ) { + std::cerr<<"ERROR in LauSplineDecayTimeEfficiency::LauSplineDecayTimeEfficiency : supplied efficiency spline pointer is null"<Exit(EXIT_FAILURE); + } + + values_ = effSpline_->getYValues(); + + const std::size_t nPars { values_.size() }; + ownedParams_.reserve( nPars ); + + for( std::size_t index{0}; index < nPars; ++index ) { + ownedParams_.emplace_back( std::make_unique( Form( "dteff_knot_%lu", index ), values_[index], 0.0, 1.0, kTRUE ) ); + } + + params_.reserve( nPars ); + for ( auto& ptr : ownedParams_ ) { + params_.push_back( ptr.get() ); + } +} + +void LauSplineDecayTimeEfficiency::fixKnots() +{ + for ( auto& ptr : ownedParams_ ) { + ptr->fixed(true); + } +} + +void LauSplineDecayTimeEfficiency::floatKnots() +{ + for ( auto& ptr : ownedParams_ ) { + ptr->fixed(false); + } +} + +void LauSplineDecayTimeEfficiency::fixKnot( const std::size_t knotIndex, const bool fixed ) +{ + if ( knotIndex >= ownedParams_.size() ) { + std::cerr<<"ERROR in LauSplineDecayTimeEfficiency::fixKnot : supplied knot index is out of range"<fixed(fixed); +} + +Double_t LauSplineDecayTimeEfficiency::getEfficiency( const Double_t abscissa ) const +{ + return effSpline_->evaluate( abscissa ); +} + +void LauSplineDecayTimeEfficiency::initialise() +{ + LauParamFixed isFixed; + 
anyKnotFloating_ = not std::all_of( params_.begin(), params_.end(), isFixed ); + + this->updateParameterCache(); +} + +void LauSplineDecayTimeEfficiency::updateParameterCache() +{ + // Update the spline itself + effSpline_->updateYValues( params_ ); + + static auto assignValue = [](const LauAbsRValue* par){return par->unblindValue();}; + + // Update our local cache + std::transform( params_.begin(), params_.end(), values_.begin(), assignValue ); +} + +void LauSplineDecayTimeEfficiency::propagateParUpdates() +{ + // If none of the parameters are floating there's nothing to do + if ( not anyKnotFloating_ ) { + return; + } + + // Otherwise, determine which of the floating parameters have changed (if any) and act accordingly + + // TODO - these lambda's are repeated in lots of places, should we put them in some utility include? + static auto checkEquality = [](const LauAbsRValue* par, const Double_t cacheVal){return par->unblindValue() == cacheVal;}; + + anyKnotChanged_ = not std::equal(params_.begin(), params_.end(), values_.begin(), checkEquality); + + // Update the acceptance spline if any of the knot values have changed + if ( anyKnotChanged_ ) { + this->updateParameterCache(); + } +} diff --git a/src/LauTimeDepFitModel.cc b/src/LauTimeDepFitModel.cc index 6958686..5ad0476 100644 --- a/src/LauTimeDepFitModel.cc +++ b/src/LauTimeDepFitModel.cc @@ -1,3386 +1,3367 @@ /* Copyright 2006 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauTimeDepFitModel.cc \brief File containing implementation of LauTimeDepFitModel class. */ #include #include #include #include #include #include #include "TFile.h" #include "TMinuit.h" #include "TRandom.h" #include "TSystem.h" #include "TVirtualFitter.h" #include "LauAbsBkgndDPModel.hh" #include "LauAbsCoeffSet.hh" #include "LauAbsPdf.hh" #include "LauAsymmCalc.hh" #include "LauComplex.hh" #include "LauConstants.hh" #include "LauDPPartialIntegralInfo.hh" #include "LauDaughters.hh" #include "LauDecayTimePdf.hh" #include "LauFitNtuple.hh" #include "LauGenNtuple.hh" #include "LauIsobarDynamics.hh" #include "LauKinematics.hh" #include "LauParamFixed.hh" #include "LauPrint.hh" #include "LauRandom.hh" #include "LauScfMap.hh" #include "LauTimeDepFitModel.hh" #include "LauFlavTag.hh" ClassImp(LauTimeDepFitModel) LauTimeDepFitModel::LauTimeDepFitModel(LauIsobarDynamics* modelB0bar, LauIsobarDynamics* modelB0, LauFlavTag* flavTag) : LauAbsFitModel(), sigModelB0bar_(modelB0bar), sigModelB0_(modelB0), kinematicsB0bar_(modelB0bar ? modelB0bar->getKinematics() : 0), kinematicsB0_(modelB0 ? 
modelB0->getKinematics() : 0), usingBkgnd_(kFALSE), flavTag_(flavTag), curEvtTrueTagFlv_(LauFlavTag::Unknown), curEvtDecayFlv_(LauFlavTag::Unknown), nSigComp_(0), nSigDPPar_(0), nDecayTimePar_(0), nExtraPdfPar_(0), nNormPar_(0), nCalibPar_(0), nTagEffPar_(0), nEffiPar_(0), nAsymPar_(0), coeffsB0bar_(0), coeffsB0_(0), coeffPars_(0), fitFracB0bar_(0), fitFracB0_(0), fitFracAsymm_(0), acp_(0), meanEffB0bar_("meanEffB0bar",0.0,0.0,1.0), meanEffB0_("meanEffB0",0.0,0.0,1.0), DPRateB0bar_("DPRateB0bar",0.0,0.0,100.0), DPRateB0_("DPRateB0",0.0,0.0,100.0), signalEvents_(0), signalAsym_(0), cpevVarName_(""), cpEigenValue_(CPEven), evtCPEigenVals_(0), deltaM_("deltaM",0.0), deltaGamma_("deltaGamma",0.0), tau_("tau",LauConstants::tauB0), phiMix_("phiMix", 2.0*LauConstants::beta, -LauConstants::threePi, LauConstants::threePi, kFALSE), sinPhiMix_("sinPhiMix", TMath::Sin(2.0*LauConstants::beta), -1.0, 1.0, kFALSE), cosPhiMix_("cosPhiMix", TMath::Cos(2.0*LauConstants::beta), -1.0, 1.0, kFALSE), useSinCos_(kFALSE), phiMixComplex_(TMath::Cos(-2.0*LauConstants::beta),TMath::Sin(-2.0*LauConstants::beta)), signalDecayTimePdf_(), BkgndTypes_(flavTag_->getBkgndTypes()), BkgndDecayTimePdfs_(), curEvtDecayTime_(0.0), curEvtDecayTimeErr_(0.0), sigExtraPdf_(), sigFlavTagPdf_(), bkgdFlavTagPdf_(), AProd_("AProd",0.0,-1.0,1.0,kTRUE), iterationsMax_(100000000), nGenLoop_(0), ASq_(0.0), aSqMaxVar_(0.0), aSqMaxSet_(1.25), storeGenAmpInfo_(kFALSE), signalTree_(), reuseSignal_(kFALSE), sigDPLike_(0.0), sigExtraLike_(0.0), sigFlavTagLike_(0.0), bkgdFlavTagLike_(0.0), sigTotalLike_(0.0) { // Set up ftag here? 
this->setBkgndClassNames(flavTag_->getBkgndNames()); const ULong_t nBkgnds { this->nBkgndClasses() }; if ( nBkgnds > 0 ){ usingBkgnd_ = kTRUE; for ( ULong_t iBkgnd{0}; iBkgnd < nBkgnds; ++iBkgnd ) { const TString& bkgndClass { this->bkgndClassName( iBkgnd ) }; AProdBkgnd_[iBkgnd] = new LauParameter("AProd_"+bkgndClass,0.0,-1.0,1.0,kTRUE); } } // Make sure that the integration scheme will be symmetrised sigModelB0bar_->forceSymmetriseIntegration(kTRUE); sigModelB0_->forceSymmetriseIntegration(kTRUE); } LauTimeDepFitModel::~LauTimeDepFitModel() { for ( LauAbsPdf* pdf : sigExtraPdf_ ) { delete pdf; } for(auto& data : bkgndTree_){ delete data; } } void LauTimeDepFitModel::setupBkgndVectors() { UInt_t nBkgnds { this->nBkgndClasses() }; AProdBkgnd_.resize( nBkgnds ); BkgndDPModelsB_.resize( nBkgnds ); BkgndDPModelsBbar_.resize( nBkgnds ); BkgndDecayTimePdfs_.resize( nBkgnds ); BkgndPdfs_.resize( nBkgnds ); bkgndEvents_.resize( nBkgnds ); bkgndAsym_.resize( nBkgnds ); bkgndTree_.resize( nBkgnds ); reuseBkgnd_.resize( nBkgnds ); bkgndDPLike_.resize( nBkgnds ); bkgndExtraLike_.resize( nBkgnds ); bkgndTotalLike_.resize( nBkgnds ); } void LauTimeDepFitModel::setNSigEvents(LauParameter* nSigEvents) { if ( nSigEvents == 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : The LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( signalEvents_ != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : You are trying to overwrite the signal yield." << std::endl; return; } if ( signalAsym_ != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : You are trying to overwrite the signal asymmetry." 
<< std::endl; return; } signalEvents_ = nSigEvents; signalEvents_->name("signalEvents"); Double_t value = nSigEvents->value(); signalEvents_->range(-2.0*(TMath::Abs(value)+1.0),2.0*(TMath::Abs(value)+1.0)); signalAsym_ = new LauParameter("signalAsym",0.0,-1.0,1.0,kTRUE); } void LauTimeDepFitModel::setNSigEvents(LauParameter* nSigEvents, LauParameter* sigAsym) { if ( nSigEvents == 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : The event LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( sigAsym == 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : The asym LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( signalEvents_ != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : You are trying to overwrite the signal yield." << std::endl; return; } if ( signalAsym_ != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : You are trying to overwrite the signal asymmetry." << std::endl; return; } signalEvents_ = nSigEvents; signalEvents_->name("signalEvents"); Double_t value = nSigEvents->value(); signalEvents_->range(-2.0*(TMath::Abs(value)+1.0), 2.0*(TMath::Abs(value)+1.0)); signalAsym_ = sigAsym; signalAsym_->name("signalAsym"); signalAsym_->range(-1.0,1.0); } void LauTimeDepFitModel::setNBkgndEvents(LauAbsRValue* nBkgndEvents) { if ( nBkgndEvents == 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBgkndEvents : The background yield LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( ! this->validBkgndClass( nBkgndEvents->name() ) ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : Invalid background class \"" << nBkgndEvents->name() << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." 
<< std::endl; gSystem->Exit(EXIT_FAILURE); } UInt_t bkgndID = this->bkgndClassID( nBkgndEvents->name() ); if ( bkgndEvents_[bkgndID] != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : You are trying to overwrite the background yield." << std::endl; return; } if ( bkgndAsym_[bkgndID] != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : You are trying to overwrite the background asymmetry." << std::endl; return; } nBkgndEvents->name( nBkgndEvents->name()+"Events" ); if ( nBkgndEvents->isLValue() ) { Double_t value = nBkgndEvents->value(); LauParameter* yield = dynamic_cast( nBkgndEvents ); yield->range(-2.0*(TMath::Abs(value)+1.0), 2.0*(TMath::Abs(value)+1.0)); } bkgndEvents_[bkgndID] = nBkgndEvents; bkgndAsym_[bkgndID] = new LauParameter(nBkgndEvents->name()+"Asym",0.0,-1.0,1.0,kTRUE); } void LauTimeDepFitModel::setNBkgndEvents(LauAbsRValue* nBkgndEvents, LauAbsRValue* bkgndAsym) { if ( nBkgndEvents == 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : The background yield LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( bkgndAsym == 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : The background asym LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( ! this->validBkgndClass( nBkgndEvents->name() ) ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : Invalid background class \"" << nBkgndEvents->name() << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; gSystem->Exit(EXIT_FAILURE); } UInt_t bkgndID = this->bkgndClassID( nBkgndEvents->name() ); if ( bkgndEvents_[bkgndID] != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : You are trying to overwrite the background yield." 
<< std::endl; return; } if ( bkgndAsym_[bkgndID] != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : You are trying to overwrite the background asymmetry." << std::endl; return; } bkgndEvents_[bkgndID]->name( nBkgndEvents->name()+"Events" ); if ( nBkgndEvents->isLValue() ) { Double_t value = nBkgndEvents->value(); LauParameter* yield = dynamic_cast( nBkgndEvents ); yield->range(-2.0*(TMath::Abs(value)+1.0), 2.0*(TMath::Abs(value)+1.0)); } bkgndEvents_[bkgndID] = nBkgndEvents; bkgndAsym_[bkgndID]->name( nBkgndEvents->name()+"Asym" ); if ( bkgndAsym->isLValue() ) { LauParameter* asym = dynamic_cast( bkgndAsym ); asym->range(-1.0, 1.0); } bkgndAsym_[bkgndID] = bkgndAsym; } void LauTimeDepFitModel::setSignalDtPdf(LauDecayTimePdf* pdf) { if (pdf==0) { std::cerr<<"ERROR in LauTimeDepFitModel::setSignalDtPdf : The PDF pointer is null, not adding it."<validBkgndClass( bkgndClass) ) { std::cerr << "ERROR in LauTimeDepFitModel::setBkgndDtPdf : Invalid background class \"" << bkgndClass << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; return; } UInt_t bkgndID = this->bkgndClassID( bkgndClass ); BkgndDecayTimePdfs_[bkgndID] = pdf; usingBkgnd_ = kTRUE; } void LauTimeDepFitModel::setBkgndDPModels(const TString& bkgndClass, LauAbsBkgndDPModel* BModel, LauAbsBkgndDPModel* BbarModel) { if (BModel==nullptr) { std::cerr << "ERROR in LauTimeDepFitModel::setBkgndDPModels : the model pointer is null for the particle model." << std::endl; return; } // check that this background name is valid if ( ! this->validBkgndClass( bkgndClass) ) { std::cerr << "ERROR in LauTimeDepFitModel::setBkgndDPModels : Invalid background class \"" << bkgndClass << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." 
<< std::endl; return; } UInt_t bkgndID = this->bkgndClassID( bkgndClass ); BkgndDPModelsB_[bkgndID] = BModel; if (BbarModel==nullptr) { std::cout << "INFO in LauTimeDepFitModel::setBkgndDPModels : the model pointer is null for the anti-particle model. Using only the particle model." << std::endl; BkgndDPModelsBbar_[bkgndID] = nullptr; } else { BkgndDPModelsBbar_[bkgndID] = BbarModel; } usingBkgnd_ = kTRUE; } void LauTimeDepFitModel::setSignalPdfs(LauAbsPdf* pdf) { // These "extra variables" are assumed to be purely kinematical, like mES and DeltaE //or making use of Rest of Event information, and therefore independent of whether //the parent is a B0 or a B0bar. If this assupmtion doesn't hold, do modify this part! if (pdf==0) { std::cerr<<"ERROR in LauTimeDepFitModel::setSignalPdfs : The PDF pointer is null."<validBkgndClass( bkgndClass ) ) { std::cerr << "ERROR in LauTimeDepFitModel::setBkgndPdf : Invalid background class \"" << bkgndClass << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." 
<< std::endl; return; } UInt_t bkgndID = this->bkgndClassID( bkgndClass ); BkgndPdfs_[bkgndID].push_back(pdf); usingBkgnd_ = kTRUE; } void LauTimeDepFitModel::setPhiMix(const Double_t phiMix, const Bool_t fixPhiMix, const Bool_t useSinCos) { phiMix_.value(phiMix); phiMix_.initValue(phiMix); phiMix_.genValue(phiMix); phiMix_.fixed(fixPhiMix); const Double_t sinPhiMix = TMath::Sin(phiMix); sinPhiMix_.value(sinPhiMix); sinPhiMix_.initValue(sinPhiMix); sinPhiMix_.genValue(sinPhiMix); sinPhiMix_.fixed(fixPhiMix); const Double_t cosPhiMix = TMath::Cos(phiMix); cosPhiMix_.value(cosPhiMix); cosPhiMix_.initValue(cosPhiMix); cosPhiMix_.genValue(cosPhiMix); cosPhiMix_.fixed(fixPhiMix); useSinCos_ = useSinCos; phiMixComplex_.setRealPart(cosPhiMix); phiMixComplex_.setImagPart(-1.0*sinPhiMix); } void LauTimeDepFitModel::initialise() { // From the initial parameter values calculate the coefficients // so they can be passed to the signal model this->updateCoeffs(); // Initialisation if (this->useDP() == kTRUE) { this->initialiseDPModels(); } // Flavour tagging //flavTag_->initialise(); // Decay-time PDFs signalDecayTimePdf_->initialise(); //Initialise for backgrounds if necessary for (auto& pdf : BkgndDecayTimePdfs_){ pdf->initialise(); } if (!this->useDP() && sigExtraPdf_.empty()) { std::cerr<<"ERROR in LauTimeDepFitModel::initialise : Signal model doesn't exist for any variable."<Exit(EXIT_FAILURE); } if (this->useDP() == kTRUE) { // Check that we have all the Dalitz-plot models if ((sigModelB0bar_ == 0) || (sigModelB0_ == 0)) { std::cerr<<"ERROR in LauTimeDepFitModel::initialise : the pointer to one (particle or anti-particle) of the signal DP models is null."<Exit(EXIT_FAILURE); } } // Next check that, if a given component is being used we've got the // right number of PDFs for all the variables involved // TODO - should probably check variable names and so on as well //UInt_t nsigpdfvars(0); //for ( LauPdfList::const_iterator pdf_iter = sigExtraPdf_.begin(); pdf_iter != 
sigExtraPdf_.end(); ++pdf_iter ) { // std::vector varNames = (*pdf_iter)->varNames(); // for ( std::vector::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) { // if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { // ++nsigpdfvars; // } // } //} //if (usingBkgnd_) { // for (LauBkgndPdfsList::const_iterator bgclass_iter = BkgndPdfsB0_.begin(); bgclass_iter != BkgndPdfsB0_.end(); ++bgclass_iter) { // UInt_t nbkgndpdfvars(0); // const LauPdfList& pdfList = (*bgclass_iter); // for ( LauPdfList::const_iterator pdf_iter = pdfList.begin(); pdf_iter != pdfList.end(); ++pdf_iter ) { // std::vector varNames = (*pdf_iter)->varNames(); // for ( std::vector::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) { // if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { // ++nbkgndpdfvars; // } // } // } // if (nbkgndpdfvars != nsigpdfvars) { // std::cerr << "ERROR in LauTimeDepFitModel::initialise : There are " << nsigpdfvars << " signal PDF variables but " << nbkgndpdfvars << " bkgnd PDF variables." << std::endl; // gSystem->Exit(EXIT_FAILURE); // } // } //} // Clear the vectors of parameter information so we can start from scratch this->clearFitParVectors(); // Set the fit parameters for signal and background models this->setSignalDPParameters(); // Set the fit parameters for the decay time models this->setDecayTimeParameters(); // Set the fit parameters for the extra PDFs this->setExtraPdfParameters(); // Set the initial bg and signal events this->setFitNEvents(); // Handle flavour-tagging calibration parameters this->setCalibParams(); // Add tagging efficiency parameters this->setTagEffParams(); - // Add the efficiency parameters - this->setEffiParams(); - //Asymmetry terms AProd and in setAsymmetries()? 
this->setAsymParams(); // Check that we have the expected number of fit variables const LauParameterPList& fitVars = this->fitPars(); if (fitVars.size() != (nSigDPPar_ + nDecayTimePar_ + nExtraPdfPar_ + nNormPar_ + nCalibPar_ + nTagEffPar_ + nEffiPar_ + nAsymPar_)) { std::cerr<<"ERROR in LauTimeDepFitModel::initialise : Number of fit parameters not of expected size."<Exit(EXIT_FAILURE); } if (sigModelB0_ == 0) { std::cerr<<"ERROR in LauTimeDepFitModel::initialiseDPModels : B0 signal DP model doesn't exist"<Exit(EXIT_FAILURE); } // Need to check that the number of components we have and that the dynamics has matches up const UInt_t nAmpB0bar = sigModelB0bar_->getnTotAmp(); const UInt_t nAmpB0 = sigModelB0_->getnTotAmp(); if ( nAmpB0bar != nAmpB0 ) { std::cerr << "ERROR in LauTimeDepFitModel::initialiseDPModels : Unequal number of signal DP components in the particle and anti-particle models: " << nAmpB0bar << " != " << nAmpB0 << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( nAmpB0bar != nSigComp_ ) { std::cerr << "ERROR in LauTimeDepFitModel::initialiseDPModels : Number of signal DP components in the model (" << nAmpB0bar << ") not equal to number of coefficients supplied (" << nSigComp_ << ")." 
<< std::endl; gSystem->Exit(EXIT_FAILURE); } std::cout<<"INFO in LauTimeDepFitModel::initialiseDPModels : Initialising signal DP model"<initialise(coeffsB0bar_); sigModelB0_->initialise(coeffsB0_); fifjEffSum_.clear(); fifjEffSum_.resize(nSigComp_); for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { fifjEffSum_[iAmp].resize(nSigComp_); } // calculate the integrals of the A*Abar terms this->calcInterferenceTermIntegrals(); this->calcInterTermNorm(); // Add backgrounds if (usingBkgnd_ == kTRUE) { for (auto& model : BkgndDPModelsB_){ model->initialise(); } for (auto& model : BkgndDPModelsBbar_){ if (model != nullptr) { model->initialise(); } } } } void LauTimeDepFitModel::calcInterferenceTermIntegrals() { const std::vector& integralInfoListB0bar = sigModelB0bar_->getIntegralInfos(); const std::vector& integralInfoListB0 = sigModelB0_->getIntegralInfos(); // TODO should check (first time) that they match in terms of number of entries in the vectors and that each entry has the same number of points, ranges, weights etc. 
LauComplex A, Abar, fifjEffSumTerm; for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) { fifjEffSum_[iAmp][jAmp].zero(); } } const UInt_t nIntegralRegions = integralInfoListB0bar.size(); for ( UInt_t iRegion(0); iRegion < nIntegralRegions; ++iRegion ) { const LauDPPartialIntegralInfo* integralInfoB0bar = integralInfoListB0bar[iRegion]; const LauDPPartialIntegralInfo* integralInfoB0 = integralInfoListB0[iRegion]; const UInt_t nm13Points = integralInfoB0bar->getnm13Points(); const UInt_t nm23Points = integralInfoB0bar->getnm23Points(); for (UInt_t m13 = 0; m13 < nm13Points; ++m13) { for (UInt_t m23 = 0; m23 < nm23Points; ++m23) { const Double_t weight = integralInfoB0bar->getWeight(m13,m23); const Double_t eff = integralInfoB0bar->getEfficiency(m13,m23); const Double_t effWeight = eff*weight; for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { A = integralInfoB0->getAmplitude(m13, m23, iAmp); for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) { Abar = integralInfoB0bar->getAmplitude(m13, m23, jAmp); fifjEffSumTerm = Abar*A.conj(); fifjEffSumTerm.rescale(effWeight); fifjEffSum_[iAmp][jAmp] += fifjEffSumTerm; } } } } } } void LauTimeDepFitModel::calcInterTermNorm() { const std::vector& fNormB0bar = sigModelB0bar_->getFNorm(); const std::vector& fNormB0 = sigModelB0_->getFNorm(); LauComplex norm; for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) { LauComplex coeffTerm = coeffsB0bar_[jAmp]*coeffsB0_[iAmp].conj(); coeffTerm *= fifjEffSum_[iAmp][jAmp]; coeffTerm.rescale(fNormB0bar[jAmp] * fNormB0[iAmp]); norm += coeffTerm; } } norm *= phiMixComplex_; interTermReNorm_ = 2.0*norm.re(); interTermImNorm_ = 2.0*norm.im(); } void LauTimeDepFitModel::setAmpCoeffSet(LauAbsCoeffSet* coeffSet) { // Is there a component called compName in the signal models? 
TString compName = coeffSet->name(); TString conjName = sigModelB0bar_->getConjResName(compName); const LauDaughters* daughtersB0bar = sigModelB0bar_->getDaughters(); const LauDaughters* daughtersB0 = sigModelB0_->getDaughters(); const Bool_t conjugate = daughtersB0bar->isConjugate( daughtersB0 ); if ( ! sigModelB0bar_->hasResonance(compName) ) { if ( ! sigModelB0bar_->hasResonance(conjName) ) { std::cerr<<"ERROR in LauTimeDepFitModel::setAmpCoeffSet : B0bar signal DP model doesn't contain component \""<name( compName ); } if ( conjugate ) { if ( ! sigModelB0_->hasResonance(conjName) ) { std::cerr<<"ERROR in LauTimeDepFitModel::setAmpCoeffSet : B0 signal DP model doesn't contain component \""<hasResonance(compName) ) { std::cerr<<"ERROR in LauTimeDepFitModel::setAmpCoeffSet : B0 signal DP model doesn't contain component \""<name() == compName) { std::cerr<<"ERROR in LauTimeDepFitModel::setAmpCoeffSet : Have already set coefficients for \""<index(nSigComp_); coeffPars_.push_back(coeffSet); TString parName = coeffSet->baseName(); parName += "FitFracAsym"; fitFracAsymm_.push_back(LauParameter(parName, 0.0, -1.0, 1.0)); acp_.push_back(coeffSet->acp()); ++nSigComp_; std::cout<<"INFO in LauTimeDepFitModel::setAmpCoeffSet : Added coefficients for component \""<acp(); LauAsymmCalc asymmCalc(fitFracB0bar_[i][i].value(), fitFracB0_[i][i].value()); Double_t asym = asymmCalc.getAsymmetry(); fitFracAsymm_[i].value(asym); if (initValues) { fitFracAsymm_[i].genValue(asym); fitFracAsymm_[i].initValue(asym); } } } void LauTimeDepFitModel::setSignalDPParameters() { // Set the fit parameters for the signal model. nSigDPPar_ = 0; if ( ! this->useDP() ) { return; } std::cout << "INFO in LauTimeDepFitModel::setSignalDPParameters : Setting the initial fit parameters for the signal DP model." 
<< std::endl; // Place isobar coefficient parameters in vector of fit variables LauParameterPList& fitVars = this->fitPars(); for (UInt_t i = 0; i < nSigComp_; ++i) { LauParameterPList pars = coeffPars_[i]->getParameters(); for (auto& param : pars){ if ( !param->clone() ) { fitVars.push_back(param); ++nSigDPPar_; } } } // Obtain the resonance parameters and place them in the vector of fit variables and in a separate vector // Need to make sure that they are unique because some might appear in both DP models LauParameterPSet& resVars = this->resPars(); resVars.clear(); LauParameterPList& sigDPParsB0bar = sigModelB0bar_->getFloatingParameters(); LauParameterPList& sigDPParsB0 = sigModelB0_->getFloatingParameters(); for (auto& param : sigDPParsB0bar){ if ( resVars.insert(param).second ) { fitVars.push_back(param); ++nSigDPPar_; } } for (auto& param : sigDPParsB0){ if ( resVars.insert(param).second ) { fitVars.push_back(param); ++nSigDPPar_; } } } UInt_t LauTimeDepFitModel::addParametersToFitList(std::vector theVector) { UInt_t counter(0); LauParameterPList& fitVars = this->fitPars(); // loop through the map for (auto& pdf : theVector){ // grab the pdf and then its parameters LauAbsRValuePList& rvalues = pdf->getParameters(); // loop through the parameters for (auto& parlist : rvalues){ LauParameterPList params = parlist->getPars(); for (auto& par : params){ // for each "original" parameter add it to the list of fit parameters and increment the counter if ( !par->clone() && ( !par->fixed() || (this->twoStageFit() && par->secondStage()) ) ) { fitVars.push_back(par); ++counter; } } } } return counter; } UInt_t LauTimeDepFitModel::addParametersToFitList(LauPdfList* theList) { UInt_t counter(0); counter += this->addFitParameters(*(theList)); return counter; } void LauTimeDepFitModel::setDecayTimeParameters() { nDecayTimePar_ = 0; std::cout << "INFO in LauTimeDepFitModel::setDecayTimeParameters : Setting the initial fit parameters of the DecayTime Pdfs." 
<< std::endl; LauParameterPList& fitVars = this->fitPars(); // Loop over the Dt PDFs LauAbsRValuePList& rvalues = signalDecayTimePdf_->getParameters(); // loop through the parameters for (auto& parlist : rvalues){ LauParameterPList params = parlist->getPars(); for (auto& par : params){ // for each "original" parameter add it to the list of fit parameters and increment the counter if ( !par->clone() && ( !par->fixed() || (this->twoStageFit() && par->secondStage()) ) ) { fitVars.push_back(par); ++nDecayTimePar_; } } } if (usingBkgnd_){ nDecayTimePar_ += this->addParametersToFitList(BkgndDecayTimePdfs_); } if (useSinCos_) { if ( not sinPhiMix_.fixed() ) { fitVars.push_back(&sinPhiMix_); fitVars.push_back(&cosPhiMix_); nDecayTimePar_ += 2; } } else { if ( not phiMix_.fixed() ) { fitVars.push_back(&phiMix_); ++nDecayTimePar_; } } } void LauTimeDepFitModel::setExtraPdfParameters() { // Include the parameters of the PDF for each tagging category in the fit // NB all of them are passed to the fit, even though some have been fixed through parameter.fixed(kTRUE) // With the new "cloned parameter" scheme only "original" parameters are passed to the fit. // Their clones are updated automatically when the originals are updated. nExtraPdfPar_ = 0; std::cout << "INFO in LauTimeDepFitModel::setExtraPdfParameters : Setting the initial fit parameters of the extra Pdfs." << std::endl; nExtraPdfPar_ += this->addFitParameters(sigExtraPdf_); if (usingBkgnd_ == kTRUE) { for (auto& pdf : BkgndPdfs_){ nExtraPdfPar_ += this->addFitParameters(pdf); } } } void LauTimeDepFitModel::setFitNEvents() { nNormPar_ = 0; std::cout << "INFO in LauTimeDepFitModel::setFitNEvents : Setting the initial fit parameters of the signal and background yields." 
<< std::endl; // Initialise the total number of events to be the sum of all the hypotheses Double_t nTotEvts = signalEvents_->value(); this->eventsPerExpt(TMath::FloorNint(nTotEvts)); LauParameterPList& fitVars = this->fitPars(); // if doing an extended ML fit add the signal fraction into the fit parameters if (this->doEMLFit()) { std::cout<<"INFO in LauTimeDepFitModel::setFitNEvents : Initialising number of events for signal and background components..."<fixed() ) { fitVars.push_back(signalEvents_); ++nNormPar_; } } else { std::cout<<"INFO in LauTimeDepFitModel::setFitNEvents : Initialising number of events for background components (and hence signal)..."<useDP() == kFALSE) { fitVars.push_back(signalAsym_); ++nNormPar_; } // TODO arguably should delegate this //LauTagCatParamMap& signalTagCatFrac = flavTag_->getSignalTagCatFrac(); // tagging-category fractions for signal events //for (LauTagCatParamMap::iterator iter = signalTagCatFrac.begin(); iter != signalTagCatFrac.end(); ++iter) { // if (iter == signalTagCatFrac.begin()) { // continue; // } // LauParameter* par = &((*iter).second); // fitVars.push_back(par); // ++nNormPar_; //} // Backgrounds if (usingBkgnd_ == kTRUE) { for (auto& params : bkgndEvents_){ std::vector parameters = params->getPars(); for ( LauParameter* parameter : parameters ) { if (parameter->fixed()){continue;} if(!parameter->clone()) { fitVars.push_back(parameter); ++nNormPar_; } } } for (auto& params : bkgndAsym_){ std::vector parameters = params->getPars(); for ( LauParameter* parameter : parameters ) { if (parameter->fixed()){continue;} if(!parameter->clone()) { fitVars.push_back(parameter); ++nNormPar_; } } } } } void LauTimeDepFitModel::setAsymParams() { nAsymPar_ = 0; //Signal LauParameterPList& fitVars = this->fitPars(); if (!AProd_.fixed()){ fitVars.push_back(&AProd_); nAsymPar_+=1; } //Background(s) for(auto& AProd : AProdBkgnd_){ if (AProd->fixed()){continue;} fitVars.push_back(AProd); nAsymPar_+=1; } } void 
LauTimeDepFitModel::setTagEffParams() { nTagEffPar_ = 0; Bool_t useAltPars = flavTag_->getUseAveDelta(); std::cout << "INFO in LauTimeDepFitModel::setTagEffParams : Setting the initial fit parameters for flavour tagging efficiencies." << std::endl; if (useAltPars){ std::vector tageff_ave = flavTag_->getTagEffAve(); std::vector tageff_delta = flavTag_->getTagEffDelta(); LauParameterPList& fitVars = this->fitPars(); for(auto& eff : tageff_ave){ if (eff==nullptr){continue;} if (eff->fixed()){continue;} fitVars.push_back(eff); ++nTagEffPar_; } for(auto& eff : tageff_delta){ if (eff==nullptr){continue;} if (eff->fixed()){continue;} fitVars.push_back(eff); ++nTagEffPar_; } } else { std::vector tageff_b0 = flavTag_->getTagEffB0(); std::vector tageff_b0bar = flavTag_->getTagEffB0bar(); LauParameterPList& fitVars = this->fitPars(); for(auto& eff : tageff_b0){ if (eff==nullptr){continue;} if (eff->fixed()){continue;} fitVars.push_back(eff); ++nTagEffPar_; } for(auto& eff : tageff_b0bar){ if (eff==nullptr){continue;} if (eff->fixed()){continue;} fitVars.push_back(eff); ++nTagEffPar_; } } if (usingBkgnd_){ if (useAltPars){ std::vector> tageff_ave = flavTag_->getTagEffBkgndAve(); std::vector> tageff_delta = flavTag_->getTagEffBkgndDelta(); LauParameterPList& fitVars = this->fitPars(); for(auto& innerVec : tageff_ave){ for(auto& eff : innerVec){ if (eff == nullptr){continue;} if (eff->fixed()){continue;} fitVars.push_back(eff); ++nTagEffPar_; } } for(auto& innerVec : tageff_delta){ for(auto& eff : innerVec){ if (eff == nullptr){continue;} if (eff->fixed()){continue;} fitVars.push_back(eff); ++nTagEffPar_; } } } else { std::vector> tageff_b0 = flavTag_->getTagEffBkgndB0(); std::vector> tageff_b0bar = flavTag_->getTagEffBkgndB0bar(); LauParameterPList& fitVars = this->fitPars(); for(auto& innerVec : tageff_b0){ for(auto& eff : innerVec){ if (eff == nullptr){continue;} if (eff->fixed()){continue;} fitVars.push_back(eff); ++nTagEffPar_; } } for(auto& innerVec : tageff_b0bar){ 
for(auto& eff : innerVec){ if (eff == nullptr){continue;} if (eff->fixed()){continue;} fitVars.push_back(eff); ++nTagEffPar_; } } } } } void LauTimeDepFitModel::setCalibParams() { Bool_t useAltPars = flavTag_->getUseAveDelta(); std::cout << "INFO in LauTimeDepFitModel::setCalibParams : Setting the initial fit parameters of the flavour tagging calibration parameters." << std::endl; if (useAltPars){ std::vector p0pars_ave = flavTag_->getCalibP0Ave(); std::vector p0pars_delta = flavTag_->getCalibP0Delta(); std::vector p1pars_ave = flavTag_->getCalibP1Ave(); std::vector p1pars_delta = flavTag_->getCalibP1Delta(); LauParameterPList& fitVars = this->fitPars(); for(auto& p0 : p0pars_ave){ if (p0->fixed()){continue;} fitVars.push_back(p0); ++nCalibPar_; } for(auto& p0 : p0pars_delta){ if (p0->fixed()){continue;} fitVars.push_back(p0); ++nCalibPar_; } for(auto& p1 : p1pars_ave){ if (p1->fixed()){continue;} fitVars.push_back(p1); ++nCalibPar_; } for(auto& p1 : p1pars_delta){ if (p1->fixed()){continue;} fitVars.push_back(p1); ++nCalibPar_; } } else { std::vector p0pars_b0 = flavTag_->getCalibP0B0(); std::vector p0pars_b0bar = flavTag_->getCalibP0B0bar(); std::vector p1pars_b0 = flavTag_->getCalibP1B0(); std::vector p1pars_b0bar = flavTag_->getCalibP1B0bar(); LauParameterPList& fitVars = this->fitPars(); for(auto& p0 : p0pars_b0){ if (p0->fixed()){continue;} fitVars.push_back(p0); ++nCalibPar_; } for(auto& p0 : p0pars_b0bar){ if (p0->fixed()){continue;} fitVars.push_back(p0); ++nCalibPar_; } for(auto& p1 : p1pars_b0){ if (p1->fixed()){continue;} fitVars.push_back(p1); ++nCalibPar_; } for(auto& p1 : p1pars_b0bar){ if (p1->fixed()){continue;} fitVars.push_back(p1); ++nCalibPar_; } } } -void LauTimeDepFitModel::setEffiParams() -{ - nEffiPar_ = 0; - LauParameterPList& fitVars = this->fitPars(); - - LauParameterPList& effiPars = signalDecayTimePdf_->getEffiPars(); - - // If all of the knots are fixed we have nothing to do - LauParamFixed isFixed; - if ( std::all_of( 
effiPars.begin(), effiPars.end(), isFixed ) ) { - return; - } - - // If any knots are floating, add all knots (fixed or floating) - for(auto& par : effiPars){ - fitVars.push_back(par); - ++nEffiPar_; - } -} - void LauTimeDepFitModel::setExtraNtupleVars() { // Set-up other parameters derived from the fit results, e.g. fit fractions. if (this->useDP() != kTRUE) { return; } // First clear the vectors so we start from scratch this->clearExtraVarVectors(); LauParameterList& extraVars = this->extraPars(); // Add the B0 and B0bar fit fractions for each signal component fitFracB0bar_ = sigModelB0bar_->getFitFractions(); if (fitFracB0bar_.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFitModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetFitFractions(); if (fitFracB0_.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFitModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); icalcAsymmetries(kTRUE); // Add the Fit Fraction asymmetry for each signal component for (UInt_t i = 0; i < nSigComp_; i++) { extraVars.push_back(fitFracAsymm_[i]); } // Add the calculated CP asymmetry for each signal component for (UInt_t i = 0; i < nSigComp_; i++) { extraVars.push_back(acp_[i]); } // Now add in the DP efficiency values Double_t initMeanEffB0bar = sigModelB0bar_->getMeanEff().initValue(); meanEffB0bar_.value(initMeanEffB0bar); meanEffB0bar_.initValue(initMeanEffB0bar); meanEffB0bar_.genValue(initMeanEffB0bar); extraVars.push_back(meanEffB0bar_); Double_t initMeanEffB0 = sigModelB0_->getMeanEff().initValue(); meanEffB0_.value(initMeanEffB0); meanEffB0_.initValue(initMeanEffB0); meanEffB0_.genValue(initMeanEffB0); extraVars.push_back(meanEffB0_); // Also add in the DP rates Double_t initDPRateB0bar = sigModelB0bar_->getDPRate().initValue(); 
DPRateB0bar_.value(initDPRateB0bar); DPRateB0bar_.initValue(initDPRateB0bar); DPRateB0bar_.genValue(initDPRateB0bar); extraVars.push_back(DPRateB0bar_); Double_t initDPRateB0 = sigModelB0_->getDPRate().initValue(); DPRateB0_.value(initDPRateB0); DPRateB0_.initValue(initDPRateB0); DPRateB0_.genValue(initDPRateB0); extraVars.push_back(DPRateB0_); } void LauTimeDepFitModel::setAsymmetries(const Double_t AProd, const Bool_t AProdFix){ AProd_.value(AProd); AProd_.fixed(AProdFix); } void LauTimeDepFitModel::setBkgndAsymmetries(const TString& bkgndClass, const Double_t AProd, const Bool_t AProdFix){ // check that this background name is valid if ( ! this->validBkgndClass( bkgndClass) ) { std::cerr << "ERROR in LauTimeDepFitModel::setBkgndAsymmetries : Invalid background class \"" << bkgndClass << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; return; } UInt_t bkgndID = this->bkgndClassID( bkgndClass ); AProdBkgnd_[bkgndID]->value( AProd ); AProdBkgnd_[bkgndID]->genValue( AProd ); AProdBkgnd_[bkgndID]->initValue( AProd ); AProdBkgnd_[bkgndID]->fixed( AProdFix ); } void LauTimeDepFitModel::finaliseFitResults(const TString& tablePrefixName) { // Retrieve parameters from the fit results for calculations and toy generation // and eventually store these in output root ntuples/text files // Now take the fit parameters and update them as necessary // i.e. to make mag > 0.0, phase in the right range. // This function will also calculate any other values, such as the // fit fractions, using any errors provided by fitParErrors as appropriate. 
// Also obtain the pull values: (measured - generated)/(average error) if (this->useDP() == kTRUE) { for (UInt_t i = 0; i < nSigComp_; ++i) { // Check whether we have "a > 0.0", and phases in the right range coeffPars_[i]->finaliseValues(); } } // update the pulls on the event fractions and asymmetries if (this->doEMLFit()) { signalEvents_->updatePull(); } if (this->useDP() == kFALSE) { signalAsym_->updatePull(); } // Finalise the pulls on the decay time parameters signalDecayTimePdf_->updatePulls(); // and for backgrounds if required if (usingBkgnd_){ for (auto& pdf : BkgndDecayTimePdfs_){ pdf->updatePulls(); } } if (useSinCos_) { if ( not sinPhiMix_.fixed() ) { sinPhiMix_.updatePull(); cosPhiMix_.updatePull(); } } else { this->checkMixingPhase(); } if (usingBkgnd_ == kTRUE) { for (auto& params : bkgndEvents_){ std::vector parameters = params->getPars(); for ( LauParameter* parameter : parameters ) { parameter->updatePull(); } } for (auto& params : bkgndAsym_){ std::vector parameters = params->getPars(); for ( LauParameter* parameter : parameters ) { parameter->updatePull(); } } } // Update the pulls on all the extra PDFs' parameters this->updateFitParameters(sigExtraPdf_); if (usingBkgnd_ == kTRUE) { for (auto& pdf : BkgndPdfs_){ this->updateFitParameters(pdf); } } // Fill the fit results to the ntuple // update the coefficients and then calculate the fit fractions and ACP's if (this->useDP() == kTRUE) { this->updateCoeffs(); sigModelB0bar_->updateCoeffs(coeffsB0bar_); sigModelB0bar_->calcExtraInfo(); sigModelB0_->updateCoeffs(coeffsB0_); sigModelB0_->calcExtraInfo(); LauParArray fitFracB0bar = sigModelB0bar_->getFitFractions(); if (fitFracB0bar.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFitModel::finaliseFitResults : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } LauParArray fitFracB0 = sigModelB0_->getFitFractions(); if (fitFracB0.size() != nSigComp_) { std::cerr<<"ERROR in 
LauTimeDepFitModel::finaliseFitResults : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetMeanEff().value()); meanEffB0_.value(sigModelB0_->getMeanEff().value()); DPRateB0bar_.value(sigModelB0bar_->getDPRate().value()); DPRateB0_.value(sigModelB0_->getDPRate().value()); this->calcAsymmetries(); // Then store the final fit parameters, and any extra parameters for // the signal model (e.g. fit fractions, FF asymmetries, ACPs, mean efficiency and DP rate) this->clearExtraVarVectors(); LauParameterList& extraVars = this->extraPars(); for (UInt_t i(0); iprintFitFractions(std::cout); this->printAsymmetries(std::cout); } const LauParameterPList& fitVars = this->fitPars(); const LauParameterList& extraVars = this->extraPars(); LauFitNtuple* ntuple = this->fitNtuple(); ntuple->storeParsAndErrors(fitVars, extraVars); // find out the correlation matrix for the parameters ntuple->storeCorrMatrix(this->iExpt(), this->fitStatus(), this->covarianceMatrix()); // Fill the data into ntuple ntuple->updateFitNtuple(); // Print out the partial fit fractions, phases and the // averaged efficiency, reweighted by the dynamics (and anything else) if (this->writeLatexTable()) { TString sigOutFileName(tablePrefixName); sigOutFileName += "_"; sigOutFileName += this->iExpt(); sigOutFileName += "Expt.tex"; this->writeOutTable(sigOutFileName); } } void LauTimeDepFitModel::printFitFractions(std::ostream& output) { // Print out Fit Fractions, total DP rate and mean efficiency // First for the B0bar events for (UInt_t i = 0; i < nSigComp_; i++) { const TString compName(coeffPars_[i]->name()); output<<"B0bar FitFraction for component "<useDP() == kTRUE) { // print the fit coefficients in one table coeffPars_.front()->printTableHeading(fout); for (UInt_t i = 0; i < nSigComp_; i++) { coeffPars_[i]->printTableRow(fout); } fout<<"\\hline"<name(); resName = resName.ReplaceAll("_", "\\_"); fout< =$ & $"; 
print.printFormat(fout, meanEffB0bar_.value()); fout << "$ & $"; print.printFormat(fout, meanEffB0_.value()); fout << "$ & & \\\\" << std::endl; if (useSinCos_) { fout << "$\\sinPhiMix =$ & $"; print.printFormat(fout, sinPhiMix_.value()); fout << " \\pm "; print.printFormat(fout, sinPhiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; fout << "$\\cosPhiMix =$ & $"; print.printFormat(fout, cosPhiMix_.value()); fout << " \\pm "; print.printFormat(fout, cosPhiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; } else { fout << "$\\phiMix =$ & $"; print.printFormat(fout, phiMix_.value()); fout << " \\pm "; print.printFormat(fout, phiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; } fout << "\\hline \n\\end{tabular}" << std::endl; } if (!sigExtraPdf_.empty()) { fout<<"\\begin{tabular}{|l|c|}"<printFitParameters(sigExtraPdf_, fout); if (usingBkgnd_ == kTRUE && !BkgndPdfs_.empty()) { fout << "\\hline" << std::endl; fout << "\\Extra Background PDFs' Parameters: & \\\\" << std::endl; for (auto& pdf : BkgndPdfs_){ this->printFitParameters(pdf, fout); } } fout<<"\\hline \n\\end{tabular}"<updateSigEvents(); // Check whether we want to have randomised initial fit parameters for the signal model if (this->useRandomInitFitPars() == kTRUE) { this->randomiseInitFitPars(); } } void LauTimeDepFitModel::randomiseInitFitPars() { // Only randomise those parameters that are not fixed! 
std::cout<<"INFO in LauTimeDepFitModel::randomiseInitFitPars : Randomising the initial values of the coefficients of the DP components (and phiMix)..."<randomiseInitValues(); } phiMix_.randomiseValue(-LauConstants::pi, LauConstants::pi); if (useSinCos_) { sinPhiMix_.initValue(TMath::Sin(phiMix_.initValue())); cosPhiMix_.initValue(TMath::Cos(phiMix_.initValue())); } } LauTimeDepFitModel::LauGenInfo LauTimeDepFitModel::eventsToGenerate() { // Determine the number of events to generate for each hypothesis // If we're smearing then smear each one individually // NB this individual smearing has to be done individually per tagging category as well LauGenInfo nEvtsGen; // Signal // If we're including the DP and decay time we can't decide on the tag // yet, since it depends on the whole DP+dt PDF, however, if // we're not then we need to decide. Double_t evtWeight(1.0); Double_t nEvts = signalEvents_->genValue(); if ( nEvts < 0.0 ) { evtWeight = -1.0; nEvts = TMath::Abs( nEvts ); } //TOD sigAysm doesn't do anything here? Double_t sigAsym(0.0); if (this->useDP() == kFALSE) { sigAsym = signalAsym_->genValue(); //TODO fill in here if we care } else { Double_t rateB0bar = sigModelB0bar_->getDPRate().value(); Double_t rateB0 = sigModelB0_->getDPRate().value(); if ( rateB0bar+rateB0 > 1e-30) { sigAsym = (rateB0bar-rateB0)/(rateB0bar+rateB0); } //for (LauTagCatParamMap::const_iterator iter = signalTagCatFrac.begin(); iter != signalTagCatFrac.end(); ++iter) { // const LauParameter& par = iter->second; // Double_t eventsbyTagCat = par.value() * nEvts; // if (this->doPoissonSmearing()) { // eventsbyTagCat = LauRandom::randomFun()->Poisson(eventsbyTagCat); // } // eventsB0[iter->first] = std::make_pair( TMath::Nint(eventsbyTagCat), evtWeight ); //} //nEvtsGen[std::make_pair("signal",0)] = eventsB0; // generate signal event, decide tag later. 
nEvtsGen["signal"] = std::make_pair( nEvts, evtWeight ); } std::cout<<"INFO in LauTimeDepFitModel::eventsToGenerate : Generating toy MC with:"<bkgndClassName(bkgndID)<<" background events = "<genValue()<eventsToGenerate(); Bool_t genOK(kTRUE); Int_t evtNum(0); const UInt_t nBkgnds = this->nBkgndClasses(); std::vector bkgndClassNames(nBkgnds); std::vector bkgndClassNamesGen(nBkgnds); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { TString name( this->bkgndClassName(iBkgnd) ); bkgndClassNames[iBkgnd] = name; bkgndClassNamesGen[iBkgnd] = "gen"+name; } // Loop over the hypotheses and generate the appropriate number of // events for each one for (auto& hypo : nEvts){ // find the category of events (e.g. signal) const TString& evtCategory(hypo.first); // Type const TString& type(hypo.first); // Number of events Int_t nEvtsGen( hypo.second.first ); // get the event weight for this category const Double_t evtWeight( hypo.second.second ); for (Int_t iEvt(0); iEvtsetGenNtupleDoubleBranchValue( "evtWeight", evtWeight ); if (evtCategory == "signal") { this->setGenNtupleIntegerBranchValue("genSig",1); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { this->setGenNtupleIntegerBranchValue( bkgndClassNamesGen[iBkgnd], 0 ); } // All the generate*Event() methods have to fill in curEvtDecayTime_ and curEvtDecayTimeErr_ // In addition, generateSignalEvent has to decide on the tag and fill in curEvtTagFlv_ genOK = this->generateSignalEvent(); } else { this->setGenNtupleIntegerBranchValue("genSig",0); UInt_t bkgndID(0); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { Int_t gen(0); if ( bkgndClassNames[iBkgnd] == type ) { gen = 1; bkgndID = iBkgnd; } this->setGenNtupleIntegerBranchValue( bkgndClassNamesGen[iBkgnd], gen ); } genOK = this->generateBkgndEvent(bkgndID); } if (!genOK) { // If there was a problem with the generation then break out and return. // The problem model will have adjusted itself so that all should be OK next time. 
break; } if (this->useDP() == kTRUE) { this->setDPDtBranchValues(); // store DP, decay time and tagging variables in the ntuple } // Store the event's tag and tagging category this->setGenNtupleIntegerBranchValue("cpEigenvalue", cpEigenValue_); const TString& trueTagVarName { flavTag_->getTrueTagVarName() }; if ( trueTagVarName != "" ) { this->setGenNtupleIntegerBranchValue(trueTagVarName, curEvtTrueTagFlv_); } if ( cpEigenValue_ == QFS ) { const TString& decayFlvVarName { flavTag_->getDecayFlvVarName() }; if ( decayFlvVarName == "" ) { std::cerr<<"ERROR in LauTimeDepFitModel::genExpt : Decay flavour variable not set for QFS decay, see LauFlavTag::setDecayFlvVarName()."<Exit(EXIT_FAILURE); } else { this->setGenNtupleIntegerBranchValue(decayFlvVarName, curEvtDecayFlv_); } } const std::vector& tagVarNames { flavTag_->getTagVarNames() }; const std::vector& mistagVarNames { flavTag_->getMistagVarNames() }; // Loop over the taggers - values set via generateSignalEvent const ULong_t nTaggers {flavTag_->getNTaggers()}; for (ULong_t i=0; isetGenNtupleIntegerBranchValue(tagVarNames[i], curEvtTagFlv_[i]); this->setGenNtupleDoubleBranchValue(mistagVarNames[i], curEvtMistag_[i]); } // Store the event number (within this experiment) // and then increment it this->setGenNtupleIntegerBranchValue("iEvtWithinExpt",evtNum); ++evtNum; // Write the values into the tree this->fillGenNtupleBranches(); // Print an occasional progress message if (iEvt%1000 == 0) {std::cout<<"INFO in LauTimeDepFitModel::genExpt : Generated event number "<useDP() && genOK) { sigModelB0bar_->checkToyMC(kTRUE); sigModelB0_->checkToyMC(kTRUE); std::cout<<"aSqMaxSet = "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } LauParArray fitFracB0 = sigModelB0_->getFitFractions(); if (fitFracB0.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFitModel::generate : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); 
igetMeanEff().value()); meanEffB0_.value(sigModelB0_->getMeanEff().value()); DPRateB0bar_.value(sigModelB0bar_->getDPRate().value()); DPRateB0_.value(sigModelB0_->getDPRate().value()); } } // If we're reusing embedded events or if the generation is being // reset then clear the lists of used events if (reuseSignal_ || !genOK) { if (signalTree_) { signalTree_->clearUsedList(); } } for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { LauEmbeddedData* data = bkgndTree_[bkgndID]; if (reuseBkgnd_[bkgndID] || !genOK) { if (data) { data->clearUsedList(); } } } return genOK; } Bool_t LauTimeDepFitModel::generateSignalEvent() { // Generate signal event, including SCF if necessary. // DP:DecayTime generation follows. // If it's ok, we then generate mES, DeltaE, Fisher/NN... Bool_t genOK(kTRUE); Bool_t generatedEvent(kFALSE); Bool_t doSquareDP = kinematicsB0bar_->squareDP(); doSquareDP &= kinematicsB0_->squareDP(); LauKinematics* kinematics(kinematicsB0bar_); if (this->useDP()) { if (signalTree_) { signalTree_->getEmbeddedEvent(kinematics); //curEvtTagFlv_ = TMath::Nint(signalTree_->getValue("tagFlv")); curEvtDecayTimeErr_ = signalTree_->getValue(signalDecayTimePdf_->varErrName()); curEvtDecayTime_ = signalTree_->getValue(signalDecayTimePdf_->varName()); if (signalTree_->haveBranch("mcMatch")) { Int_t match = TMath::Nint(signalTree_->getValue("mcMatch")); if (match) { this->setGenNtupleIntegerBranchValue("genTMSig",1); this->setGenNtupleIntegerBranchValue("genSCFSig",0); } else { this->setGenNtupleIntegerBranchValue("genTMSig",0); this->setGenNtupleIntegerBranchValue("genSCFSig",1); } } } else { nGenLoop_ = 0; // Now generate from the combined DP / decay-time PDF while (generatedEvent == kFALSE && nGenLoop_ < iterationsMax_) { curEvtTrueTagFlv_ = LauFlavTag::Flavour::Unknown; curEvtDecayFlv_ = LauFlavTag::Flavour::Unknown; // First choose the true tag, accounting for the production asymmetry // CONVENTION WARNING regarding meaning of sign of AProd Double_t random = 
LauRandom::randomFun()->Rndm(); if (random <= 0.5 * ( 1.0 - AProd_.unblindValue() ) ) { curEvtTrueTagFlv_ = LauFlavTag::Flavour::B; } else { curEvtTrueTagFlv_ = LauFlavTag::Flavour::Bbar; } // Generate the DP position Double_t m13Sq{0.0}, m23Sq{0.0}; kinematicsB0bar_->genFlatPhaseSpace(m13Sq, m23Sq); // Next, calculate the total A and Abar for the given DP position sigModelB0_->calcLikelihoodInfo(m13Sq, m23Sq); sigModelB0bar_->calcLikelihoodInfo(m13Sq, m23Sq); // Generate decay time const Double_t tMin = signalDecayTimePdf_->minAbscissa(); const Double_t tMax = signalDecayTimePdf_->maxAbscissa(); curEvtDecayTime_ = LauRandom::randomFun()->Uniform(tMin,tMax); // Generate the decay time error (NB the kTRUE forces the generation of a new value) curEvtDecayTimeErr_ = signalDecayTimePdf_->generateError(kTRUE); // Calculate all the decay time info signalDecayTimePdf_->calcLikelihoodInfo(curEvtDecayTime_,curEvtDecayTimeErr_); // Retrieve the amplitudes and efficiency from the dynamics const LauComplex& Abar { sigModelB0bar_->getEvtDPAmp() }; const LauComplex& A { sigModelB0_->getEvtDPAmp() }; const Double_t ASq { A.abs2() }; const Double_t AbarSq { Abar.abs2() }; const Double_t dpEff { sigModelB0bar_->getEvtEff() }; // Also retrieve all the decay time terms const Double_t dtCos { signalDecayTimePdf_->getCosTerm() }; const Double_t dtSin { signalDecayTimePdf_->getSinTerm() }; const Double_t dtCosh { signalDecayTimePdf_->getCoshTerm() }; const Double_t dtSinh { signalDecayTimePdf_->getSinhTerm() }; // and the decay time acceptance const Double_t dtEff { signalDecayTimePdf_->getEffiTerm() }; if ( cpEigenValue_ == QFS) { // Calculate the total intensities for each flavour-specific final state const Double_t ATotSq { ( ASq * dtCosh + curEvtTrueTagFlv_ * ASq * dtCos ) * dpEff * dtEff }; const Double_t AbarTotSq { ( AbarSq * dtCosh - curEvtTrueTagFlv_ * AbarSq * dtCos ) * dpEff * dtEff }; const Double_t ASumSq { ATotSq + AbarTotSq }; // Finally we throw the dice to see whether 
this event should be generated (and, if so, which final state) const Double_t randNum = LauRandom::randomFun()->Rndm(); if (randNum <= ASumSq / aSqMaxSet_ ) { generatedEvent = kTRUE; nGenLoop_ = 0; if (ASumSq > aSqMaxVar_) {aSqMaxVar_ = ASumSq;} if ( randNum <= ATotSq / aSqMaxSet_ ) { curEvtDecayFlv_ = LauFlavTag::Flavour::B; } else { curEvtDecayFlv_ = LauFlavTag::Flavour::Bbar; } // Generate the flavour tagging information from the true tag // (we do this after accepting the event to save time) flavTag_->generateEventInfo( curEvtTrueTagFlv_, curEvtDecayTime_ ); curEvtTagFlv_ = flavTag_->getCurEvtTagFlv(); curEvtMistag_ = flavTag_->getCurEvtMistag(); } else { nGenLoop_++; } } else { // Calculate the DP terms const Double_t aSqSum { ASq + AbarSq }; const Double_t aSqDif { ASq - AbarSq }; const LauComplex inter { Abar * A.conj() * phiMixComplex_ }; const Double_t interTermIm { ( cpEigenValue_ == CPEven ) ? 2.0 * inter.im() : -2.0 * inter.im() }; const Double_t interTermRe { ( cpEigenValue_ == CPEven ) ? 
2.0 * inter.re() : -2.0 * inter.re() }; // Combine DP and decay-time info for all terms const Double_t coshTerm { aSqSum * dtCosh }; const Double_t sinhTerm { interTermRe * dtSinh }; const Double_t cosTerm { aSqDif * dtCos }; const Double_t sinTerm { interTermIm * dtSin }; // Sum to obtain the total and multiply by the efficiency // Multiplying the cos and sin terms by the true flavour at production const Double_t ATotSq { ( coshTerm + sinhTerm + curEvtTrueTagFlv_ * ( cosTerm - sinTerm ) ) * dpEff * dtEff }; //Finally we throw the dice to see whether this event should be generated const Double_t randNum = LauRandom::randomFun()->Rndm(); if (randNum <= ATotSq/aSqMaxSet_ ) { generatedEvent = kTRUE; nGenLoop_ = 0; if (ATotSq > aSqMaxVar_) {aSqMaxVar_ = ATotSq;} // Generate the flavour tagging information from the true tag // (we do this after accepting the event to save time) flavTag_->generateEventInfo( curEvtTrueTagFlv_, curEvtDecayTime_ ); curEvtTagFlv_ = flavTag_->getCurEvtTagFlv(); curEvtMistag_ = flavTag_->getCurEvtMistag(); } else { nGenLoop_++; } } } // end of while !generatedEvent loop } // end of if (signalTree_) else control } else { if ( signalTree_ ) { signalTree_->getEmbeddedEvent(0); //curEvtTagFlv_ = TMath::Nint(signalTree_->getValue("tagFlv")); curEvtDecayTimeErr_ = signalTree_->getValue(signalDecayTimePdf_->varErrName()); curEvtDecayTime_ = signalTree_->getValue(signalDecayTimePdf_->varName()); } } // Check whether we have generated the toy MC OK. 
if (nGenLoop_ >= iterationsMax_) { aSqMaxSet_ = 1.01 * aSqMaxVar_; genOK = kFALSE; std::cerr<<"WARNING in LauTimeDepFitModel::generateSignalEvent : Hit max iterations: setting aSqMaxSet_ to "< aSqMaxSet_) { aSqMaxSet_ = 1.01 * aSqMaxVar_; genOK = kFALSE; std::cerr<<"WARNING in LauTimeDepFitModel::generateSignalEvent : Found a larger ASq value: setting aSqMaxSet_ to "<updateKinematics(kinematicsB0bar_->getm13Sq(), kinematicsB0bar_->getm23Sq() ); this->generateExtraPdfValues(sigExtraPdf_, signalTree_); } // Check for problems with the embedding if (signalTree_ && (signalTree_->nEvents() == signalTree_->nUsedEvents())) { std::cerr<<"WARNING in LauTimeDepFitModel::generateSignalEvent : Source of embedded signal events used up, clearing the list of used events."<clearUsedList(); } return genOK; } Bool_t LauTimeDepFitModel::generateBkgndEvent(UInt_t bkgndID) { // Generate Bkgnd event Bool_t genOK{kTRUE}; //Check necessary ingredients are in place //TODO these checks should be part of a general sanity check during the initialisation phase if (BkgndDPModelsB_[bkgndID] == nullptr){ std::cerr << "ERROR in LauTimeDepFitModel::generateBkgndEvent : Dalitz plot model is missing" << std::endl; gSystem->Exit(EXIT_FAILURE); } if (BkgndDecayTimePdfs_[bkgndID] == nullptr){ std::cerr << "ERROR in LauTimeDepFitModel::generateBkgndEvent : Decay time model is missing" << std::endl; gSystem->Exit(EXIT_FAILURE); } //TODO restore the ability to embed events from an external source //LauAbsBkgndDPModel* model(0); //LauEmbeddedData* embeddedData(0); //LauPdfList* extraPdfs(0); //LauKinematics* kinematics(0); //model = BkgndDPModels_[bkgndID]; //if (this->enableEmbedding()) { // // find the right embedded data for the current tagging category // LauTagCatEmbDataMap::const_iterator emb_iter = bkgndTree_[bkgndID].find(curEvtTagCat_); // embeddedData = (emb_iter != bkgndTree_[bkgndID].end()) ? 
emb_iter->second : 0; //} //extraPdfs = &BkgndPdfs_[bkgndID]; //kinematics = kinematicsB0bar_; //if (this->useDP()) { // if (embeddedData) { // embeddedData->getEmbeddedEvent(kinematics); // } else { // if (model == 0) { // const TString& bkgndClass = this->bkgndClassName(bkgndID); // std::cerr << "ERROR in LauCPFitModel::generateBkgndEvent : Can't find the DP model for background class \"" << bkgndClass << "\"." << std::endl; // gSystem->Exit(EXIT_FAILURE); // } // genOK = model->generate(); // } //} else { // if (embeddedData) { // embeddedData->getEmbeddedEvent(0); // } //} //if (genOK) { // this->generateExtraPdfValues(extraPdfs, embeddedData); //} //// Check for problems with the embedding //if (embeddedData && (embeddedData->nEvents() == embeddedData->nUsedEvents())) { // const TString& bkgndClass = this->bkgndClassName(bkgndID); // std::cerr << "WARNING in LauCPFitModel::generateBkgndEvent : Source of embedded " << bkgndClass << " events used up, clearing the list of used events." << std::endl; // embeddedData->clearUsedList(); //} // switch ( BkgndTypes_[bkgndID] ) { case LauFlavTag::Combinatorial: { // This doesn't really mean anything for combinatorial background // TODO But maybe we need some sort of asymmetry parameter here? Double_t random = LauRandom::randomFun()->Rndm(); if ( random <= 0.5 ) { curEvtTrueTagFlv_ = LauFlavTag::Flavour::B; } else { curEvtTrueTagFlv_ = LauFlavTag::Flavour::Bbar; } // generate the true decay flavour - again this doesn't make much sense for combinatorial so we just flip a coin // TODO - we maybe need an asymmetry parameter here as well? 
if ( cpEigenValue_ == CPEigenvalue::QFS ) { if (random <= 0.5 ) { curEvtDecayFlv_ = LauFlavTag::Flavour::B; } else { curEvtDecayFlv_ = LauFlavTag::Flavour::Bbar; } } // generate the DP position BkgndDPModelsB_[bkgndID]->generate(); // generate decay time and its error curEvtDecayTimeErr_ = BkgndDecayTimePdfs_[bkgndID]->generateError(kTRUE); curEvtDecayTime_ = BkgndDecayTimePdfs_[bkgndID]->generate( kinematicsB0_ ); // generate the flavour tagging response flavTag_->generateBkgndEventInfo( curEvtTrueTagFlv_, curEvtDecayTime_, bkgndID ); curEvtTagFlv_ = flavTag_->getCurEvtTagFlv(); curEvtMistag_ = flavTag_->getCurEvtMistag(); break; } case LauFlavTag::FlavourSpecific: { - const LauDecayTimePdf::FuncType dtType { BkgndDecayTimePdfs_[bkgndID]->getFuncType() }; - if ( dtType == LauDecayTimePdf::FuncType::ExpTrig or dtType == LauDecayTimePdf::FuncType::ExpHypTrig ) { + const LauDecayTime::FuncType dtType { BkgndDecayTimePdfs_[bkgndID]->getFuncType() }; + if ( dtType == LauDecayTime::FuncType::ExpTrig or dtType == LauDecayTime::FuncType::ExpHypTrig ) { nGenLoop_ = 0; Bool_t generatedEvent{kFALSE}; do { curEvtTrueTagFlv_ = LauFlavTag::Flavour::Unknown; curEvtDecayFlv_ = LauFlavTag::Flavour::Unknown; // First choose the true tag, accounting for the production asymmetry // CONVENTION WARNING regarding meaning of sign of AProd Double_t random = LauRandom::randomFun()->Rndm(); if (random <= 0.5 * ( 1.0 - AProdBkgnd_[bkgndID]->unblindValue() ) ) { curEvtTrueTagFlv_ = LauFlavTag::Flavour::B; } else { curEvtTrueTagFlv_ = LauFlavTag::Flavour::Bbar; } // Generate the DP position Double_t m13Sq{0.0}, m23Sq{0.0}; kinematicsB0bar_->genFlatPhaseSpace(m13Sq, m23Sq); // Next, calculate the total A^2 and Abar^2 for the given DP position BkgndDPModelsB_[bkgndID]->calcLikelihoodInfo(m13Sq, m23Sq); BkgndDPModelsBbar_[bkgndID]->calcLikelihoodInfo(m13Sq, m23Sq); // Generate decay time const Double_t tMin = BkgndDecayTimePdfs_[bkgndID]->minAbscissa(); const Double_t tMax = 
BkgndDecayTimePdfs_[bkgndID]->maxAbscissa(); curEvtDecayTime_ = LauRandom::randomFun()->Uniform(tMin,tMax); // Generate the decay time error (NB the kTRUE forces the generation of a new value) curEvtDecayTimeErr_ = BkgndDecayTimePdfs_[bkgndID]->generateError(kTRUE); // Calculate all the decay time info BkgndDecayTimePdfs_[bkgndID]->calcLikelihoodInfo(curEvtDecayTime_,curEvtDecayTimeErr_); // Retrieve the DP intensities const Double_t ASq { BkgndDPModelsB_[bkgndID]->getUnNormValue() }; const Double_t AbarSq { BkgndDPModelsBbar_[bkgndID]->getUnNormValue() }; // Also retrieve all the decay time terms const Double_t dtCos { BkgndDecayTimePdfs_[bkgndID]->getCosTerm() }; const Double_t dtCosh { BkgndDecayTimePdfs_[bkgndID]->getCoshTerm() }; // and the decay time acceptance const Double_t dtEff { BkgndDecayTimePdfs_[bkgndID]->getEffiTerm() }; if ( cpEigenValue_ == QFS) { // Calculate the total intensities for each flavour-specific final state const Double_t ATotSq { ( ASq * dtCosh + curEvtTrueTagFlv_ * ASq * dtCos ) * dtEff }; const Double_t AbarTotSq { ( AbarSq * dtCosh - curEvtTrueTagFlv_ * AbarSq * dtCos ) * dtEff }; const Double_t ASumSq { ATotSq + AbarTotSq }; // TODO - check if this really is the max possible const Double_t ASumSqMax { 2.0 * ( BkgndDPModelsB_[bkgndID]->getMaxHeight() + BkgndDPModelsBbar_[bkgndID]->getMaxHeight() ) }; // Finally we throw the dice to see whether this event should be generated (and, if so, which final state) const Double_t randNum = LauRandom::randomFun()->Rndm(); if (randNum <= ASumSq / ASumSqMax ) { generatedEvent = kTRUE; nGenLoop_ = 0; if (ASumSq > ASumSqMax) { std::cerr << "WARNING in LauTimeDepFitModel::generateBkgndEvent : ASumSq > ASumSqMax" << std::endl; } if ( randNum <= ATotSq / ASumSqMax ) { curEvtDecayFlv_ = LauFlavTag::Flavour::B; } else { curEvtDecayFlv_ = LauFlavTag::Flavour::Bbar; } // Generate the flavour tagging information from the true tag // (we do this after accepting the event to save time) 
flavTag_->generateBkgndEventInfo( curEvtTrueTagFlv_, curEvtDecayTime_, bkgndID ); curEvtTagFlv_ = flavTag_->getCurEvtTagFlv(); curEvtMistag_ = flavTag_->getCurEvtMistag(); } else { nGenLoop_++; } } else { // Calculate the DP terms const Double_t aSqSum { ASq + AbarSq }; const Double_t aSqDif { ASq - AbarSq }; // Combine DP and decay-time info for all terms const Double_t coshTerm { aSqSum * dtCosh }; const Double_t cosTerm { aSqDif * dtCos }; // Sum to obtain the total and multiply by the efficiency // Multiplying the cos term by the true flavour at production const Double_t ATotSq { ( coshTerm + curEvtTrueTagFlv_ * cosTerm ) * dtEff }; // TODO - check if this really is the max possible const Double_t ATotSqMax { 2.0 * TMath::Max( BkgndDPModelsB_[bkgndID]->getMaxHeight(), BkgndDPModelsBbar_[bkgndID]->getMaxHeight() ) }; // Finally we throw the dice to see whether this event should be generated const Double_t randNum = LauRandom::randomFun()->Rndm(); if (randNum <= ATotSq/ATotSqMax ) { generatedEvent = kTRUE; nGenLoop_ = 0; if (ATotSq > ATotSqMax) { // TODO std::cerr << "WARNING in LauTimeDepFitModel::generateBkgndEvent : ATotSq > ATotSqMax" << std::endl; } // Generate the flavour tagging information from the true tag // (we do this after accepting the event to save time) flavTag_->generateBkgndEventInfo( curEvtTrueTagFlv_, curEvtDecayTime_, bkgndID ); curEvtTagFlv_ = flavTag_->getCurEvtTagFlv(); curEvtMistag_ = flavTag_->getCurEvtMistag(); } else { nGenLoop_++; } } } while (generatedEvent == kFALSE && nGenLoop_ < iterationsMax_); } else { // Hist, Delta, Exp, DeltaExp decay-time types // First choose the true tag, accounting for the production asymmetry // CONVENTION WARNING regarding meaning of sign of AProd Double_t random = LauRandom::randomFun()->Rndm(); if (random <= 0.5 * ( 1.0 - AProdBkgnd_[bkgndID]->unblindValue() ) ) { curEvtTrueTagFlv_ = LauFlavTag::Flavour::B; } else { curEvtTrueTagFlv_ = LauFlavTag::Flavour::Bbar; } // Since there are no oscillations 
for these decay-time types, // the true decay flavour must be equal to the true tag flavour curEvtDecayFlv_ = curEvtTrueTagFlv_; // generate the DP position if ( curEvtDecayFlv_ == LauFlavTag::Flavour::B ) { BkgndDPModelsB_[bkgndID]->generate(); } else { BkgndDPModelsBbar_[bkgndID]->generate(); } // generate decay time and its error const LauKinematics* kinematics { ( curEvtDecayFlv_ == LauFlavTag::Flavour::B ) ? kinematicsB0_ : kinematicsB0bar_ }; curEvtDecayTimeErr_ = BkgndDecayTimePdfs_[bkgndID]->generateError(kTRUE); curEvtDecayTime_ = BkgndDecayTimePdfs_[bkgndID]->generate( kinematics ); // generate the flavour tagging response flavTag_->generateBkgndEventInfo( curEvtTrueTagFlv_, curEvtDecayTime_, bkgndID ); curEvtTagFlv_ = flavTag_->getCurEvtTagFlv(); curEvtMistag_ = flavTag_->getCurEvtMistag(); } break; } case LauFlavTag::SelfConjugate: // TODO break; case LauFlavTag::NonSelfConjugate: // TODO break; } return genOK; } void LauTimeDepFitModel::setupGenNtupleBranches() { // Setup the required ntuple branches this->addGenNtupleDoubleBranch("evtWeight"); this->addGenNtupleIntegerBranch("genSig"); this->addGenNtupleIntegerBranch("cpEigenvalue"); const TString& trueTagVarName { flavTag_->getTrueTagVarName() }; if ( trueTagVarName != "" ) { this->addGenNtupleIntegerBranch(trueTagVarName); } if ( cpEigenValue_ == QFS ) { const TString& decayFlvVarName { flavTag_->getDecayFlvVarName() }; if ( decayFlvVarName == "" ) { std::cerr<<"ERROR in LauTimeDepFitModel::setupGenNtupleBranches : Decay flavour variable not set for QFS decay, see LauFlavTag::setDecayFlvVarName()."<Exit(EXIT_FAILURE); } else { this->addGenNtupleIntegerBranch(decayFlvVarName); } } const std::vector& tagVarNames { flavTag_->getTagVarNames() }; const std::vector& mistagVarNames { flavTag_->getMistagVarNames() }; const ULong_t nTaggers {flavTag_->getNTaggers()}; for (ULong_t position{0}; positionaddGenNtupleIntegerBranch(tagVarNames[position]); this->addGenNtupleDoubleBranch(mistagVarNames[position]); } 
if (this->useDP() == kTRUE) { // Let's add the decay time variables. this->addGenNtupleDoubleBranch(signalDecayTimePdf_->varName()); - this->addGenNtupleDoubleBranch(signalDecayTimePdf_->varErrName()); + if ( signalDecayTimePdf_->varErrName() != "" ) { + this->addGenNtupleDoubleBranch(signalDecayTimePdf_->varErrName()); + } this->addGenNtupleDoubleBranch("m12"); this->addGenNtupleDoubleBranch("m23"); this->addGenNtupleDoubleBranch("m13"); this->addGenNtupleDoubleBranch("m12Sq"); this->addGenNtupleDoubleBranch("m23Sq"); this->addGenNtupleDoubleBranch("m13Sq"); this->addGenNtupleDoubleBranch("cosHel12"); this->addGenNtupleDoubleBranch("cosHel23"); this->addGenNtupleDoubleBranch("cosHel13"); if (kinematicsB0bar_->squareDP() && kinematicsB0_->squareDP()) { this->addGenNtupleDoubleBranch("mPrime"); this->addGenNtupleDoubleBranch("thPrime"); } // Can add the real and imaginary parts of the B0 and B0bar total // amplitudes seen in the generation (restrict this with a flag // that defaults to false) if ( storeGenAmpInfo_ ) { this->addGenNtupleDoubleBranch("reB0Amp"); this->addGenNtupleDoubleBranch("imB0Amp"); this->addGenNtupleDoubleBranch("reB0barAmp"); this->addGenNtupleDoubleBranch("imB0barAmp"); } } // Let's look at the extra variables for signal in one of the tagging categories for ( const LauAbsPdf* pdf : sigExtraPdf_ ) { const std::vector varNames{ pdf->varNames() }; for ( const TString& varName : varNames ) { if ( varName != "m13Sq" && varName != "m23Sq" ) { this->addGenNtupleDoubleBranch( varName ); } } } } void LauTimeDepFitModel::setDPDtBranchValues() { // Store the decay time variables. 
this->setGenNtupleDoubleBranchValue(signalDecayTimePdf_->varName(),curEvtDecayTime_); - this->setGenNtupleDoubleBranchValue(signalDecayTimePdf_->varErrName(),curEvtDecayTimeErr_); + if ( signalDecayTimePdf_->varErrName() != "" ) { + this->setGenNtupleDoubleBranchValue(signalDecayTimePdf_->varErrName(),curEvtDecayTimeErr_); + } // CONVENTION WARNING // TODO check - for now use B0 for any tags //LauKinematics* kinematics(0); //if (curEvtTagFlv_[position]<0) { LauKinematics* kinematics = kinematicsB0_; //} else { // kinematics = kinematicsB0bar_; //} // Store all the DP information this->setGenNtupleDoubleBranchValue("m12", kinematics->getm12()); this->setGenNtupleDoubleBranchValue("m23", kinematics->getm23()); this->setGenNtupleDoubleBranchValue("m13", kinematics->getm13()); this->setGenNtupleDoubleBranchValue("m12Sq", kinematics->getm12Sq()); this->setGenNtupleDoubleBranchValue("m23Sq", kinematics->getm23Sq()); this->setGenNtupleDoubleBranchValue("m13Sq", kinematics->getm13Sq()); this->setGenNtupleDoubleBranchValue("cosHel12", kinematics->getc12()); this->setGenNtupleDoubleBranchValue("cosHel23", kinematics->getc23()); this->setGenNtupleDoubleBranchValue("cosHel13", kinematics->getc13()); if (kinematics->squareDP()) { this->setGenNtupleDoubleBranchValue("mPrime", kinematics->getmPrime()); this->setGenNtupleDoubleBranchValue("thPrime", kinematics->getThetaPrime()); } // Can add the real and imaginary parts of the B0 and B0bar total // amplitudes seen in the generation (restrict this with a flag // that defaults to false) if ( storeGenAmpInfo_ ) { if ( this->getGenNtupleIntegerBranchValue("genSig")==1 ) { LauComplex Abar = sigModelB0bar_->getEvtDPAmp(); LauComplex A = sigModelB0_->getEvtDPAmp(); this->setGenNtupleDoubleBranchValue("reB0Amp", A.re()); this->setGenNtupleDoubleBranchValue("imB0Amp", A.im()); this->setGenNtupleDoubleBranchValue("reB0barAmp", Abar.re()); this->setGenNtupleDoubleBranchValue("imB0barAmp", Abar.im()); } else { 
this->setGenNtupleDoubleBranchValue("reB0Amp", 0.0); this->setGenNtupleDoubleBranchValue("imB0Amp", 0.0); this->setGenNtupleDoubleBranchValue("reB0barAmp", 0.0); this->setGenNtupleDoubleBranchValue("imB0barAmp", 0.0); } } } void LauTimeDepFitModel::generateExtraPdfValues(LauPdfList& extraPdfs, LauEmbeddedData* embeddedData) { // CONVENTION WARNING LauKinematics* kinematics = kinematicsB0_; //LauKinematics* kinematics(0); //if (curEvtTagFlv_<0) { // kinematics = kinematicsB0_; //} else { // kinematics = kinematicsB0bar_; //} // Generate from the extra PDFs for (auto& pdf : extraPdfs){ LauFitData genValues; if (embeddedData) { genValues = embeddedData->getValues( pdf->varNames() ); } else { genValues = pdf->generate(kinematics); } for (auto& var : genValues){ TString varName = var.first; if ( varName != "m13Sq" && varName != "m23Sq" ) { Double_t value = var.second; this->setGenNtupleDoubleBranchValue(varName,value); } } } } void LauTimeDepFitModel::propagateParUpdates() { // Update the complex mixing phase if (useSinCos_) { phiMixComplex_.setRealPart(cosPhiMix_.unblindValue()); phiMixComplex_.setImagPart(-1.0*sinPhiMix_.unblindValue()); } else { phiMixComplex_.setRealPart(TMath::Cos(-1.0*phiMix_.unblindValue())); phiMixComplex_.setImagPart(TMath::Sin(-1.0*phiMix_.unblindValue())); } // Update the total normalisation for the signal likelihood if (this->useDP() == kTRUE) { this->updateCoeffs(); sigModelB0bar_->updateCoeffs(coeffsB0bar_); sigModelB0_->updateCoeffs(coeffsB0_); this->calcInterTermNorm(); } // Update the decay time normalisation if ( signalDecayTimePdf_ ) { signalDecayTimePdf_->propagateParUpdates(); } // TODO // - maybe also need to add an update of the background decay time PDFs here // Update the signal events from the background numbers if not doing an extended fit // And update the tagging category fractions this->updateSigEvents(); } void LauTimeDepFitModel::updateSigEvents() { // The background parameters will have been set from Minuit. 
// We need to update the signal events using these. if (!this->doEMLFit()) { Double_t nTotEvts = this->eventsPerExpt(); Double_t signalEvents = nTotEvts; signalEvents_->range(-2.0*nTotEvts,2.0*nTotEvts); for (auto& nBkgndEvents : bkgndEvents_){ if ( nBkgndEvents->isLValue() ) { LauParameter* yield = dynamic_cast( nBkgndEvents ); yield->range(-2.0*nTotEvts,2.0*nTotEvts); } } // Subtract background events (if any) from signal. if (usingBkgnd_ == kTRUE) { for (auto& nBkgndEvents : bkgndEvents_){ signalEvents -= nBkgndEvents->value(); } } if ( ! signalEvents_->fixed() ) { signalEvents_->value(signalEvents); } } } void LauTimeDepFitModel::cacheInputFitVars() { // Fill the internal data trees of the signal and background models. // Note that we store the events of both charges in both the // negative and the positive models. It's only later, at the stage // when the likelihood is being calculated, that we separate them. LauFitDataTree* inputFitData = this->fitData(); evtCPEigenVals_.clear(); const Bool_t hasCPEV = ( (cpevVarName_ != "") && inputFitData->haveBranch( cpevVarName_ ) ); UInt_t nEvents = inputFitData->nEvents(); evtCPEigenVals_.reserve( nEvents ); LauFitData::const_iterator fitdata_iter; for (UInt_t iEvt = 0; iEvt < nEvents; iEvt++) { const LauFitData& dataValues = inputFitData->getData(iEvt); // if the CP-eigenvalue is in the data use those, otherwise use the default if ( hasCPEV ) { fitdata_iter = dataValues.find( cpevVarName_ ); const Int_t cpEV = static_cast( fitdata_iter->second ); if ( cpEV == 1 ) { cpEigenValue_ = CPEven; } else if ( cpEV == -1 ) { cpEigenValue_ = CPOdd; } else if ( cpEV == 0 ) { cpEigenValue_ = QFS; } else { std::cerr<<"WARNING in LauTimeDepFitModel::cacheInputFitVars : Unknown value: "<useDP() == kTRUE) { // DecayTime and SigmaDecayTime signalDecayTimePdf_->cacheInfo(*inputFitData); // cache all the backgrounds too for(auto& bg : BkgndDecayTimePdfs_) {bg->cacheInfo(*inputFitData);} } // Flavour tagging information 
flavTag_->cacheInputFitVars(inputFitData,signalDecayTimePdf_->varName()); // ...and then the extra PDFs if (not sigExtraPdf_.empty()){ this->cacheInfo(sigExtraPdf_, *inputFitData); } if(usingBkgnd_ == kTRUE){ for (auto& pdf : BkgndPdfs_){ this->cacheInfo(pdf, *inputFitData); } } if (this->useDP() == kTRUE) { sigModelB0bar_->fillDataTree(*inputFitData); sigModelB0_->fillDataTree(*inputFitData); if (usingBkgnd_ == kTRUE) { for (auto& model : BkgndDPModelsB_){ model->fillDataTree(*inputFitData); } for (auto& model : BkgndDPModelsBbar_){ if (model != nullptr) { model->fillDataTree(*inputFitData); } } } } } Double_t LauTimeDepFitModel::getTotEvtLikelihood(const UInt_t iEvt) { // Get the CP eigenvalue of the current event cpEigenValue_ = evtCPEigenVals_[iEvt]; // Get the DP and DecayTime likelihood for signal (TODO and eventually backgrounds) this->getEvtDPDtLikelihood(iEvt); // Get the combined extra PDFs likelihood for signal (TODO and eventually backgrounds) this->getEvtExtraLikelihoods(iEvt); // Construct the total likelihood for signal, qqbar and BBbar backgrounds Double_t sigLike = sigDPLike_ * sigExtraLike_; Double_t signalEvents = signalEvents_->unblindValue(); // TODO - consider what to do here - do we even want the option not to use the DP in this model? 
//if ( not this->useDP() ) { //signalEvents *= 0.5 * (1.0 + curEvtTagFlv_ * signalAsym_->unblindValue()); //} // Construct the total event likelihood Double_t likelihood { sigLike * signalEvents }; if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { // TODO // for combinatorial background (and perhaps others) this factor 0.5 needs to be here // to balance the factor 2 in the signal normalisation that arises from the sum over // tag decisions and integral over eta // for other (more signal-like) backgrounds where we need to think about things depending // on the tag decision and where there may be asymmetries as well this will (probably) arise naturally const Double_t bkgndEvents { 0.5 * bkgndEvents_[bkgndID]->unblindValue() }; likelihood += bkgndEvents*bkgndDPLike_[bkgndID]*bkgndExtraLike_[bkgndID]; } } return likelihood; } Double_t LauTimeDepFitModel::getEventSum() const { Double_t eventSum(0.0); eventSum += signalEvents_->unblindValue(); if (usingBkgnd_) { for ( const auto& yieldPar : bkgndEvents_ ) { eventSum += yieldPar->unblindValue(); } } return eventSum; } void LauTimeDepFitModel::getEvtDPDtLikelihood(const UInt_t iEvt) { // Function to return the signal and background likelihoods for the // Dalitz plot for the given event evtNo. if ( ! this->useDP() ) { // There's always going to be a term in the likelihood for the // signal, so we'd better not zero it. 
sigDPLike_ = 1.0; const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { if (usingBkgnd_ == kTRUE) { bkgndDPLike_[bkgndID] = 1.0; } else { bkgndDPLike_[bkgndID] = 0.0; } } return; } // Calculate event quantities // Get the DP dynamics, decay time, and flavour tagging to calculate // everything required for the likelihood calculation sigModelB0bar_->calcLikelihoodInfo(iEvt); sigModelB0_->calcLikelihoodInfo(iEvt); - signalDecayTimePdf_->calcLikelihoodInfo(iEvt); + signalDecayTimePdf_->calcLikelihoodInfo(static_cast(iEvt)); flavTag_->updateEventInfo(iEvt); // Retrieve the amplitudes and efficiency from the dynamics LauComplex Abar { sigModelB0bar_->getEvtDPAmp() }; LauComplex A { sigModelB0_->getEvtDPAmp() }; const Double_t dpEff { sigModelB0bar_->getEvtEff() }; // If this is a QFS decay, one of the DP amplitudes needs to be zeroed if (cpEigenValue_ == QFS){ curEvtDecayFlv_ = flavTag_->getCurEvtDecayFlv(); if ( curEvtDecayFlv_ == +1 ) { Abar.zero(); } else if ( curEvtDecayFlv_ == -1 ) { A.zero(); } else { std::cerr<<"ERROR in LauTimeDepFitModel::getEvtDPDtLikelihood : Decay flavour must be known for QFS decays."<Exit(EXIT_FAILURE); } } // Next calculate the DP terms const Double_t aSqSum { A.abs2() + Abar.abs2() }; const Double_t aSqDif { A.abs2() - Abar.abs2() }; Double_t interTermRe { 0.0 }; Double_t interTermIm { 0.0 }; if ( cpEigenValue_ != QFS ) { const LauComplex inter { Abar * A.conj() * phiMixComplex_ }; if ( cpEigenValue_ == CPEven ) { interTermIm = 2.0 * inter.im(); interTermRe = 2.0 * inter.re(); } else { interTermIm = -2.0 * inter.im(); interTermRe = -2.0 * inter.re(); } } // First get all the decay time terms // TODO Backgrounds // Get the decay time acceptance const Double_t dtEff { signalDecayTimePdf_->getEffiTerm() }; // Get all the decay time terms const Double_t dtCos { signalDecayTimePdf_->getCosTerm() }; const Double_t dtSin { signalDecayTimePdf_->getSinTerm() }; const Double_t dtCosh { 
signalDecayTimePdf_->getCoshTerm() }; const Double_t dtSinh { signalDecayTimePdf_->getSinhTerm() }; // Get the decay time error term const Double_t dtErrLike { signalDecayTimePdf_->getErrTerm() }; // Get flavour tagging terms Double_t omega{1.0}; Double_t omegabar{1.0}; const ULong_t nTaggers { flavTag_->getNTaggers() }; for (ULong_t position{0}; positiongetCapitalOmega(position, LauFlavTag::Flavour::B); omegabar *= flavTag_->getCapitalOmega(position, LauFlavTag::Flavour::Bbar); } const Double_t prodAsym { AProd_.unblindValue() }; const Double_t ftOmegaHyp { ((1.0 - prodAsym)*omega + (1.0 + prodAsym)*omegabar) }; const Double_t ftOmegaTrig { ((1.0 - prodAsym)*omega - (1.0 + prodAsym)*omegabar) }; const Double_t coshTerm { ftOmegaHyp * dtCosh * aSqSum }; const Double_t sinhTerm { ftOmegaHyp * dtSinh * interTermRe }; const Double_t cosTerm { ftOmegaTrig * dtCos * aSqDif }; const Double_t sinTerm { ftOmegaTrig * dtSin * interTermIm }; // Combine all terms to get the total amplitude squared const Double_t ASq { coshTerm + sinhTerm + cosTerm - sinTerm }; // Calculate the DP and time normalisation const Double_t normASqSum { sigModelB0_->getDPNorm() + sigModelB0bar_->getDPNorm() }; const Double_t normASqDiff { sigModelB0_->getDPNorm() - sigModelB0bar_->getDPNorm() }; Double_t normInterTermRe { 0.0 }; Double_t normInterTermIm { 0.0 }; if ( cpEigenValue_ != QFS ) { // TODO - double check this sign flipping here (it's presumably right but...) normInterTermRe = ( cpEigenValue_ == CPOdd ) ? -1.0 * interTermReNorm_ : interTermReNorm_; normInterTermIm = ( cpEigenValue_ == CPOdd ) ? 
-1.0 * interTermImNorm_ : interTermImNorm_; } const Double_t normCoshTerm { signalDecayTimePdf_->getNormTermCosh() }; const Double_t normSinhTerm { signalDecayTimePdf_->getNormTermSinh() }; const Double_t normCosTerm { signalDecayTimePdf_->getNormTermCos() }; const Double_t normSinTerm { signalDecayTimePdf_->getNormTermSin() }; const Double_t normHyp { normASqSum * normCoshTerm + normInterTermRe * normSinhTerm }; const Double_t normTrig { - prodAsym * ( normASqDiff * normCosTerm + normInterTermIm * normSinTerm ) }; // Combine all terms to get the total normalisation const Double_t norm { 2.0 * ( normHyp + normTrig ) }; // Multiply the squared-amplitude by the efficiency (DP and decay time) and decay-time error likelihood // and normalise to obtain the signal likelihood sigDPLike_ = ( ASq * dpEff * dtEff * dtErrLike ) / norm; // Background part // TODO add them into the actual Likelihood calculatiions // TODO sort out B and Bbar backgrounds for the DP here // TODO need to include the flavour tagging parts here as well (per tagger and per background source) and will vary by Bkgnd type as well // TODO add new function as getEvtBkgndLikelihoods? // TODO normalisation? const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { if (usingBkgnd_ == kTRUE) { Double_t omegaBkgnd{1.0}; Double_t omegaBarBkgnd{1.0}; - BkgndDecayTimePdfs_[bkgndID]->calcLikelihoodInfo(iEvt); + BkgndDecayTimePdfs_[bkgndID]->calcLikelihoodInfo(static_cast(iEvt)); // Consider background type // TODO would this not be cleaner as a switch/cases block? if (BkgndTypes_[bkgndID] == LauFlavTag::Combinatorial){ // For Histogram Dt Pdfs // TODO - any other decay time function types that make sense for combinatorial? // - if so, maybe convert this to a switch with a default case to catch the types that don't make sense? 
// For comb background the DP and TD models factorise completely, just mulitply them bkgndDPLike_[bkgndID] = BkgndDPModelsB_[bkgndID]->getLikelihood(iEvt); - if (BkgndDecayTimePdfs_[bkgndID]->getFuncType() == LauDecayTimePdf::Hist){ + if (BkgndDecayTimePdfs_[bkgndID]->getFuncType() == LauDecayTime::FuncType::Hist){ bkgndDPLike_[bkgndID] *= BkgndDecayTimePdfs_[bkgndID]->getHistTerm(); } else { bkgndDPLike_[bkgndID] *= BkgndDecayTimePdfs_[bkgndID]->getExpTerm()/BkgndDecayTimePdfs_[bkgndID]->getNormTermExp(); } // For flavour tagging for (ULong_t position{0}; positiongetCapitalOmegaBkgnd(position, LauFlavTag::Flavour::B, bkgndID); } bkgndDPLike_[bkgndID] *= omegaBkgnd; // TODO Add other bkg types } else if (BkgndTypes_[bkgndID] == LauFlavTag::FlavourSpecific){ switch( BkgndDecayTimePdfs_[bkgndID]->getFuncType() ) { - case LauDecayTimePdf::FuncType::Exp : //it still factorises + case LauDecayTime::FuncType::Exp : //it still factorises { bkgndDPLike_[bkgndID] = 0.5*(BkgndDPModelsB_[bkgndID]->getLikelihood(iEvt) + BkgndDPModelsBbar_[bkgndID]->getLikelihood(iEvt)); bkgndDPLike_[bkgndID] *= BkgndDecayTimePdfs_[bkgndID]->getExpTerm()/BkgndDecayTimePdfs_[bkgndID]->getNormTermExp(); for (ULong_t position{0}; positiongetCapitalOmegaBkgnd(position, LauFlavTag::Flavour::B , bkgndID); omegaBarBkgnd *= flavTag_->getCapitalOmegaBkgnd(position, LauFlavTag::Flavour::Bbar, bkgndID); } bkgndDPLike_[bkgndID] *= ((1.0 - prodAsym)*omegaBkgnd + (1.0 + prodAsym)*omegaBarBkgnd); break; } - case LauDecayTimePdf::FuncType::Hist: //it still factorises + case LauDecayTime::FuncType::Hist: //it still factorises { bkgndDPLike_[bkgndID] = 0.5*(BkgndDPModelsB_[bkgndID]->getLikelihood(iEvt) + BkgndDPModelsBbar_[bkgndID]->getLikelihood(iEvt)); bkgndDPLike_[bkgndID] *= BkgndDecayTimePdfs_[bkgndID]->getHistTerm(); for (ULong_t position{0}; positiongetCapitalOmegaBkgnd(position, LauFlavTag::Flavour::B , bkgndID); omegaBarBkgnd *= flavTag_->getCapitalOmegaBkgnd(position, LauFlavTag::Flavour::Bbar, 
bkgndID); } bkgndDPLike_[bkgndID] *= ((1.0 - prodAsym)*omegaBkgnd + (1.0 + prodAsym)*omegaBarBkgnd); break; } - case LauDecayTimePdf::FuncType::ExpTrig: // it doesn't factorise - case LauDecayTimePdf::FuncType::ExpHypTrig: + case LauDecayTime::FuncType::ExpTrig: // it doesn't factorise + case LauDecayTime::FuncType::ExpHypTrig: { //DP components first Double_t Asq { BkgndDPModelsB_[bkgndID] ->getUnNormValue(iEvt) }; Double_t Asqbar { BkgndDPModelsBbar_[bkgndID]->getUnNormValue(iEvt) }; //Used in the normalisation const Double_t AsqNorm { BkgndDPModelsB_[bkgndID] ->getPdfNorm() }; const Double_t AsqbarNorm { BkgndDPModelsBbar_[bkgndID]->getPdfNorm() }; //Do different things depending on whether the signal is Flav Specific or Self Conjugate if (cpEigenValue_ == QFS){ //Flavour specific curEvtDecayFlv_ = flavTag_->getCurEvtDecayFlv(); if ( curEvtDecayFlv_ == +1 ) { Asqbar = 0.; } else if ( curEvtDecayFlv_ == -1 ) { Asq = 0.; } } const Double_t AsqSum { Asq + Asqbar }; const Double_t AsqDiff { Asq - Asqbar }; const Double_t AsqNormSum { AsqNorm + AsqbarNorm }; //TODO check this shouldn't be `fabs`ed const Double_t AsqNormDiff { AsqNorm - AsqbarNorm }; // Now get all the decay time terms // Sin and Sinh terms are ignored: they FS modes can't exhibit TD CPV const Double_t dtCosBkgnd { BkgndDecayTimePdfs_[bkgndID]->getCosTerm() }; const Double_t dtCoshBkgnd { BkgndDecayTimePdfs_[bkgndID]->getCoshTerm() }; // Get all norm terms const Double_t normCosTermBkgnd { BkgndDecayTimePdfs_[bkgndID]->getNormTermCos() }; const Double_t normCoshTermBkgnd { BkgndDecayTimePdfs_[bkgndID]->getNormTermCosh() }; // Use signal flavour tagging for (ULong_t position{0}; positiongetCapitalOmegaBkgnd(position, LauFlavTag::Flavour::B , bkgndID); omegaBarBkgnd *= flavTag_->getCapitalOmegaBkgnd(position, LauFlavTag::Flavour::Bbar, bkgndID); } // Depends on the background source? Or use the signal one? 
// TODO change prodAsym to be that of this specific background, not the signal const Double_t ftOmegaHypBkgnd { ((1.0 - AProdBkgnd_[bkgndID]->unblindValue())*omegaBkgnd + (1.0 + AProdBkgnd_[bkgndID]->unblindValue())*omegaBarBkgnd) }; const Double_t ftOmegaTrigBkgnd { ((1.0 - AProdBkgnd_[bkgndID]->unblindValue())*omegaBkgnd - (1.0 + AProdBkgnd_[bkgndID]->unblindValue())*omegaBarBkgnd) }; //ExpTrig or ExpHypTrig modes //TODO Check normalisation const Double_t coshTermBkgnd { ftOmegaHypBkgnd * dtCoshBkgnd * AsqSum }; const Double_t cosTermBkgnd { ftOmegaTrigBkgnd * dtCosBkgnd * AsqDiff }; //See Laura note eq. 41 const Double_t normBkgnd { (normCoshTermBkgnd * AsqNormSum) - prodAsym*(normCosTermBkgnd * AsqNormDiff) }; bkgndDPLike_[bkgndID] *= (coshTermBkgnd + cosTermBkgnd)/normBkgnd; break; } - case LauDecayTimePdf::FuncType::Delta: //prompt case: irrellevant - case LauDecayTimePdf::FuncType::DeltaExp: + case LauDecayTime::FuncType::Delta: //prompt case: irrellevant + case LauDecayTime::FuncType::DeltaExp: //TODO move this error message std::cerr << "WARNING in LauTimeDepFitModel::getEvtDPDtLikelihood : bkgnd types Delta and DeltaExp don't make sense!" << std::endl; break; } } else if (BkgndTypes_[bkgndID] == LauFlavTag::SelfConjugate) { //Copy this from the CPeigenstate signal case std::cerr << "WARNING in LauTimeDepFitModel::getEvtDPDtLikelihood : SelfConjugate states aren't implemented yet!" << std::endl; bkgndDPLike_[bkgndID] = 0.0; break; } else if (BkgndTypes_[bkgndID] == LauFlavTag::NonSelfConjugate) { // TODO this has been ignored for now since it's not used in the B->Dpipi case std::cerr << "WARNING in LauTimeDepFitModel::getEvtDPDtLikelihood : NonSelfConjugate states aren't implemented yet!" 
<< std::endl; bkgndDPLike_[bkgndID] = 0.0; break; } // Get the decay time acceptance const Double_t dtEffBkgnd { BkgndDecayTimePdfs_[bkgndID]->getEffiTerm() }; // Get the decay time error term const Double_t dtErrLikeBkgnd { BkgndDecayTimePdfs_[bkgndID]->getErrTerm() }; // Include these terms in the background likelihood bkgndDPLike_[bkgndID] *= dtEffBkgnd * dtErrLikeBkgnd; } else { bkgndDPLike_[bkgndID] = 0.0; } } } void LauTimeDepFitModel::getEvtExtraLikelihoods(const UInt_t iEvt) { // Function to return the signal and background likelihoods for the // extra variables for the given event evtNo. sigExtraLike_ = 1.0; //There's always a likelihood term for signal, so we better not zero it. // First, those independent of the tagging of the event: // signal if ( not sigExtraPdf_.empty() ) { sigExtraLike_ = this->prodPdfValue( sigExtraPdf_, iEvt ); } // Background const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { if (usingBkgnd_) { bkgndExtraLike_[bkgndID] = this->prodPdfValue( BkgndPdfs_[bkgndID], iEvt ); } else { bkgndExtraLike_[bkgndID] = 0.0; } } } //TODO obsolete? void LauTimeDepFitModel::getEvtFlavTagLikelihood(const UInt_t iEvt) { // Function to return the signal and background likelihoods for the // extra variables for the given event evtNo. sigFlavTagLike_ = 1.0; //There's always a likelihood term for signal, so we better not zero it. 
// Loop over taggers const ULong_t nTaggers { flavTag_->getNTaggers() }; for (ULong_t position{0}; positioncalcLikelihoodInfo(iEvt); sigFlavTagLike_ = sigFlavTagPdf_[position]->getLikelihood(); } } if (sigFlavTagLike_<=0){ std::cout<<"INFO in LauTimeDepFitModel::getEvtFlavTagLikelihood : Event with 0 FlavTag Liklihood"<antiparticleCoeff()); coeffsB0_.push_back(coeffPars_[i]->particleCoeff()); } } void LauTimeDepFitModel::checkMixingPhase() { Double_t phase = phiMix_.value(); Double_t genPhase = phiMix_.genValue(); // Check now whether the phase lies in the right range (-pi to pi). Bool_t withinRange(kFALSE); while (withinRange == kFALSE) { if (phase > -LauConstants::pi && phase < LauConstants::pi) { withinRange = kTRUE; } else { // Not within the specified range if (phase > LauConstants::pi) { phase -= LauConstants::twoPi; } else if (phase < -LauConstants::pi) { phase += LauConstants::twoPi; } } } // A further problem can occur when the generated phase is close to -pi or pi. // The phase can wrap over to the other end of the scale - // this leads to artificially large pulls so we wrap it back. Double_t diff = phase - genPhase; if (diff > LauConstants::pi) { phase -= LauConstants::twoPi; } else if (diff < -LauConstants::pi) { phase += LauConstants::twoPi; } // finally store the new value in the parameter // and update the pull phiMix_.value(phase); phiMix_.updatePull(); } void LauTimeDepFitModel::embedSignal(const TString& fileName, const TString& treeName, Bool_t reuseEventsWithinEnsemble, Bool_t reuseEventsWithinExperiment) { if (signalTree_) { std::cerr<<"ERROR in LauTimeDepFitModel::embedSignal : Already embedding signal from file."<findBranches(); if (!dataOK) { delete signalTree_; signalTree_ = 0; std::cerr<<"ERROR in LauTimeDepFitModel::embedSignal : Problem creating data tree for embedding."<validBkgndClass( bkgndClass ) ) { std::cerr << "ERROR in LauSimpleFitModel::embedBkgnd : Invalid background class \"" << bkgndClass << "\"." 
<< std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; return; } UInt_t bkgndID = this->bkgndClassID( bkgndClass ); LauEmbeddedData* bkgTree = bkgndTree_[bkgndID]; if (bkgTree) { std::cerr << "ERROR in LauSimpleFitModel::embedBkgnd : Already embedding background from a file." << std::endl; return; } bkgTree = new LauEmbeddedData(fileName,treeName,reuseEventsWithinExperiment); Bool_t dataOK = bkgTree->findBranches(); if (!dataOK) { delete bkgTree; bkgTree = 0; std::cerr << "ERROR in LauSimpleFitModel::embedBkgnd : Problem creating data tree for embedding." << std::endl; return; } reuseBkgnd_[bkgndID] = reuseEventsWithinEnsemble; if (this->enableEmbedding() == kFALSE) { this->enableEmbedding(kTRUE); } } void LauTimeDepFitModel::setupSPlotNtupleBranches() { // add branches for storing the experiment number and the number of // the event within the current experiment this->addSPlotNtupleIntegerBranch("iExpt"); this->addSPlotNtupleIntegerBranch("iEvtWithinExpt"); // Store the efficiency of the event (for inclusive BF calculations). if (this->storeDPEff()) { this->addSPlotNtupleDoubleBranch("efficiency"); } // Store the total event likelihood for each species. 
this->addSPlotNtupleDoubleBranch("sigTotalLike"); if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { TString name( this->bkgndClassName(iBkgnd) ); name += "TotalLike"; this->addSPlotNtupleDoubleBranch(name); } } // Store the DP likelihoods if (this->useDP()) { this->addSPlotNtupleDoubleBranch("sigDPLike"); if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { TString name( this->bkgndClassName(iBkgnd) ); name += "DPLike"; this->addSPlotNtupleDoubleBranch(name); } } } // Store the likelihoods for each extra PDF this->addSPlotNtupleBranches(sigExtraPdf_, "sig"); if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { const TString& bkgndClass = this->bkgndClassName(iBkgnd); this->addSPlotNtupleBranches(BkgndPdfs_[iBkgnd], bkgndClass); } } } void LauTimeDepFitModel::addSPlotNtupleBranches(const LauPdfList& extraPdfs, const TString& prefix) { // Loop through each of the PDFs for ( const LauAbsPdf* pdf : extraPdfs ) { // Count the number of input variables that are not // DP variables (used in the case where there is DP // dependence for e.g. 
MVA) UInt_t nVars{0}; const std::vector varNames { pdf->varNames() }; for ( const TString& varName : varNames ) { if ( varName != "m13Sq" && varName != "m23Sq" ) { ++nVars; } } if ( nVars == 1 ) { // If the PDF only has one variable then // simply add one branch for that variable TString name{prefix}; name += pdf->varName(); name += "Like"; this->addSPlotNtupleDoubleBranch(name); } else if ( nVars == 2 ) { // If the PDF has two variables then we // need a branch for them both together and // branches for each TString allVars{""}; for ( const TString& varName : varNames ) { if ( varName != "m13Sq" && varName != "m23Sq" ) { allVars += varName; TString name{prefix}; name += varName; name += "Like"; this->addSPlotNtupleDoubleBranch(name); } } TString name{prefix}; name += allVars; name += "Like"; this->addSPlotNtupleDoubleBranch(name); } else { std::cerr<<"WARNING in LauTimeDepFitModel::addSPlotNtupleBranches : Can't yet deal with 3D PDFs."<calcLikelihoodInfo(iEvt); extraLike = pdf->getLikelihood(); totalLike *= extraLike; // Count the number of input variables that are not // DP variables (used in the case where there is DP // dependence for e.g. 
MVA) UInt_t nVars{0}; const std::vector varNames { pdf->varNames() }; for ( const TString& varName : varNames ) { if ( varName != "m13Sq" && varName != "m23Sq" ) { ++nVars; } } if ( nVars == 1 ) { // If the PDF only has one variable then // simply store the value for that variable TString name{prefix}; name += pdf->varName(); name += "Like"; this->setSPlotNtupleDoubleBranchValue(name, extraLike); } else if ( nVars == 2 ) { // If the PDF has two variables then we // store the value for them both together // and for each on their own TString allVars{""}; for ( const TString& varName : varNames ) { if ( varName != "m13Sq" && varName != "m23Sq" ) { allVars += varName; TString name{prefix}; name += varName; name += "Like"; const Double_t indivLike = pdf->getLikelihood( varName ); this->setSPlotNtupleDoubleBranchValue(name, indivLike); } } TString name{prefix}; name += allVars; name += "Like"; this->setSPlotNtupleDoubleBranchValue(name, extraLike); } else { std::cerr<<"WARNING in LauAllFitModel::setSPlotNtupleBranchValues : Can't yet deal with 3D PDFs."<useDP()) { nameSet.insert("DP"); } for ( const LauAbsPdf* pdf : sigExtraPdf_ ) { // Loop over the variables involved in each PDF const std::vector varNames { pdf->varNames() }; for ( const TString& varName : varNames ) { // If they are not DP coordinates then add them if ( varName != "m13Sq" && varName != "m23Sq" ) { nameSet.insert( varName ); } } } return nameSet; } LauSPlot::NumbMap LauTimeDepFitModel::freeSpeciesNames() const { LauSPlot::NumbMap numbMap; if (!signalEvents_->fixed() && this->doEMLFit()) { numbMap["sig"] = signalEvents_->genValue(); } if ( usingBkgnd_ ) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { const TString& bkgndClass = this->bkgndClassName(iBkgnd); const LauAbsRValue* par = bkgndEvents_[iBkgnd]; if (!par->fixed()) { numbMap[bkgndClass] = par->genValue(); if ( ! 
par->isLValue() ) { std::cerr << "WARNING in LauTimeDepFitModel::freeSpeciesNames : \"" << par->name() << "\" is a LauFormulaPar, which implies it is perhaps not entirely free to float in the fit, so the sWeight calculation may not be reliable" << std::endl; } } } } return numbMap; } LauSPlot::NumbMap LauTimeDepFitModel::fixdSpeciesNames() const { LauSPlot::NumbMap numbMap; if (signalEvents_->fixed() && this->doEMLFit()) { numbMap["sig"] = signalEvents_->genValue(); } if ( usingBkgnd_ ) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { const TString& bkgndClass = this->bkgndClassName(iBkgnd); const LauAbsRValue* par = bkgndEvents_[iBkgnd]; if (par->fixed()) { numbMap[bkgndClass] = par->genValue(); } } } return numbMap; } LauSPlot::TwoDMap LauTimeDepFitModel::twodimPDFs() const { LauSPlot::TwoDMap twodimMap; for ( const LauAbsPdf* pdf : sigExtraPdf_ ) { // Count the number of input variables that are not DP variables UInt_t nVars{0}; const std::vector varNames { pdf->varNames() }; for ( const TString& varName : varNames ) { if ( varName != "m13Sq" && varName != "m23Sq" ) { ++nVars; } } if ( nVars == 2 ) { twodimMap.insert( std::make_pair( "sig", std::make_pair( varNames[0], varNames[1] ) ) ); } } if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { const TString& bkgndClass = this->bkgndClassName(iBkgnd); for ( const LauAbsPdf* pdf : BkgndPdfs_[iBkgnd] ) { // Count the number of input variables that are not DP variables UInt_t nVars{0}; const std::vector varNames { pdf->varNames() }; for ( const TString& varName : varNames ) { if ( varName != "m13Sq" && varName != "m23Sq" ) { ++nVars; } } if ( nVars == 2 ) { twodimMap.insert( std::make_pair( bkgndClass, std::make_pair( varNames[0], varNames[1] ) ) ); } } } } return twodimMap; } void LauTimeDepFitModel::storePerEvtLlhds() { std::cout<<"INFO in LauTimeDepFitModel::storePerEvtLlhds : Storing per-event 
likelihood values..."<fitData(); // if we've not been using the DP model then we need to cache all // the info here so that we can get the efficiency from it if (!this->useDP() && this->storeDPEff()) { sigModelB0bar_->initialise(coeffsB0bar_); sigModelB0_->initialise(coeffsB0_); sigModelB0bar_->fillDataTree(*inputFitData); sigModelB0_->fillDataTree(*inputFitData); } UInt_t evtsPerExpt(this->eventsPerExpt()); LauIsobarDynamics* sigModel(sigModelB0bar_); for (UInt_t iEvt = 0; iEvt < evtsPerExpt; ++iEvt) { // Find out whether we have B0bar or B0 flavTag_->updateEventInfo(iEvt); curEvtTagFlv_ = flavTag_->getCurEvtTagFlv(); curEvtMistag_ = flavTag_->getCurEvtMistag(); // the DP information this->getEvtDPDtLikelihood(iEvt); if (this->storeDPEff()) { if (!this->useDP()) { sigModel->calcLikelihoodInfo(iEvt); } this->setSPlotNtupleDoubleBranchValue("efficiency",sigModel->getEvtEff()); } if (this->useDP()) { sigTotalLike_ = sigDPLike_; this->setSPlotNtupleDoubleBranchValue("sigDPLike",sigDPLike_); if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { TString name = this->bkgndClassName(iBkgnd); name += "DPLike"; this->setSPlotNtupleDoubleBranchValue(name,bkgndDPLike_[iBkgnd]); } } } else { sigTotalLike_ = 1.0; } // the signal PDF values sigTotalLike_ *= this->setSPlotNtupleBranchValues(sigExtraPdf_, "sig", iEvt); // the background PDF values if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { const TString& bkgndClass = this->bkgndClassName(iBkgnd); LauPdfList& pdfs = BkgndPdfs_[iBkgnd]; bkgndTotalLike_[iBkgnd] *= this->setSPlotNtupleBranchValues(pdfs, bkgndClass, iEvt); } } // the total likelihoods this->setSPlotNtupleDoubleBranchValue("sigTotalLike",sigTotalLike_); if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { TString name = this->bkgndClassName(iBkgnd); name += 
"TotalLike"; this->setSPlotNtupleDoubleBranchValue(name,bkgndTotalLike_[iBkgnd]); } } // fill the tree this->fillSPlotNtupleBranches(); } std::cout<<"INFO in LauTimeDepFitModel::storePerEvtLlhds : Finished storing per-event likelihood values."< #include #include #include #include #include "TFile.h" #include "TMinuit.h" #include "TRandom.h" #include "TSystem.h" #include "TVirtualFitter.h" #include "LauAbsBkgndDPModel.hh" #include "LauAbsCoeffSet.hh" #include "LauAbsPdf.hh" #include "LauAsymmCalc.hh" #include "LauComplex.hh" #include "LauConstants.hh" #include "LauDPPartialIntegralInfo.hh" #include "LauDaughters.hh" #include "LauDecayTimePdf.hh" #include "LauFitNtuple.hh" #include "LauGenNtuple.hh" #include "LauIsobarDynamics.hh" #include "LauKinematics.hh" #include "LauPrint.hh" #include "LauRandom.hh" #include "LauScfMap.hh" #include "LauTimeDepFlavModel.hh" ClassImp(LauTimeDepFlavModel) LauTimeDepFlavModel::LauTimeDepFlavModel(LauIsobarDynamics* modelB0bar, LauIsobarDynamics* modelB0, const Bool_t useUntaggedEvents, const TString& tagVarName, const TString& tagCatVarName) : LauAbsFitModel(), sigModelB0bar_(modelB0bar), sigModelB0_(modelB0), kinematicsB0bar_(modelB0bar ? modelB0bar->getKinematics() : 0), kinematicsB0_(modelB0 ? 
modelB0->getKinematics() : 0), useUntaggedEvents_(useUntaggedEvents), nSigComp_(0), nSigDPPar_(0), nDecayTimePar_(0), nExtraPdfPar_(0), nNormPar_(0), coeffsB0bar_(0), coeffsB0_(0), coeffPars_(0), fitFracB0bar_(0), fitFracB0_(0), fitFracAsymm_(0), acp_(0), meanEffB0bar_("meanEffB0bar",0.0,0.0,1.0), meanEffB0_("meanEffB0",0.0,0.0,1.0), DPRateB0bar_("DPRateB0bar",0.0,0.0,100.0), DPRateB0_("DPRateB0",0.0,0.0,100.0), signalEvents_(0), signalAsym_(0), signalTagCatFrac_(), tagVarName_(tagVarName), tagCatVarName_(tagCatVarName), cpevVarName_(""), validTagCats_(), curEvtTagFlv_(0), curEvtTagCat_(0), cpEigenValue_(CPEven), evtTagFlvVals_(0), evtTagCatVals_(0), evtCPEigenVals_(0), dilution_(), deltaDilution_(), //deltaM_("deltaM",LauConstants::deltaMd), deltaM_("deltaM",0.0), deltaGamma_("deltaGamma",0.0), tau_("tau",LauConstants::tauB0), phiMix_("phiMix", 2.0*LauConstants::beta, -LauConstants::threePi, LauConstants::threePi, kFALSE), sinPhiMix_("sinPhiMix", TMath::Sin(2.0*LauConstants::beta), -3.0, 3.0, kFALSE), cosPhiMix_("cosPhiMix", TMath::Cos(2.0*LauConstants::beta), -3.0, 3.0, kFALSE), useSinCos_(kFALSE), phiMixComplex_(TMath::Cos(-2.0*LauConstants::beta),TMath::Sin(-2.0*LauConstants::beta)), signalDecayTimePdfs_(), curEvtDecayTime_(0.0), curEvtDecayTimeErr_(0.0), sigExtraPdf_(), iterationsMax_(500000), nGenLoop_(0), ASq_(0.0), aSqMaxVar_(0.0), aSqMaxSet_(1.25), storeGenAmpInfo_(kFALSE), signalTree_(), reuseSignal_(kFALSE), sigDPLike_(0.0), sigExtraLike_(0.0), sigTotalLike_(0.0) { // Add the untagged category as a valid category this->addValidTagCat(0); // Set the fraction, average dilution and dilution difference for the untagged category this->setSignalTagCatPars(0, 1.0, 0.0, 0.0, kTRUE); } LauTimeDepFlavModel::~LauTimeDepFlavModel() { // TODO - need to delete the various embedded data structures here } void LauTimeDepFlavModel::setupBkgndVectors() { } void LauTimeDepFlavModel::setNSigEvents(LauParameter* nSigEvents) { if ( nSigEvents == 0 ) { std::cerr << "ERROR in 
LauTimeDepFlavModel::setNSigEvents : The LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( signalEvents_ != 0 ) { std::cerr << "ERROR in LauTimeDepFlavModel::setNSigEvents : You are trying to overwrite the signal yield." << std::endl; return; } if ( signalAsym_ != 0 ) { std::cerr << "ERROR in LauTimeDepFlavModel::setNSigEvents : You are trying to overwrite the signal asymmetry." << std::endl; return; } signalEvents_ = nSigEvents; signalEvents_->name("signalEvents"); Double_t value = nSigEvents->value(); signalEvents_->range(-2.0*(TMath::Abs(value)+1.0),2.0*(TMath::Abs(value)+1.0)); signalAsym_ = new LauParameter("signalAsym",0.0,-1.0,1.0,kTRUE); } void LauTimeDepFlavModel::setNSigEvents(LauParameter* nSigEvents, LauParameter* sigAsym) { if ( nSigEvents == 0 ) { std::cerr << "ERROR in LauTimeDepFlavModel::setNSigEvents : The event LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( sigAsym == 0 ) { std::cerr << "ERROR in LauTimeDepFlavModel::setNSigEvents : The asym LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( signalEvents_ != 0 ) { std::cerr << "ERROR in LauTimeDepFlavModel::setNSigEvents : You are trying to overwrite the signal yield." << std::endl; return; } if ( signalAsym_ != 0 ) { std::cerr << "ERROR in LauTimeDepFlavModel::setNSigEvents : You are trying to overwrite the signal asymmetry." 
<< std::endl; return; } signalEvents_ = nSigEvents; signalEvents_->name("signalEvents"); Double_t value = nSigEvents->value(); signalEvents_->range(-2.0*(TMath::Abs(value)+1.0), 2.0*(TMath::Abs(value)+1.0)); signalAsym_ = sigAsym; signalAsym_->name("signalAsym"); signalAsym_->range(-1.0,1.0); } void LauTimeDepFlavModel::setNBkgndEvents(LauAbsRValue* /*nBkgndEvents*/) { std::cerr << "WARNING in LauTimeDepFlavModel::setNBkgndEvents : This model does not yet support backgrounds" << std::endl; } void LauTimeDepFlavModel::addValidTagCats(const std::vector& tagCats) { for (std::vector::const_iterator iter = tagCats.begin(); iter != tagCats.end(); ++iter) { this->addValidTagCat(*iter); } } void LauTimeDepFlavModel::addValidTagCat(Int_t tagCat) { validTagCats_.insert(tagCat); } void LauTimeDepFlavModel::setSignalTagCatPars(const Int_t tagCat, const Double_t tagCatFrac, const Double_t dilution, const Double_t deltaDilution, const Bool_t fixTCFrac) { if (!this->validTagCat(tagCat)) { std::cerr<<"ERROR in LauTimeDepFlavModel::setSignalTagCatPars : Tagging category \""<checkSignalTagCatFractions(); only when the user has //set them all up, in this->initialise(); } void LauTimeDepFlavModel::checkSignalTagCatFractions() { Double_t totalTaggedFrac(0.0); for (LauTagCatParamMap::const_iterator iter=signalTagCatFrac_.begin(); iter!=signalTagCatFrac_.end(); ++iter) { if (iter->first != 0) { const LauParameter& par = iter->second; totalTaggedFrac += par.value(); } } if ( ((totalTaggedFrac < (1.0-1.0e-8))&&!useUntaggedEvents_) || (totalTaggedFrac > (1.0+1.0e-8)) ) { std::cerr<<"WARNING in LauTimeDepFlavModel::checkSignalTagCatFractions : Tagging category fractions add up to "<second; Double_t newVal = par.value() / totalTaggedFrac; par.value(newVal); par.initValue(newVal); par.genValue(newVal); } } else if (useUntaggedEvents_) { Double_t tagCatFrac = 1.0 - totalTaggedFrac; TString tagCatFracName("signalTagCatFrac0"); signalTagCatFrac_[0].name(tagCatFracName); 
signalTagCatFrac_[0].range(0.0,1.0); signalTagCatFrac_[0].value(tagCatFrac); signalTagCatFrac_[0].initValue(tagCatFrac); signalTagCatFrac_[0].genValue(tagCatFrac); signalTagCatFrac_[0].fixed(kTRUE); TString dilutionName("dilution0"); dilution_[0].name(dilutionName); dilution_[0].range(0.0,1.0); dilution_[0].value(0.0); dilution_[0].initValue(0.0); dilution_[0].genValue(0.0); TString deltaDilutionName("deltaDilution0"); deltaDilution_[0].name(deltaDilutionName); deltaDilution_[0].range(-2.0,2.0); deltaDilution_[0].value(0.0); deltaDilution_[0].initValue(0.0); deltaDilution_[0].genValue(0.0); } for (LauTagCatParamMap::const_iterator iter=dilution_.begin(); iter!=dilution_.end(); ++iter) { std::cout<<"INFO in LauTimeDepFlavModel::checkSignalTagCatFractions : Setting dilution for tagging category "<<(*iter).first<<" to "<<(*iter).second<validTagCat(tagCat)) { std::cerr<<"ERROR in LauTimeDepFlavModel::setSignalDtPdf : Tagging category \""<validTagCat(tagCat)) { std::cerr<<"ERROR in LauTimeDepFlavModel::setSignalPdfs : Tagging category \""<updateCoeffs(); // Initialisation if (this->useDP() == kTRUE) { this->initialiseDPModels(); } if (!this->useDP() && sigExtraPdf_.empty()) { std::cerr<<"ERROR in LauTimeDepFlavModel::initialise : Signal model doesn't exist for any variable."<Exit(EXIT_FAILURE); } if (this->useDP() == kTRUE) { // Check that we have all the Dalitz-plot models if ((sigModelB0bar_ == 0) || (sigModelB0_ == 0)) { std::cerr<<"ERROR in LauTimeDepFlavModel::initialise : the pointer to one (particle or anti-particle) of the signal DP models is null."<Exit(EXIT_FAILURE); } } // Check here that the tagging category fractions add up to 1, otherwise "normalise". Also set up the untagged cat. // NB this has to be done early in the initialization as other methods access the tagCats map. 
this->checkSignalTagCatFractions(); // Clear the vectors of parameter information so we can start from scratch this->clearFitParVectors(); // Set the fit parameters for signal and background models this->setSignalDPParameters(); // Set the fit parameters for the decay time models this->setDecayTimeParameters(); // Set the fit parameters for the extra PDFs this->setExtraPdfParameters(); // Set the initial bg and signal events this->setFitNEvents(); // Check that we have the expected number of fit variables const LauParameterPList& fitVars = this->fitPars(); if (fitVars.size() != (nSigDPPar_ + nDecayTimePar_ + nExtraPdfPar_ + nNormPar_)) { std::cerr<<"ERROR in LauTimeDepFlavModel::initialise : Number of fit parameters not of expected size."<Exit(EXIT_FAILURE); } this->setExtraNtupleVars(); } void LauTimeDepFlavModel::recalculateNormalisation() { sigModelB0bar_->recalculateNormalisation(); sigModelB0_->recalculateNormalisation(); sigModelB0bar_->modifyDataTree(); sigModelB0_->modifyDataTree(); this->calcInterferenceTermIntegrals(); } void LauTimeDepFlavModel::initialiseDPModels() { if (sigModelB0bar_ == 0) { std::cerr<<"ERROR in LauTimeDepFlavModel::initialiseDPModels : B0bar signal DP model doesn't exist"<Exit(EXIT_FAILURE); } if (sigModelB0_ == 0) { std::cerr<<"ERROR in LauTimeDepFlavModel::initialiseDPModels : B0 signal DP model doesn't exist"<Exit(EXIT_FAILURE); } // Need to check that the number of components we have and that the dynamics has matches up //const UInt_t nAmpB0bar = sigModelB0bar_->getnAmp(); //const UInt_t nAmpB0 = sigModelB0_->getnAmp(); const UInt_t nAmpB0bar = sigModelB0bar_->getnTotAmp(); const UInt_t nAmpB0 = sigModelB0_->getnTotAmp(); if ( nAmpB0bar != nAmpB0 ) { std::cerr << "ERROR in LauTimeDepFlavModel::initialiseDPModels : Unequal number of signal DP components in the particle and anti-particle models: " << nAmpB0bar << " != " << nAmpB0 << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( nAmpB0bar != nSigComp_ ) { std::cerr << "ERROR in 
LauTimeDepFlavModel::initialiseDPModels : Number of signal DP components in the model (" << nAmpB0bar << ") not equal to number of coefficients supplied (" << nSigComp_ << ")." << std::endl; gSystem->Exit(EXIT_FAILURE); } std::cout<<"INFO in LauTimeDepFlavModel::initialiseDPModels : Initialising signal DP model"<initialise(coeffsB0bar_); sigModelB0_->initialise(coeffsB0_); fifjEffSum_.clear(); fifjEffSum_.resize(nSigComp_); for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { fifjEffSum_[iAmp].resize(nSigComp_); } // calculate the integrals of the A*Abar terms this->calcInterferenceTermIntegrals(); this->calcInterTermNorm(); } void LauTimeDepFlavModel::calcInterferenceTermIntegrals() { const std::vector& integralInfoListB0bar = sigModelB0bar_->getIntegralInfos(); const std::vector& integralInfoListB0 = sigModelB0_->getIntegralInfos(); // TODO should check (first time) that they match in terms of number of entries in the vectors and that each entry has the same number of points, ranges, weights etc. 
LauComplex A, Abar, fifjEffSumTerm; for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) { fifjEffSum_[iAmp][jAmp].zero(); } } const UInt_t nIntegralRegions = integralInfoListB0bar.size(); for ( UInt_t iRegion(0); iRegion < nIntegralRegions; ++iRegion ) { const LauDPPartialIntegralInfo* integralInfoB0bar = integralInfoListB0bar[iRegion]; const LauDPPartialIntegralInfo* integralInfoB0 = integralInfoListB0[iRegion]; const UInt_t nm13Points = integralInfoB0bar->getnm13Points(); const UInt_t nm23Points = integralInfoB0bar->getnm23Points(); for (UInt_t m13 = 0; m13 < nm13Points; ++m13) { for (UInt_t m23 = 0; m23 < nm23Points; ++m23) { const Double_t weight = integralInfoB0bar->getWeight(m13,m23); const Double_t eff = integralInfoB0bar->getEfficiency(m13,m23); const Double_t effWeight = eff*weight; for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { A = integralInfoB0->getAmplitude(m13, m23, iAmp); for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) { Abar = integralInfoB0bar->getAmplitude(m13, m23, jAmp); fifjEffSumTerm = Abar*A.conj(); fifjEffSumTerm.rescale(effWeight); fifjEffSum_[iAmp][jAmp] += fifjEffSumTerm; } } } } } } void LauTimeDepFlavModel::calcInterTermNorm() { const std::vector fNormB0bar = sigModelB0bar_->getFNorm(); const std::vector fNormB0 = sigModelB0_->getFNorm(); LauComplex norm; for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) { LauComplex coeffTerm = coeffsB0bar_[jAmp]*coeffsB0_[iAmp].conj(); coeffTerm *= fifjEffSum_[iAmp][jAmp]; coeffTerm.rescale(fNormB0bar[jAmp] * fNormB0[iAmp]); norm += coeffTerm; } } norm *= phiMixComplex_; interTermReNorm_ = 2.0*norm.re(); interTermImNorm_ = 2.0*norm.im(); } void LauTimeDepFlavModel::setAmpCoeffSet(LauAbsCoeffSet* coeffSet) { // Is there a component called compName in the signal models? 
TString compName = coeffSet->name(); TString conjName = sigModelB0bar_->getConjResName(compName); //TODO this part needs work - it doesn't work for e.g. pi+ pi- K_S0, where you want the daughters to be in the same order but it is still conjugate! const LauDaughters* daughtersB0bar = sigModelB0bar_->getDaughters(); const LauDaughters* daughtersB0 = sigModelB0_->getDaughters(); const Bool_t conjugate = daughtersB0bar->isConjugate( daughtersB0 ); if ( ! sigModelB0bar_->hasResonance(compName) ) { if ( ! sigModelB0bar_->hasResonance(conjName) ) { std::cerr<<"ERROR in LauTimeDepFlavModel::setAmpCoeffSet : B0bar signal DP model doesn't contain component \""<name( compName ); } if ( conjugate ) { if ( ! sigModelB0_->hasResonance(conjName) ) { std::cerr<<"ERROR in LauTimeDepFlavModel::setAmpCoeffSet : B0 signal DP model doesn't contain component \""<hasResonance(compName) ) { std::cerr<<"ERROR in LauTimeDepFlavModel::setAmpCoeffSet : B0 signal DP model doesn't contain component \""<::const_iterator iter=coeffPars_.begin(); iter!=coeffPars_.end(); ++iter) { if ((*iter)->name() == compName) { std::cerr<<"ERROR in LauTimeDepFlavModel::setAmpCoeffSet : Have already set coefficients for \""<index(nSigComp_); coeffPars_.push_back(coeffSet); TString parName = coeffSet->baseName(); parName += "FitFracAsym"; fitFracAsymm_.push_back(LauParameter(parName, 0.0, -1.0, 1.0)); acp_.push_back(coeffSet->acp()); ++nSigComp_; std::cout<<"INFO in LauTimeDepFlavModel::setAmpCoeffSet : Added coefficients for component \""<acp(); LauAsymmCalc asymmCalc(fitFracB0bar_[i][i].value(), fitFracB0_[i][i].value()); Double_t asym = asymmCalc.getAsymmetry(); fitFracAsymm_[i].value(asym); if (initValues) { fitFracAsymm_[i].genValue(asym); fitFracAsymm_[i].initValue(asym); } } } void LauTimeDepFlavModel::setSignalDPParameters() { // Set the fit parameters for the signal model. nSigDPPar_ = 0; if ( ! 
this->useDP() ) { return; } std::cout << "INFO in LauTimeDepFlavModel::setSignalDPParameters : Setting the initial fit parameters for the signal DP model." << std::endl; // Place isobar coefficient parameters in vector of fit variables LauParameterPList& fitVars = this->fitPars(); for (UInt_t i = 0; i < nSigComp_; i++) { LauParameterPList pars = coeffPars_[i]->getParameters(); for (LauParameterPList::iterator iter = pars.begin(); iter != pars.end(); ++iter) { if ( !(*iter)->clone() ) { fitVars.push_back(*iter); ++nSigDPPar_; } } } // Obtain the resonance parameters and place them in the vector of fit variables and in a separate vector // Need to make sure that they are unique because some might appear in both DP models LauParameterPSet& resVars = this->resPars(); resVars.clear(); LauParameterPList& sigDPParsB0bar = sigModelB0bar_->getFloatingParameters(); LauParameterPList& sigDPParsB0 = sigModelB0_->getFloatingParameters(); for ( LauParameterPList::iterator iter = sigDPParsB0bar.begin(); iter != sigDPParsB0bar.end(); ++iter ) { if ( resVars.insert(*iter).second ) { fitVars.push_back(*iter); ++nSigDPPar_; } } for ( LauParameterPList::iterator iter = sigDPParsB0.begin(); iter != sigDPParsB0.end(); ++iter ) { if ( resVars.insert(*iter).second ) { fitVars.push_back(*iter); ++nSigDPPar_; } } } UInt_t LauTimeDepFlavModel::addParametersToFitList(LauTagCatDtPdfMap& theMap) { UInt_t counter(0); LauParameterPList& fitVars = this->fitPars(); // loop through the map for (LauTagCatDtPdfMap::iterator iter = theMap.begin(); iter != theMap.end(); ++iter) { // grab the pdf and then its parameters LauDecayTimePdf* thePdf = (*iter).second; // The first one is the tagging category LauAbsRValuePList& rvalues = thePdf->getParameters(); // loop through the parameters for (LauAbsRValuePList::iterator pars_iter = rvalues.begin(); pars_iter != rvalues.end(); ++pars_iter) { LauParameterPList params = (*pars_iter)->getPars(); for (LauParameterPList::iterator params_iter = params.begin(); 
params_iter != params.end(); ++params_iter) { // for each "original" parameter add it to the list of fit parameters and increment the counter if ( !(*params_iter)->clone() && ( !(*params_iter)->fixed() || (this->twoStageFit() && (*params_iter)->secondStage()) ) ) { fitVars.push_back(*params_iter); ++counter; } } } } return counter; } UInt_t LauTimeDepFlavModel::addParametersToFitList(LauTagCatPdfMap& theMap) { UInt_t counter(0); // loop through the map for (LauTagCatPdfMap::iterator iter = theMap.begin(); iter != theMap.end(); ++iter) { counter += this->addFitParameters(iter->second); // first is the tagging category } return counter; } void LauTimeDepFlavModel::setDecayTimeParameters() { nDecayTimePar_ = 0; // Loop over the Dt PDFs nDecayTimePar_ += this->addParametersToFitList(signalDecayTimePdfs_); LauParameterPList& fitVars = this->fitPars(); if (useSinCos_) { fitVars.push_back(&sinPhiMix_); fitVars.push_back(&cosPhiMix_); nDecayTimePar_ += 2; } else { fitVars.push_back(&phiMix_); ++nDecayTimePar_; } } void LauTimeDepFlavModel::setExtraPdfParameters() { // Include the parameters of the PDF for each tagging category in the fit // NB all of them are passed to the fit, even though some have been fixed through parameter.fixed(kTRUE) // With the new "cloned parameter" scheme only "original" parameters are passed to the fit. // Their clones are updated automatically when the originals are updated. 
nExtraPdfPar_ = 0; nExtraPdfPar_ += this->addParametersToFitList(sigExtraPdf_); } void LauTimeDepFlavModel::setFitNEvents() { nNormPar_ = 0; // Initialise the total number of events to be the sum of all the hypotheses Double_t nTotEvts = signalEvents_->value(); this->eventsPerExpt(TMath::FloorNint(nTotEvts)); LauParameterPList& fitVars = this->fitPars(); // if doing an extended ML fit add the signal fraction into the fit parameters if (this->doEMLFit()) { std::cout<<"INFO in LauTimeDepFlavModel::setFitNEvents : Initialising number of events for signal and background components..."<useDP() == kFALSE) { fitVars.push_back(signalAsym_); ++nNormPar_; } // tagging-category fractions for signal events for (LauTagCatParamMap::iterator iter = signalTagCatFrac_.begin(); iter != signalTagCatFrac_.end(); ++iter) { if (iter == signalTagCatFrac_.begin()) { continue; } LauParameter* par = &((*iter).second); fitVars.push_back(par); ++nNormPar_; } } void LauTimeDepFlavModel::setExtraNtupleVars() { // Set-up other parameters derived from the fit results, e.g. fit fractions. 
if (this->useDP() != kTRUE) { return; } // First clear the vectors so we start from scratch this->clearExtraVarVectors(); LauParameterList& extraVars = this->extraPars(); // Add the B0 and B0bar fit fractions for each signal component fitFracB0bar_ = sigModelB0bar_->getFitFractions(); if (fitFracB0bar_.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFlavModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetFitFractions(); if (fitFracB0_.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFlavModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); icalcAsymmetries(kTRUE); // Add the Fit Fraction asymmetry for each signal component for (UInt_t i = 0; i < nSigComp_; i++) { extraVars.push_back(fitFracAsymm_[i]); } // Add the calculated CP asymmetry for each signal component for (UInt_t i = 0; i < nSigComp_; i++) { extraVars.push_back(acp_[i]); } // Now add in the DP efficiency values Double_t initMeanEffB0bar = sigModelB0bar_->getMeanEff().initValue(); meanEffB0bar_.value(initMeanEffB0bar); meanEffB0bar_.initValue(initMeanEffB0bar); meanEffB0bar_.genValue(initMeanEffB0bar); extraVars.push_back(meanEffB0bar_); Double_t initMeanEffB0 = sigModelB0_->getMeanEff().initValue(); meanEffB0_.value(initMeanEffB0); meanEffB0_.initValue(initMeanEffB0); meanEffB0_.genValue(initMeanEffB0); extraVars.push_back(meanEffB0_); // Also add in the DP rates Double_t initDPRateB0bar = sigModelB0bar_->getDPRate().initValue(); DPRateB0bar_.value(initDPRateB0bar); DPRateB0bar_.initValue(initDPRateB0bar); DPRateB0bar_.genValue(initDPRateB0bar); extraVars.push_back(DPRateB0bar_); Double_t initDPRateB0 = sigModelB0_->getDPRate().initValue(); DPRateB0_.value(initDPRateB0); DPRateB0_.initValue(initDPRateB0); DPRateB0_.genValue(initDPRateB0); 
extraVars.push_back(DPRateB0_); } void LauTimeDepFlavModel::finaliseFitResults(const TString& tablePrefixName) { // Retrieve parameters from the fit results for calculations and toy generation // and eventually store these in output root ntuples/text files // Now take the fit parameters and update them as necessary // i.e. to make mag > 0.0, phase in the right range. // This function will also calculate any other values, such as the // fit fractions, using any errors provided by fitParErrors as appropriate. // Also obtain the pull values: (measured - generated)/(average error) if (this->useDP() == kTRUE) { for (UInt_t i = 0; i < nSigComp_; ++i) { // Check whether we have "a > 0.0", and phases in the right range coeffPars_[i]->finaliseValues(); } } // update the pulls on the event fractions and asymmetries if (this->doEMLFit()) { signalEvents_->updatePull(); } if (this->useDP() == kFALSE) { signalAsym_->updatePull(); } // Finalise the pulls on the decay time parameters for (LauTagCatDtPdfMap::iterator iter = signalDecayTimePdfs_.begin(); iter != signalDecayTimePdfs_.end(); ++iter) { LauDecayTimePdf* pdf = (*iter).second; pdf->updatePulls(); } if (useSinCos_) { cosPhiMix_.updatePull(); sinPhiMix_.updatePull(); } else { this->checkMixingPhase(); } // Update the pulls on all the extra PDFs' parameters for (LauTagCatPdfMap::iterator iter = sigExtraPdf_.begin(); iter != sigExtraPdf_.end(); ++iter) { this->updateFitParameters(iter->second); } // Tagging-category fractions for signal and background events Double_t firstCatFrac(1.0); Int_t firstCat(0); for (LauTagCatParamMap::iterator iter = signalTagCatFrac_.begin(); iter != signalTagCatFrac_.end(); ++iter) { if (iter == signalTagCatFrac_.begin()) { firstCat = iter->first; continue; } LauParameter& par = (*iter).second; firstCatFrac -= par.value(); // update the parameter pull par.updatePull(); } signalTagCatFrac_[firstCat].value(firstCatFrac); signalTagCatFrac_[firstCat].updatePull(); // Fill the fit results to the ntuple 
// update the coefficients and then calculate the fit fractions and ACP's if (this->useDP() == kTRUE) { this->updateCoeffs(); sigModelB0bar_->updateCoeffs(coeffsB0bar_); sigModelB0bar_->calcExtraInfo(); sigModelB0_->updateCoeffs(coeffsB0_); sigModelB0_->calcExtraInfo(); LauParArray fitFracB0bar = sigModelB0bar_->getFitFractions(); if (fitFracB0bar.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFlavModel::finaliseFitResults : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } LauParArray fitFracB0 = sigModelB0_->getFitFractions(); if (fitFracB0.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFlavModel::finaliseFitResults : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetMeanEff().value()); meanEffB0_.value(sigModelB0_->getMeanEff().value()); DPRateB0bar_.value(sigModelB0bar_->getDPRate().value()); DPRateB0_.value(sigModelB0_->getDPRate().value()); this->calcAsymmetries(); // Then store the final fit parameters, and any extra parameters for // the signal model (e.g. 
fit fractions, FF asymmetries, ACPs, mean efficiency and DP rate) this->clearExtraVarVectors(); LauParameterList& extraVars = this->extraPars(); for (UInt_t i(0); iprintFitFractions(std::cout); this->printAsymmetries(std::cout); } const LauParameterPList& fitVars = this->fitPars(); const LauParameterList& extraVars = this->extraPars(); LauFitNtuple* ntuple = this->fitNtuple(); ntuple->storeParsAndErrors(fitVars, extraVars); // find out the correlation matrix for the parameters ntuple->storeCorrMatrix(this->iExpt(), this->fitStatus(), this->covarianceMatrix()); // Fill the data into ntuple ntuple->updateFitNtuple(); // Print out the partial fit fractions, phases and the // averaged efficiency, reweighted by the dynamics (and anything else) if (this->writeLatexTable()) { TString sigOutFileName(tablePrefixName); sigOutFileName += "_"; sigOutFileName += this->iExpt(); sigOutFileName += "Expt.tex"; this->writeOutTable(sigOutFileName); } } void LauTimeDepFlavModel::printFitFractions(std::ostream& output) { // Print out Fit Fractions, total DP rate and mean efficiency // First for the B0bar events for (UInt_t i = 0; i < nSigComp_; i++) { const TString compName(coeffPars_[i]->name()); output<<"B0bar FitFraction for component "<useDP() == kTRUE) { // print the fit coefficients in one table coeffPars_.front()->printTableHeading(fout); for (UInt_t i = 0; i < nSigComp_; i++) { coeffPars_[i]->printTableRow(fout); } fout<<"\\hline"<name(); resName = resName.ReplaceAll("_", "\\_"); fout< =$ & $"; print.printFormat(fout, meanEffB0bar_.value()); fout << "$ & $"; print.printFormat(fout, meanEffB0_.value()); fout << "$ & & \\\\" << std::endl; if (useSinCos_) { fout << "$\\sinPhiMix =$ & $"; print.printFormat(fout, sinPhiMix_.value()); fout << " \\pm "; print.printFormat(fout, sinPhiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; fout << "$\\cosPhiMix =$ & $"; print.printFormat(fout, cosPhiMix_.value()); fout << " \\pm "; print.printFormat(fout, cosPhiMix_.error()); fout 
<< "$ & & & & & & & \\\\" << std::endl; } else { fout << "$\\phiMix =$ & $"; print.printFormat(fout, phiMix_.value()); fout << " \\pm "; print.printFormat(fout, phiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; } fout << "\\hline \n\\end{tabular}" << std::endl; } if (!sigExtraPdf_.empty()) { fout<<"\\begin{tabular}{|l|c|}"<printFitParameters(iter->second, fout); } fout<<"\\hline \n\\end{tabular}"<updateSigEvents(); // Check whether we want to have randomised initial fit parameters for the signal model if (this->useRandomInitFitPars() == kTRUE) { this->randomiseInitFitPars(); } } void LauTimeDepFlavModel::randomiseInitFitPars() { // Only randomise those parameters that are not fixed! std::cout<<"INFO in LauTimeDepFlavModel::randomiseInitFitPars : Randomising the initial values of the coefficients of the DP components (and phiMix)..."<randomiseInitValues(); } phiMix_.randomiseValue(-LauConstants::pi, LauConstants::pi); if (useSinCos_) { sinPhiMix_.initValue(TMath::Sin(phiMix_.initValue())); cosPhiMix_.initValue(TMath::Cos(phiMix_.initValue())); } } LauTimeDepFlavModel::LauGenInfo LauTimeDepFlavModel::eventsToGenerate() { // Determine the number of events to generate for each hypothesis // If we're smearing then smear each one individually // NB this individual smearing has to be done individually per tagging category as well LauGenInfo nEvtsGen; LauTagCatGenInfo eventsB0, eventsB0bar; // Signal // If we're including the DP and decay time we can't decide on the tag // yet, since it depends on the whole DP+dt PDF, however, if // we're not then we need to decide. 
Double_t evtWeight(1.0); Double_t nEvts = signalEvents_->genValue(); if ( nEvts < 0.0 ) { evtWeight = -1.0; nEvts = TMath::Abs( nEvts ); } Double_t sigAsym(0.0); if (this->useDP() == kFALSE) { sigAsym = signalAsym_->genValue(); for (LauTagCatParamMap::const_iterator iter = signalTagCatFrac_.begin(); iter != signalTagCatFrac_.end(); ++iter) { const LauParameter& par = iter->second; Double_t eventsbyTagCat = par.value() * nEvts; Double_t eventsB0byTagCat = TMath::Nint(eventsbyTagCat/2.0 * (1.0 - sigAsym)); Double_t eventsB0barbyTagCat = TMath::Nint(eventsbyTagCat/2.0 * (1.0 + sigAsym)); if (this->doPoissonSmearing()) { eventsB0byTagCat = LauRandom::randomFun()->Poisson(eventsB0byTagCat); eventsB0barbyTagCat = LauRandom::randomFun()->Poisson(eventsB0barbyTagCat); } eventsB0[iter->first] = std::make_pair( TMath::Nint(eventsB0byTagCat), evtWeight ); eventsB0bar[iter->first] = std::make_pair( TMath::Nint(eventsB0barbyTagCat), evtWeight ); } nEvtsGen[std::make_pair("signal",-1)] = eventsB0; nEvtsGen[std::make_pair("signal",+1)] = eventsB0bar; } else { Double_t rateB0bar = sigModelB0bar_->getDPRate().value(); Double_t rateB0 = sigModelB0_->getDPRate().value(); if ( rateB0bar+rateB0 > 1e-30) { sigAsym = (rateB0bar-rateB0)/(rateB0bar+rateB0); } for (LauTagCatParamMap::const_iterator iter = signalTagCatFrac_.begin(); iter != signalTagCatFrac_.end(); ++iter) { const LauParameter& par = iter->second; Double_t eventsbyTagCat = par.value() * nEvts; if (this->doPoissonSmearing()) { eventsbyTagCat = LauRandom::randomFun()->Poisson(eventsbyTagCat); } eventsB0[iter->first] = std::make_pair( TMath::Nint(eventsbyTagCat), evtWeight ); } nEvtsGen[std::make_pair("signal",0)] = eventsB0; // generate signal event, decide tag later. 
} std::cout<<"INFO in LauTimeDepFlavModel::eventsToGenerate : Generating toy MC with:"<setGenNtupleIntegerBranchValue("genSig",1); // All the generate*Event() methods have to fill in curEvtDecayTime_ and curEvtDecayTimeErr_ // In addition, generateSignalEvent has to decide on the tag and fill in curEvtTagFlv_ genOK = this->generateSignalEvent(); } else { genOK = kFALSE; } if (!genOK) { // If there was a problem with the generation then break out and return. // The problem model will have adjusted itself so that all should be OK next time. break; } if (this->useDP() == kTRUE) { this->setDPDtBranchValues(); // store DP, decay time and tagging variables in the ntuple } // Store the event's tag and tagging category this->setGenNtupleIntegerBranchValue("cpEigenvalue", cpEigenValue_); this->setGenNtupleIntegerBranchValue("tagCat",curEvtTagCat_); this->setGenNtupleIntegerBranchValue("tagFlv",curEvtTagFlv_); // Store the event number (within this experiment) // and then increment it this->setGenNtupleIntegerBranchValue("iEvtWithinExpt",evtNum); ++evtNum; // Write the values into the tree this->fillGenNtupleBranches(); // Print an occasional progress message if (iEvt%1000 == 0) {std::cout<<"INFO in LauTimeDepFlavModel::genExpt : Generated event number "<useDP() && genOK) { sigModelB0bar_->checkToyMC(kTRUE); sigModelB0_->checkToyMC(kTRUE); std::cout<<"aSqMaxSet = "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } LauParArray fitFracB0 = sigModelB0_->getFitFractions(); if (fitFracB0.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFlavModel::generate : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetMeanEff().value()); meanEffB0_.value(sigModelB0_->getMeanEff().value()); DPRateB0bar_.value(sigModelB0bar_->getDPRate().value()); DPRateB0_.value(sigModelB0_->getDPRate().value()); } } // If we're reusing embedded events or if the generation is being // reset then clear the 
lists of used events //if (!signalTree_.empty() && (reuseSignal_ || !genOK)) { if (reuseSignal_ || !genOK) { for(LauTagCatEmbDataMap::const_iterator iter = signalTree_.begin(); iter != signalTree_.end(); ++iter) { (iter->second)->clearUsedList(); } } return genOK; } Bool_t LauTimeDepFlavModel::generateSignalEvent() { // Generate signal event, including SCF if necessary. // DP:DecayTime generation follows. // If it's ok, we then generate mES, DeltaE, Fisher/NN... Bool_t genOK(kTRUE); Bool_t generatedEvent(kFALSE); Bool_t doSquareDP = kinematicsB0bar_->squareDP(); doSquareDP &= kinematicsB0_->squareDP(); LauKinematics* kinematics(kinematicsB0bar_); // find the right decay time PDF for the current tagging category LauTagCatDtPdfMap::const_iterator dt_iter = signalDecayTimePdfs_.find(curEvtTagCat_); LauDecayTimePdf* decayTimePdf = (dt_iter != signalDecayTimePdfs_.end()) ? dt_iter->second : 0; // find the right embedded data for the current tagging category LauTagCatEmbDataMap::const_iterator emb_iter = signalTree_.find(curEvtTagCat_); LauEmbeddedData* embeddedData = (emb_iter != signalTree_.end()) ? emb_iter->second : 0; // find the right extra PDFs for the current tagging category LauTagCatPdfMap::iterator extra_iter = sigExtraPdf_.find(curEvtTagCat_); LauPdfList* extraPdfs = (extra_iter != sigExtraPdf_.end()) ? 
&(extra_iter->second) : 0; if (this->useDP()) { if (embeddedData) { embeddedData->getEmbeddedEvent(kinematics); curEvtTagFlv_ = TMath::Nint(embeddedData->getValue("tagFlv")); curEvtDecayTimeErr_ = embeddedData->getValue(decayTimePdf->varErrName()); curEvtDecayTime_ = embeddedData->getValue(decayTimePdf->varName()); if (embeddedData->haveBranch("mcMatch")) { Int_t match = TMath::Nint(embeddedData->getValue("mcMatch")); if (match) { this->setGenNtupleIntegerBranchValue("genTMSig",1); this->setGenNtupleIntegerBranchValue("genSCFSig",0); } else { this->setGenNtupleIntegerBranchValue("genTMSig",0); this->setGenNtupleIntegerBranchValue("genSCFSig",1); } } } else { nGenLoop_ = 0; // generate the decay time error (NB the kTRUE forces the generation of a new value) curEvtDecayTimeErr_ = decayTimePdf->generateError(kTRUE); while (generatedEvent == kFALSE && nGenLoop_ < iterationsMax_) { // Calculate the unnormalised truth-matched signal likelihood // First let define the tag flavour Double_t randNo = LauRandom::randomFun()->Rndm(); if (randNo < 0.5) { curEvtTagFlv_ = +1; // B0 tag } else { curEvtTagFlv_ = -1; // B0bar tag } // Calculate event quantities that depend only on the tagCat and tagFlv Double_t qD = curEvtTagFlv_*dilution_[curEvtTagCat_].unblindValue(); Double_t qDDo2 = curEvtTagFlv_*0.5*deltaDilution_[curEvtTagCat_].unblindValue(); // Generate the DP position Double_t m13Sq(0.0), m23Sq(0.0); kinematicsB0bar_->genFlatPhaseSpace(m13Sq, m23Sq); // Next, calculate the total A and Abar for the given DP position sigModelB0_->calcLikelihoodInfo(m13Sq, m23Sq); sigModelB0bar_->calcLikelihoodInfo(m13Sq, m23Sq); // Retrieve the amplitudes and efficiency from the dynamics const LauComplex& Abar = sigModelB0bar_->getEvtDPAmp(); const LauComplex& A = sigModelB0_->getEvtDPAmp(); Double_t eff = sigModelB0bar_->getEvtEff(); // Next calculate the DP terms Double_t aSqSum = A.abs2() + Abar.abs2(); Double_t aSqDif = A.abs2() - Abar.abs2(); LauComplex inter = Abar * A.conj() * 
phiMixComplex_; Double_t interTermIm = 2.0 * inter.im(); Double_t interTermRe = 2.0 * inter.re(); // Generate decay time const Double_t tMin = decayTimePdf->minAbscissa(); const Double_t tMax = decayTimePdf->maxAbscissa(); curEvtDecayTime_ = LauRandom::randomFun()->Rndm()*(tMax-tMin) + tMin; // Calculate all the decay time info decayTimePdf->calcLikelihoodInfo(curEvtDecayTime_, curEvtDecayTimeErr_); // First get all the decay time terms //Double_t dtExp = decayTimePdf->getExpTerm(); Double_t dtCos = decayTimePdf->getCosTerm(); Double_t dtSin = decayTimePdf->getSinTerm(); Double_t dtCosh = decayTimePdf->getCoshTerm(); Double_t dtSinh = decayTimePdf->getSinhTerm(); // Combine all terms Double_t cosTerm = dtCos * qD * aSqDif; Double_t sinTerm = dtSin * qD * interTermIm; Double_t coshTerm = dtCosh * (1.0 + qDDo2) * aSqSum; Double_t sinhTerm = dtSinh * (1.0 + qDDo2) * interTermRe; if ( cpEigenValue_ == CPOdd ) { sinTerm *= -1.0; sinhTerm *= -1.0; } // ... to get the total and multiply by the efficiency Double_t ASq = coshTerm + cosTerm - sinTerm + sinhTerm; //ASq /= decayTimePdf->getNormTerm(); ASq *= eff; //Finally we throw the dice to see whether this event should be generated //We make a distinction between the likelihood of TM and SCF to tag the SCF events as such randNo = LauRandom::randomFun()->Rndm(); if (randNo <= ASq/aSqMaxSet_ ) { generatedEvent = kTRUE; nGenLoop_ = 0; if (ASq > aSqMaxVar_) {aSqMaxVar_ = ASq;} } else { nGenLoop_++; } } // end of while !generatedEvent loop } // end of if (embeddedData) else control } else { if ( embeddedData ) { embeddedData->getEmbeddedEvent(0); curEvtTagFlv_ = TMath::Nint(embeddedData->getValue("tagFlv")); curEvtDecayTimeErr_ = embeddedData->getValue(decayTimePdf->varErrName()); curEvtDecayTime_ = embeddedData->getValue(decayTimePdf->varName()); } } // Check whether we have generated the toy MC OK. 
if (nGenLoop_ >= iterationsMax_) { aSqMaxSet_ = 1.01 * aSqMaxVar_; genOK = kFALSE; std::cerr<<"WARNING in LauTimeDepFlavModel::generateSignalEvent : Hit max iterations: setting aSqMaxSet_ to "< aSqMaxSet_) { aSqMaxSet_ = 1.01 * aSqMaxVar_; genOK = kFALSE; std::cerr<<"WARNING in LauTimeDepFlavModel::generateSignalEvent : Found a larger ASq value: setting aSqMaxSet_ to "<updateKinematics(kinematicsB0bar_->getm13Sq(), kinematicsB0bar_->getm23Sq() ); this->generateExtraPdfValues(extraPdfs, embeddedData); } // Check for problems with the embedding if (embeddedData && (embeddedData->nEvents() == embeddedData->nUsedEvents())) { std::cerr<<"WARNING in LauTimeDepFlavModel::generateSignalEvent : Source of embedded signal events used up, clearing the list of used events."<clearUsedList(); } return genOK; } void LauTimeDepFlavModel::setupGenNtupleBranches() { // Setup the required ntuple branches this->addGenNtupleDoubleBranch("evtWeight"); this->addGenNtupleIntegerBranch("genSig"); this->addGenNtupleIntegerBranch("cpEigenvalue"); this->addGenNtupleIntegerBranch("tagFlv"); this->addGenNtupleIntegerBranch("tagCat"); if (this->useDP() == kTRUE) { // Let's add the decay time variables. 
if (signalDecayTimePdfs_.begin() != signalDecayTimePdfs_.end()) { LauDecayTimePdf* pdf = signalDecayTimePdfs_.begin()->second; this->addGenNtupleDoubleBranch(pdf->varName()); this->addGenNtupleDoubleBranch(pdf->varErrName()); } this->addGenNtupleDoubleBranch("m12"); this->addGenNtupleDoubleBranch("m23"); this->addGenNtupleDoubleBranch("m13"); this->addGenNtupleDoubleBranch("m12Sq"); this->addGenNtupleDoubleBranch("m23Sq"); this->addGenNtupleDoubleBranch("m13Sq"); this->addGenNtupleDoubleBranch("cosHel12"); this->addGenNtupleDoubleBranch("cosHel23"); this->addGenNtupleDoubleBranch("cosHel13"); if (kinematicsB0bar_->squareDP() && kinematicsB0_->squareDP()) { this->addGenNtupleDoubleBranch("mPrime"); this->addGenNtupleDoubleBranch("thPrime"); } // Can add the real and imaginary parts of the B0 and B0bar total // amplitudes seen in the generation (restrict this with a flag // that defaults to false) if ( storeGenAmpInfo_ ) { this->addGenNtupleDoubleBranch("reB0Amp"); this->addGenNtupleDoubleBranch("imB0Amp"); this->addGenNtupleDoubleBranch("reB0barAmp"); this->addGenNtupleDoubleBranch("imB0barAmp"); } } // Let's look at the extra variables for signal in one of the tagging categories if ( ! sigExtraPdf_.empty() ) { LauPdfList oneTagCatPdfList = sigExtraPdf_.begin()->second; for (LauPdfList::const_iterator pdf_iter = oneTagCatPdfList.begin(); pdf_iter != oneTagCatPdfList.end(); ++pdf_iter) { for ( std::vector::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) { if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { this->addGenNtupleDoubleBranch( (*var_iter) ); } } } } } void LauTimeDepFlavModel::setDPDtBranchValues() { // Store the decay time variables. 
if (signalDecayTimePdfs_.begin() != signalDecayTimePdfs_.end()) { LauDecayTimePdf* pdf = signalDecayTimePdfs_.begin()->second; this->setGenNtupleDoubleBranchValue(pdf->varName(),curEvtDecayTime_); this->setGenNtupleDoubleBranchValue(pdf->varErrName(),curEvtDecayTimeErr_); } LauKinematics* kinematics(0); if (curEvtTagFlv_<0) { kinematics = kinematicsB0_; } else { kinematics = kinematicsB0bar_; } // Store all the DP information this->setGenNtupleDoubleBranchValue("m12", kinematics->getm12()); this->setGenNtupleDoubleBranchValue("m23", kinematics->getm23()); this->setGenNtupleDoubleBranchValue("m13", kinematics->getm13()); this->setGenNtupleDoubleBranchValue("m12Sq", kinematics->getm12Sq()); this->setGenNtupleDoubleBranchValue("m23Sq", kinematics->getm23Sq()); this->setGenNtupleDoubleBranchValue("m13Sq", kinematics->getm13Sq()); this->setGenNtupleDoubleBranchValue("cosHel12", kinematics->getc12()); this->setGenNtupleDoubleBranchValue("cosHel23", kinematics->getc23()); this->setGenNtupleDoubleBranchValue("cosHel13", kinematics->getc13()); if (kinematics->squareDP()) { this->setGenNtupleDoubleBranchValue("mPrime", kinematics->getmPrime()); this->setGenNtupleDoubleBranchValue("thPrime", kinematics->getThetaPrime()); } // Can add the real and imaginary parts of the B0 and B0bar total // amplitudes seen in the generation (restrict this with a flag // that defaults to false) if ( storeGenAmpInfo_ ) { if ( this->getGenNtupleIntegerBranchValue("genSig")==1 ) { LauComplex Abar = sigModelB0bar_->getEvtDPAmp(); LauComplex A = sigModelB0_->getEvtDPAmp(); this->setGenNtupleDoubleBranchValue("reB0Amp", A.re()); this->setGenNtupleDoubleBranchValue("imB0Amp", A.im()); this->setGenNtupleDoubleBranchValue("reB0barAmp", Abar.re()); this->setGenNtupleDoubleBranchValue("imB0barAmp", Abar.im()); } else { this->setGenNtupleDoubleBranchValue("reB0Amp", 0.0); this->setGenNtupleDoubleBranchValue("imB0Amp", 0.0); this->setGenNtupleDoubleBranchValue("reB0barAmp", 0.0); 
this->setGenNtupleDoubleBranchValue("imB0barAmp", 0.0);
		}
	}
}

// Generate values for the extra (non-DP) PDFs of the current event and store
// them in the generation ntuple. If embedded data is supplied the values are
// read from it instead of being generated.
void LauTimeDepFlavModel::generateExtraPdfValues(LauPdfList* extraPdfs, LauEmbeddedData* embeddedData)
{
	// Choose the kinematics matching the tag flavour of the current event
	LauKinematics* kinematics(0);
	if (curEvtTagFlv_<0) {
		kinematics = kinematicsB0_;
	} else {
		kinematics = kinematicsB0bar_;
	}

	// Generate from the extra PDFs
	if (extraPdfs) {
		for (LauPdfList::iterator pdf_iter = extraPdfs->begin(); pdf_iter != extraPdfs->end(); ++pdf_iter) {
			LauFitData genValues;
			if (embeddedData) {
				// Take the values from the embedded sample rather than generating
				genValues = embeddedData->getValues( (*pdf_iter)->varNames() );
			} else {
				genValues = (*pdf_iter)->generate(kinematics);
			}
			for ( LauFitData::const_iterator var_iter = genValues.begin(); var_iter != genValues.end(); ++var_iter ) {
				TString varName = var_iter->first;
				// DP coordinates are stored elsewhere, so skip them here
				if ( varName != "m13Sq" && varName != "m23Sq" ) {
					Double_t value = var_iter->second;
					this->setGenNtupleDoubleBranchValue(varName,value);
				}
			}
		}
	}
}

// Propagate the latest fit-parameter values (from the minimiser) into the
// derived quantities: the complex mixing phase, the DP coefficient sets and,
// for non-extended fits, the signal yield.
void LauTimeDepFlavModel::propagateParUpdates()
{
	// Update the complex mixing phase
	// NB the conjugate convention: the imaginary part carries a -1 factor
	if (useSinCos_) {
		phiMixComplex_.setRealPart(cosPhiMix_.unblindValue());
		phiMixComplex_.setImagPart(-1.0*sinPhiMix_.unblindValue());
	} else {
		phiMixComplex_.setRealPart(TMath::Cos(-1.0*phiMix_.unblindValue()));
		phiMixComplex_.setImagPart(TMath::Sin(-1.0*phiMix_.unblindValue()));
	}

	// Update the total normalisation for the signal likelihood
	if (this->useDP() == kTRUE) {
		this->updateCoeffs();
		sigModelB0bar_->updateCoeffs(coeffsB0bar_);
		sigModelB0_->updateCoeffs(coeffsB0_);
		this->calcInterTermNorm();
	}

	// Update the signal events from the background numbers if not doing an extended fit
	if (!this->doEMLFit()) {
		this->updateSigEvents();
	}
}

void LauTimeDepFlavModel::updateSigEvents()
{
	// The background parameters will have been set from Minuit.
	// We need to update the signal events using these.
Double_t nTotEvts = this->eventsPerExpt(); Double_t signalEvents = nTotEvts; // tagging-category fractions for signal events this->setFirstTagCatFrac(signalTagCatFrac_); signalEvents_->range(-2.0*nTotEvts,2.0*nTotEvts); if ( ! signalEvents_->fixed() ) { signalEvents_->value(signalEvents); } } void LauTimeDepFlavModel::setFirstTagCatFrac(LauTagCatParamMap& theMap) { Double_t firstCatFrac = 1.0; Int_t firstCat(0); for (LauTagCatParamMap::iterator iter = theMap.begin(); iter != theMap.end(); ++iter) { if (iter == theMap.begin()) { firstCat = iter->first; continue; } LauParameter& par = iter->second; firstCatFrac -= par.unblindValue(); } theMap[firstCat].value(firstCatFrac); } void LauTimeDepFlavModel::cacheInputFitVars() { // Fill the internal data trees of the signal and background models. // Note that we store the events of both charges in both the // negative and the positive models. It's only later, at the stage // when the likelihood is being calculated, that we separate them. LauFitDataTree* inputFitData = this->fitData(); // Start by caching the tagging and CP-eigenstate information evtTagCatVals_.clear(); evtTagFlvVals_.clear(); evtCPEigenVals_.clear(); if ( ! inputFitData->haveBranch( tagCatVarName_ ) ) { std::cerr << "ERROR in LauTimeDepFlavModel::cacheInputFitVars : Input data does not contain branch \"" << tagCatVarName_ << "\"." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( ! inputFitData->haveBranch( tagVarName_ ) ) { std::cerr << "ERROR in LauTimeDepFlavModel::cacheInputFitVars : Input data does not contain branch \"" << tagVarName_ << "\"." 
<< std::endl; gSystem->Exit(EXIT_FAILURE); } const Bool_t hasCPEV = ( (cpevVarName_ != "") && inputFitData->haveBranch( cpevVarName_ ) ); UInt_t nEvents = inputFitData->nEvents(); evtTagCatVals_.reserve( nEvents ); evtTagFlvVals_.reserve( nEvents ); evtCPEigenVals_.reserve( nEvents ); LauFitData::const_iterator fitdata_iter; for (UInt_t iEvt = 0; iEvt < nEvents; iEvt++) { const LauFitData& dataValues = inputFitData->getData(iEvt); fitdata_iter = dataValues.find( tagCatVarName_ ); curEvtTagCat_ = static_cast( fitdata_iter->second ); if ( ! this->validTagCat( curEvtTagCat_ ) ) { std::cerr << "WARNING in LauTimeDepFlavModel::cacheInputFitVars : Invalid tagging category " << curEvtTagCat_ << " for event " << iEvt << ", setting it to untagged" << std::endl; curEvtTagCat_ = 0; } evtTagCatVals_.push_back( curEvtTagCat_ ); fitdata_iter = dataValues.find( tagVarName_ ); curEvtTagFlv_ = static_cast( fitdata_iter->second ); if ( TMath::Abs( curEvtTagFlv_ ) != 1 ) { if ( curEvtTagFlv_ > 0 ) { std::cerr << "WARNING in LauTimeDepFlavModel::cacheInputFitVars : Invalid tagging output " << curEvtTagFlv_ << " for event " << iEvt << ", setting it to +1" << std::endl; curEvtTagFlv_ = +1; } else { std::cerr << "WARNING in LauTimeDepFlavModel::cacheInputFitVars : Invalid tagging output " << curEvtTagFlv_ << " for event " << iEvt << ", setting it to -1" << std::endl; curEvtTagFlv_ = -1; } } evtTagFlvVals_.push_back( curEvtTagFlv_ ); // if the CP-eigenvalue is in the data use those, otherwise use the default if ( hasCPEV ) { fitdata_iter = dataValues.find( cpevVarName_ ); const Int_t cpEV = static_cast( fitdata_iter->second ); if ( cpEV == 1 ) { cpEigenValue_ = CPEven; } else if ( cpEV == -1 ) { cpEigenValue_ = CPOdd; } else { std::cerr<<"WARNING in LauTimeDepFlavModel::cacheInputFitVars : Unknown value: "<useDP() == kTRUE) { // DecayTime and SigmaDecayTime for (LauTagCatDtPdfMap::iterator dt_iter = signalDecayTimePdfs_.begin(); dt_iter != signalDecayTimePdfs_.end(); ++dt_iter) { 
(*dt_iter).second->cacheInfo(*inputFitData); } } // ...and then the extra PDFs for (LauTagCatPdfMap::iterator pdf_iter = sigExtraPdf_.begin(); pdf_iter != sigExtraPdf_.end(); ++pdf_iter) { this->cacheInfo(pdf_iter->second, *inputFitData); } if (this->useDP() == kTRUE) { sigModelB0bar_->fillDataTree(*inputFitData); sigModelB0_->fillDataTree(*inputFitData); } } Double_t LauTimeDepFlavModel::getTotEvtLikelihood(const UInt_t iEvt) { // Find out whether the tag-side B was a B0 or a B0bar. curEvtTagFlv_ = evtTagFlvVals_[iEvt]; // Also get the tagging category. curEvtTagCat_ = evtTagCatVals_[iEvt]; // Get the CP eigenvalue of the current event cpEigenValue_ = evtCPEigenVals_[iEvt]; // Get the DP and DecayTime likelihood for signal (TODO and eventually backgrounds) this->getEvtDPDtLikelihood(iEvt); // Get the combined extra PDFs likelihood for signal (TODO and eventually backgrounds) this->getEvtExtraLikelihoods(iEvt); // Construct the total likelihood for signal, qqbar and BBbar backgrounds Double_t sigLike = sigDPLike_ * sigExtraLike_; Double_t signalEvents = signalEvents_->unblindValue(); if (this->useDP() == kFALSE) { signalEvents *= 0.5 * (1.0 + curEvtTagFlv_ * signalAsym_->unblindValue()); } // Construct the total event likelihood Double_t likelihood(sigLike*signalTagCatFrac_[curEvtTagCat_].unblindValue()); if ( ! signalEvents_->fixed() ) { likelihood *= signalEvents; } return likelihood; } Double_t LauTimeDepFlavModel::getEventSum() const { Double_t eventSum(0.0); eventSum += signalEvents_->unblindValue(); return eventSum; } void LauTimeDepFlavModel::getEvtDPDtLikelihood(const UInt_t iEvt) { // Function to return the signal and background likelihoods for the // Dalitz plot for the given event evtNo. sigDPLike_ = 1.0; //There's always a likelihood term for signal, so we better not zero it. if ( this->useDP() == kFALSE ) { return; } // Mistag probabilities. 
Defined as: omega = prob of the tagging B0 being reported as B0bar // Whether we want omega or omegaBar depends on q_tag, hence curEvtTagFlv_*... in the previous lines //Double_t misTagFrac = 0.5 * (1.0 - dilution_[curEvtTagCat_] - qDDo2); //Double_t misTagFracBar = 0.5 * (1.0 - dilution_[curEvtTagCat_] + qDDo2); // Calculate event quantities Double_t qD = curEvtTagFlv_*dilution_[curEvtTagCat_].unblindValue(); Double_t qDDo2 = curEvtTagFlv_*0.5*deltaDilution_[curEvtTagCat_].unblindValue(); // Get the dynamics to calculate everything required for the likelihood calculation sigModelB0bar_->calcLikelihoodInfo(iEvt); sigModelB0_->calcLikelihoodInfo(iEvt); // Retrieve the amplitudes and efficiency from the dynamics const LauComplex& Abar = sigModelB0bar_->getEvtDPAmp(); const LauComplex& A = sigModelB0_->getEvtDPAmp(); Double_t eff = sigModelB0bar_->getEvtEff(); // Next calculate the DP terms Double_t aSqSum = A.abs2() + Abar.abs2(); Double_t aSqDif = A.abs2() - Abar.abs2(); LauComplex inter = Abar * A.conj() * phiMixComplex_; Double_t interTermIm = 2.0 * inter.im(); Double_t interTermRe = 2.0 * inter.re(); // First get all the decay time terms //LauDecayTimePdf* signalDtPdf = signalDecayTimePdfs_[curEvtTagCat_]; LauDecayTimePdf* decayTimePdf = signalDecayTimePdfs_[curEvtTagCat_]; - decayTimePdf->calcLikelihoodInfo(iEvt); + decayTimePdf->calcLikelihoodInfo(static_cast(iEvt)); // First get all the decay time terms Double_t dtCos = decayTimePdf->getCosTerm(); Double_t dtSin = decayTimePdf->getSinTerm(); Double_t dtCosh = decayTimePdf->getCoshTerm(); Double_t dtSinh = decayTimePdf->getSinhTerm(); Double_t cosTerm = dtCos * qD * aSqDif; Double_t sinTerm = dtSin * qD * interTermIm; Double_t coshTerm = dtCosh * (1.0 + qDDo2) * aSqSum; Double_t sinhTerm = dtSinh * (1.0 + qDDo2) * interTermRe; if ( cpEigenValue_ == CPOdd ) { sinTerm *= -1.0; sinhTerm *= -1.0; } // ... 
to get the total and multiply by the efficiency Double_t ASq = coshTerm + cosTerm - sinTerm + sinhTerm; ASq *= eff; // Calculate the DP and time normalisation Double_t normTermIndep = sigModelB0bar_->getDPNorm() + sigModelB0_->getDPNorm(); Double_t normTermCosh = decayTimePdf->getNormTermCosh(); Double_t normTermDep = interTermReNorm_; Double_t normTermSinh = decayTimePdf->getNormTermSinh(); Double_t norm = normTermIndep*normTermCosh + normTermDep*normTermSinh; // Calculate the normalised signal likelihood sigDPLike_ = ASq / norm; } void LauTimeDepFlavModel::getEvtExtraLikelihoods(const UInt_t iEvt) { // Function to return the signal and background likelihoods for the // extra variables for the given event evtNo. sigExtraLike_ = 1.0; //There's always a likelihood term for signal, so we better not zero it. // First, those independent of the tagging of the event: // signal LauTagCatPdfMap::iterator sig_iter = sigExtraPdf_.find(curEvtTagCat_); LauPdfList* pdfList = (sig_iter != sigExtraPdf_.end())? &(sig_iter->second) : 0; if (pdfList) { sigExtraLike_ = this->prodPdfValue( *pdfList, iEvt ); } } void LauTimeDepFlavModel::updateCoeffs() { coeffsB0bar_.clear(); coeffsB0_.clear(); coeffsB0bar_.reserve(nSigComp_); coeffsB0_.reserve(nSigComp_); for (UInt_t i = 0; i < nSigComp_; ++i) { coeffsB0bar_.push_back(coeffPars_[i]->antiparticleCoeff()); coeffsB0_.push_back(coeffPars_[i]->particleCoeff()); } } Bool_t LauTimeDepFlavModel::validTagCat(Int_t tagCat) const { return (validTagCats_.find(tagCat) != validTagCats_.end()); } Bool_t LauTimeDepFlavModel::checkTagCatFracMap(const LauTagCatParamMap& theMap) const { // First check that there is an entry for each tagging category. // NB an entry won't have been added if it isn't a valid category // so don't need to check for that here. 
if (theMap.size() != signalTagCatFrac_.size()) { std::cerr<<"ERROR in LauTimeDepFlavModel::checkTagCatFracMap : Not all tagging categories present."< 1E-10) { std::cerr<<"ERROR in LauTimeDepFlavModel::checkTagCatFracMap : Tagging category event fractions do not sum to unity."< -LauConstants::pi && phase < LauConstants::pi) { withinRange = kTRUE; } else { // Not within the specified range if (phase > LauConstants::pi) { phase -= LauConstants::twoPi; } else if (phase < -LauConstants::pi) { phase += LauConstants::twoPi; } } } // A further problem can occur when the generated phase is close to -pi or pi. // The phase can wrap over to the other end of the scale - // this leads to artificially large pulls so we wrap it back. Double_t diff = phase - genPhase; if (diff > LauConstants::pi) { phase -= LauConstants::twoPi; } else if (diff < -LauConstants::pi) { phase += LauConstants::twoPi; } // finally store the new value in the parameter // and update the pull phiMix_.value(phase); phiMix_.updatePull(); } void LauTimeDepFlavModel::embedSignal(Int_t tagCat, const TString& fileName, const TString& treeName, Bool_t reuseEventsWithinEnsemble, Bool_t reuseEventsWithinExperiment) { if (signalTree_[tagCat]) { std::cerr<<"ERROR in LauTimeDepFlavModel::embedSignal : Already embedding signal from file for tagging category "<findBranches(); if (!dataOK) { delete signalTree_[tagCat]; signalTree_[tagCat] = 0; std::cerr<<"ERROR in LauTimeDepFlavModel::embedSignal : Problem creating data tree for embedding."<addSPlotNtupleIntegerBranch("iExpt"); this->addSPlotNtupleIntegerBranch("iEvtWithinExpt"); // Store the efficiency of the event (for inclusive BF calculations). if (this->storeDPEff()) { this->addSPlotNtupleDoubleBranch("efficiency"); } // Store the total event likelihood for each species. 
this->addSPlotNtupleDoubleBranch("sigTotalLike"); // Store the DP likelihoods if (this->useDP()) { this->addSPlotNtupleDoubleBranch("sigDPLike"); } // Store the likelihoods for each extra PDF const LauPdfList* pdfList( &(sigExtraPdf_.begin()->second) ); this->addSPlotNtupleBranches(pdfList, "sig"); } void LauTimeDepFlavModel::addSPlotNtupleBranches(const LauPdfList* extraPdfs, const TString& prefix) { if (!extraPdfs) { return; } // Loop through each of the PDFs for (LauPdfList::const_iterator pdf_iter = extraPdfs->begin(); pdf_iter != extraPdfs->end(); ++pdf_iter) { // Count the number of input variables that are not // DP variables (used in the case where there is DP // dependence for e.g. MVA) UInt_t nVars(0); for ( std::vector::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) { if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { ++nVars; } } if ( nVars == 1 ) { // If the PDF only has one variable then // simply add one branch for that variable TString varName = (*pdf_iter)->varName(); TString name(prefix); name += varName; name += "Like"; this->addSPlotNtupleDoubleBranch(name); } else if ( nVars == 2 ) { // If the PDF has two variables then we // need a branch for them both together and // branches for each TString allVars(""); for ( std::vector::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) { allVars += (*var_iter); TString name(prefix); name += (*var_iter); name += "Like"; this->addSPlotNtupleDoubleBranch(name); } TString name(prefix); name += allVars; name += "Like"; this->addSPlotNtupleDoubleBranch(name); } else { std::cerr<<"WARNING in LauTimeDepFlavModel::addSPlotNtupleBranches : Can't yet deal with 3D PDFs."<begin(); pdf_iter != extraPdfs->end(); ++pdf_iter) { // calculate the likelihood for this event (*pdf_iter)->calcLikelihoodInfo(iEvt); extraLike = (*pdf_iter)->getLikelihood(); totalLike *= extraLike; // Count 
the number of input variables that are not // DP variables (used in the case where there is DP // dependence for e.g. MVA) UInt_t nVars(0); for ( std::vector::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) { if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { ++nVars; } } if ( nVars == 1 ) { // If the PDF only has one variable then // simply store the value for that variable TString varName = (*pdf_iter)->varName(); TString name(prefix); name += varName; name += "Like"; this->setSPlotNtupleDoubleBranchValue(name, extraLike); } else if ( nVars == 2 ) { // If the PDF has two variables then we // store the value for them both together // and for each on their own TString allVars(""); for ( std::vector::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) { allVars += (*var_iter); TString name(prefix); name += (*var_iter); name += "Like"; Double_t indivLike = (*pdf_iter)->getLikelihood( (*var_iter) ); this->setSPlotNtupleDoubleBranchValue(name, indivLike); } TString name(prefix); name += allVars; name += "Like"; this->setSPlotNtupleDoubleBranchValue(name, extraLike); } else { std::cerr<<"WARNING in LauAllFitModel::setSPlotNtupleBranchValues : Can't yet deal with 3D PDFs."<useDP()) { nameSet.insert("DP"); } LauPdfList pdfList( (sigExtraPdf_.begin()->second) ); for (LauPdfList::const_iterator pdf_iter = pdfList.begin(); pdf_iter != pdfList.end(); ++pdf_iter) { // Loop over the variables involved in each PDF for ( std::vector::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) { // If they are not DP coordinates then add them if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { nameSet.insert( (*var_iter) ); } } } return nameSet; } LauSPlot::NumbMap LauTimeDepFlavModel::freeSpeciesNames() const { LauSPlot::NumbMap numbMap; if (!signalEvents_->fixed() && this->doEMLFit()) 
{ numbMap["sig"] = signalEvents_->genValue(); } return numbMap; } LauSPlot::NumbMap LauTimeDepFlavModel::fixdSpeciesNames() const { LauSPlot::NumbMap numbMap; if (signalEvents_->fixed() && this->doEMLFit()) { numbMap["sig"] = signalEvents_->genValue(); } return numbMap; } LauSPlot::TwoDMap LauTimeDepFlavModel::twodimPDFs() const { LauSPlot::TwoDMap twodimMap; const LauPdfList* pdfList = &(sigExtraPdf_.begin()->second); for (LauPdfList::const_iterator pdf_iter = pdfList->begin(); pdf_iter != pdfList->end(); ++pdf_iter) { // Count the number of input variables that are not DP variables UInt_t nVars(0); for ( std::vector::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) { if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { ++nVars; } } if ( nVars == 2 ) { twodimMap.insert( std::make_pair( "sig", std::make_pair( (*pdf_iter)->varNames()[0], (*pdf_iter)->varNames()[1] ) ) ); } } return twodimMap; } void LauTimeDepFlavModel::storePerEvtLlhds() { std::cout<<"INFO in LauTimeDepFlavModel::storePerEvtLlhds : Storing per-event likelihood values..."<fitData(); // if we've not been using the DP model then we need to cache all // the info here so that we can get the efficiency from it if (!this->useDP() && this->storeDPEff()) { sigModelB0bar_->initialise(coeffsB0bar_); sigModelB0_->initialise(coeffsB0_); sigModelB0bar_->fillDataTree(*inputFitData); sigModelB0_->fillDataTree(*inputFitData); } UInt_t evtsPerExpt(this->eventsPerExpt()); LauIsobarDynamics* sigModel(sigModelB0bar_); for (UInt_t iEvt = 0; iEvt < evtsPerExpt; ++iEvt) { // Find out whether we have B0bar or B0 curEvtTagFlv_ = evtTagFlvVals_[iEvt]; curEvtTagCat_ = evtTagCatVals_[iEvt]; LauTagCatPdfMap::iterator sig_iter = sigExtraPdf_.find(curEvtTagCat_); LauPdfList* sigPdfs = (sig_iter != sigExtraPdf_.end())? 
&(sig_iter->second) : 0; // the DP information this->getEvtDPDtLikelihood(iEvt); if (this->storeDPEff()) { if (!this->useDP()) { sigModel->calcLikelihoodInfo(iEvt); } this->setSPlotNtupleDoubleBranchValue("efficiency",sigModel->getEvtEff()); } if (this->useDP()) { sigTotalLike_ = sigDPLike_; this->setSPlotNtupleDoubleBranchValue("sigDPLike",sigDPLike_); } else { sigTotalLike_ = 1.0; } // the signal PDF values sigTotalLike_ *= this->setSPlotNtupleBranchValues(sigPdfs, "sig", iEvt); // the total likelihoods this->setSPlotNtupleDoubleBranchValue("sigTotalLike",sigTotalLike_); // fill the tree this->fillSPlotNtupleBranches(); } std::cout<<"INFO in LauTimeDepFlavModel::storePerEvtLlhds : Finished storing per-event likelihood values."< #include #include #include #include #include "TFile.h" #include "TMinuit.h" #include "TRandom.h" #include "TSystem.h" #include "TVirtualFitter.h" #include "LauAbsBkgndDPModel.hh" #include "LauAbsCoeffSet.hh" #include "LauAbsPdf.hh" #include "LauAsymmCalc.hh" #include "LauComplex.hh" #include "LauConstants.hh" #include "LauDPPartialIntegralInfo.hh" #include "LauDaughters.hh" #include "LauDecayTimePdf.hh" #include "LauFitNtuple.hh" #include "LauGenNtuple.hh" #include "LauIsobarDynamics.hh" #include "LauKinematics.hh" #include "LauPrint.hh" #include "LauRandom.hh" #include "LauScfMap.hh" #include "LauTimeDepNonFlavModel.hh" ClassImp(LauTimeDepNonFlavModel) LauTimeDepNonFlavModel::LauTimeDepNonFlavModel(LauIsobarDynamics* modelB0bar_f, LauIsobarDynamics* modelB0_f, LauIsobarDynamics* modelB0bar_fbar, LauIsobarDynamics* modelB0_fbar, const Bool_t useUntaggedEvents, const TString& tagVarName, const TString& tagCatVarName) : LauAbsFitModel(), sigModelB0bar_f_(modelB0bar_f), sigModelB0_f_(modelB0_f), sigModelB0bar_fbar_(modelB0bar_fbar), sigModelB0_fbar_(modelB0_fbar), kinematicsB0bar_f_(modelB0bar_f ? modelB0bar_f->getKinematics() : 0), kinematicsB0_f_(modelB0_f ? modelB0_f->getKinematics() : 0), kinematicsB0bar_fbar_(modelB0bar_fbar ? 
modelB0bar_fbar->getKinematics() : 0), kinematicsB0_fbar_(modelB0_fbar ? modelB0_fbar->getKinematics() : 0), useUntaggedEvents_(useUntaggedEvents), nSigComp_(0), nSigDPPar_(0), nDecayTimePar_(0), nExtraPdfPar_(0), nNormPar_(0), coeffsB0bar_f_(0), coeffsB0_f_(0), coeffsB0bar_fbar_(0), coeffsB0_fbar_(0), coeffPars_B0f_B0barfbar_(0), coeffPars_B0fbar_B0barf_(0), interTermReNorm_f_(0), interTermReNorm_fbar_(0), interTermImNorm_f_(0), interTermImNorm_fbar_(0), fitFracB0bar_f_(0), fitFracB0_f_(0), fitFracB0bar_fbar_(0), fitFracB0_fbar_(0), fitFracAsymm_B0f_B0barfbar_(0), fitFracAsymm_B0fbar_B0barf_(0), acp_B0f_B0barfbar_(0), acp_B0fbar_B0barf_(0), meanEffB0bar_f_("meanEffB0bar_f",0.0,0.0,1.0), meanEffB0_f_("meanEffB0_f",0.0,0.0,1.0), meanEffB0bar_fbar_("meanEffB0bar_fbar",0.0,0.0,1.0), meanEffB0_fbar_("meanEffB0_fbar",0.0,0.0,1.0), DPRateB0bar_f_("DPRateB0bar_f",0.0,0.0,100.0), DPRateB0_f_("DPRateB0_f",0.0,0.0,100.0), DPRateB0bar_fbar_("DPRateB0bar_fbar",0.0,0.0,100.0), DPRateB0_fbar_("DPRateB0_fbar",0.0,0.0,100.0), signalEvents_(0), signalAsym_(0), signalTagCatFrac_(), tagVarName_(tagVarName), tagCatVarName_(tagCatVarName), cpevVarName_(""), validTagCats_(), curEvtTagFlv_(0), curEvtTagCat_(0), cpEigenValue_(CPEven), evtTagFlvVals_(0), evtTagCatVals_(0), evtCPEigenVals_(0), dilution_(), deltaDilution_(), deltaM_("deltaM",0.0), deltaGamma_("deltaGamma",0.0), tau_("tau",LauConstants::tauB0), phiMix_("phiMix", 2.0*LauConstants::beta, -LauConstants::threePi, LauConstants::threePi, kFALSE), sinPhiMix_("sinPhiMix", TMath::Sin(2.0*LauConstants::beta), -3.0, 3.0, kFALSE), cosPhiMix_("cosPhiMix", TMath::Cos(2.0*LauConstants::beta), -3.0, 3.0, kFALSE), useSinCos_(kFALSE), phiMixComplex_(TMath::Cos(-2.0*LauConstants::beta),TMath::Sin(-2.0*LauConstants::beta)), signalDecayTimePdfs_(), curEvtDecayTime_(0.0), curEvtDecayTimeErr_(0.0), qD_(0.0), qDDo2_(0.0), sigExtraPdf_(), finalState_(0.0), iterationsMax_(500000), nGenLoop_(0), ASq_(0.0), aSqMaxVar_(0.0), aSqMaxSet_(1.25), 
normTimeDP_f_(0.0),
	normTimeDP_fbar_(0.0),
	storeGenAmpInfo_(kFALSE),
	signalTree_(),
	reuseSignal_(kFALSE),
	sigDPLike_(0.0),
	sigExtraLike_(0.0),
	sigTotalLike_(0.0)
{
	// Add the untagged category as a valid category
	this->addValidTagCat(0);

	// Set the fraction, average dilution and dilution difference for the untagged category
	this->setSignalTagCatPars(0, 1.0, 0.0, 0.0, kTRUE);
}

LauTimeDepNonFlavModel::~LauTimeDepNonFlavModel()
{
	// TODO - need to delete the various embedded data structures here
}

// No backgrounds are supported by this model yet, so nothing to set up.
void LauTimeDepNonFlavModel::setupBkgndVectors()
{
}

// Set the signal yield parameter (asymmetry defaults to a fixed zero).
// Refuses to overwrite a previously set yield or asymmetry.
void LauTimeDepNonFlavModel::setNSigEvents(LauParameter* nSigEvents)
{
	if ( nSigEvents == 0 ) {
		std::cerr << "ERROR in LauTimeDepNonFlavModel::setNSigEvents : The LauParameter pointer is null." << std::endl;
		gSystem->Exit(EXIT_FAILURE);
	}
	if ( signalEvents_ != 0 ) {
		std::cerr << "ERROR in LauTimeDepNonFlavModel::setNSigEvents : You are trying to overwrite the signal yield." << std::endl;
		return;
	}
	if ( signalAsym_ != 0 ) {
		std::cerr << "ERROR in LauTimeDepNonFlavModel::setNSigEvents : You are trying to overwrite the signal asymmetry." << std::endl;
		return;
	}

	signalEvents_ = nSigEvents;
	signalEvents_->name("signalEvents");
	// Allow the yield to float in a symmetric window around (twice) its set value
	Double_t value = nSigEvents->value();
	signalEvents_->range(-2.0*(TMath::Abs(value)+1.0),2.0*(TMath::Abs(value)+1.0));
	signalAsym_ = new LauParameter("signalAsym",0.0,-1.0,1.0,kTRUE);
}

// Set both the signal yield and the signal asymmetry parameters.
// Refuses to overwrite previously set values.
void LauTimeDepNonFlavModel::setNSigEvents(LauParameter* nSigEvents, LauParameter* sigAsym)
{
	if ( nSigEvents == 0 ) {
		std::cerr << "ERROR in LauTimeDepNonFlavModel::setNSigEvents : The event LauParameter pointer is null." << std::endl;
		gSystem->Exit(EXIT_FAILURE);
	}
	if ( sigAsym == 0 ) {
		std::cerr << "ERROR in LauTimeDepNonFlavModel::setNSigEvents : The asym LauParameter pointer is null." << std::endl;
		gSystem->Exit(EXIT_FAILURE);
	}
	if ( signalEvents_ != 0 ) {
		std::cerr << "ERROR in LauTimeDepNonFlavModel::setNSigEvents : You are trying to overwrite the signal yield."
<< std::endl; return; } if ( signalAsym_ != 0 ) { std::cerr << "ERROR in LauTimeDepNonFlavModel::setNSigEvents : You are trying to overwrite the signal asymmetry." << std::endl; return; } signalEvents_ = nSigEvents; signalEvents_->name("signalEvents"); Double_t value = nSigEvents->value(); signalEvents_->range(-2.0*(TMath::Abs(value)+1.0), 2.0*(TMath::Abs(value)+1.0)); signalAsym_ = sigAsym; signalAsym_->name("signalAsym"); signalAsym_->range(-1.0,1.0); } void LauTimeDepNonFlavModel::setNBkgndEvents(LauAbsRValue* /*nBkgndEvents*/) { std::cerr << "WARNING in LauTimeDepNonFlavModel::setNBkgndEvents : This model does not yet support backgrounds" << std::endl; } void LauTimeDepNonFlavModel::addValidTagCats(const std::vector& tagCats) { for (std::vector::const_iterator iter = tagCats.begin(); iter != tagCats.end(); ++iter) { this->addValidTagCat(*iter); } } void LauTimeDepNonFlavModel::addValidTagCat(Int_t tagCat) { validTagCats_.insert(tagCat); } void LauTimeDepNonFlavModel::setSignalTagCatPars(const Int_t tagCat, const Double_t tagCatFrac, const Double_t dilution, const Double_t deltaDilution, const Bool_t fixTCFrac) { if (!this->validTagCat(tagCat)) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::setSignalTagCatPars : Tagging category \""<checkSignalTagCatFractions(); only when the user has //set them all up, in this->initialise(); } void LauTimeDepNonFlavModel::checkSignalTagCatFractions() { Double_t totalTaggedFrac(0.0); for (LauTagCatParamMap::const_iterator iter=signalTagCatFrac_.begin(); iter!=signalTagCatFrac_.end(); ++iter) { if (iter->first != 0) { const LauParameter& par = iter->second; totalTaggedFrac += par.value(); } } if ( ((totalTaggedFrac < (1.0-1.0e-8))&&!useUntaggedEvents_) || (totalTaggedFrac > (1.0+1.0e-8)) ) { std::cerr<<"WARNING in LauTimeDepNonFlavModel::checkSignalTagCatFractions : Tagging category fractions add up to "<second; Double_t newVal = par.value() / totalTaggedFrac; par.value(newVal); par.initValue(newVal); par.genValue(newVal); } } 
else if (useUntaggedEvents_) { Double_t tagCatFrac = 1.0 - totalTaggedFrac; TString tagCatFracName("signalTagCatFrac0"); signalTagCatFrac_[0].name(tagCatFracName); signalTagCatFrac_[0].range(0.0,1.0); signalTagCatFrac_[0].value(tagCatFrac); signalTagCatFrac_[0].initValue(tagCatFrac); signalTagCatFrac_[0].genValue(tagCatFrac); signalTagCatFrac_[0].fixed(kTRUE); TString dilutionName("dilution0"); dilution_[0].name(dilutionName); dilution_[0].range(0.0,1.0); dilution_[0].value(0.0); dilution_[0].initValue(0.0); dilution_[0].genValue(0.0); TString deltaDilutionName("deltaDilution0"); deltaDilution_[0].name(deltaDilutionName); deltaDilution_[0].range(-2.0,2.0); deltaDilution_[0].value(0.0); deltaDilution_[0].initValue(0.0); deltaDilution_[0].genValue(0.0); } for (LauTagCatParamMap::const_iterator iter=dilution_.begin(); iter!=dilution_.end(); ++iter) { std::cout<<"INFO in LauTimeDepNonFlavModel::checkSignalTagCatFractions : Setting dilution for tagging category "<<(*iter).first<<" to "<<(*iter).second<validTagCat(tagCat)) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::setSignalDtPdf : Tagging category \""<validTagCat(tagCat)) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::setSignalPdfs : Tagging category \""<updateCoeffs(); // Initialisation if (this->useDP() == kTRUE) { this->initialiseDPModels(); } if (!this->useDP() && sigExtraPdf_.empty()) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::initialise : Signal model doesn't exist for any variable."<Exit(EXIT_FAILURE); } if (this->useDP() == kTRUE) { // Check that we have all the Dalitz-plot models if ((sigModelB0bar_f_ == 0) || (sigModelB0_f_ == 0) || (sigModelB0bar_fbar_ == 0) || (sigModelB0bar_fbar_ == 0)) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::initialise : the pointer to one (particle or anti-particle) of the signal DP models is null."<Exit(EXIT_FAILURE); } } // Check here that the tagging category fractions add up to 1, otherwise "normalise". Also set up the untagged cat. 
// NB this has to be done early in the initialization as other methods access the tagCats map. this->checkSignalTagCatFractions(); // Clear the vectors of parameter information so we can start from scratch this->clearFitParVectors(); // Set the fit parameters for signal and background models this->setSignalDPParameters(); // Set the fit parameters for the decay time models this->setDecayTimeParameters(); // Set the fit parameters for the extra PDFs this->setExtraPdfParameters(); // Set the initial bg and signal events this->setFitNEvents(); // Check that we have the expected number of fit variables const LauParameterPList& fitVars = this->fitPars(); if (fitVars.size() != (nSigDPPar_ + nDecayTimePar_ + nExtraPdfPar_ + nNormPar_)) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::initialise : Number of fit parameters not of expected size."<Exit(EXIT_FAILURE); } this->setExtraNtupleVars(); } void LauTimeDepNonFlavModel::recalculateNormalisation() { sigModelB0bar_f_->recalculateNormalisation(); sigModelB0_f_->recalculateNormalisation(); sigModelB0bar_fbar_->recalculateNormalisation(); sigModelB0_fbar_->recalculateNormalisation(); sigModelB0bar_f_->modifyDataTree(); sigModelB0_f_->modifyDataTree(); sigModelB0bar_fbar_->modifyDataTree(); sigModelB0_fbar_->modifyDataTree(); this->calcInterferenceTermIntegrals(); } void LauTimeDepNonFlavModel::initialiseDPModels() { if (sigModelB0bar_f_ == 0) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::initialiseDPModels : B0bar -> f signal DP model doesn't exist"<Exit(EXIT_FAILURE); } if (sigModelB0_f_ == 0) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::initialiseDPModels : B0 -> f signal DP model doesn't exist"<Exit(EXIT_FAILURE); } if (sigModelB0bar_fbar_ == 0) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::initialiseDPModels : B0bar -> fbar signal DP model doesn't exist"<Exit(EXIT_FAILURE); } if (sigModelB0_fbar_ == 0) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::initialiseDPModels : B0 -> fbar signal DP model doesn't 
exist"<Exit(EXIT_FAILURE); } // Need to check that the number of components we have and that the dynamics has matches up //const UInt_t nAmpB0bar_f = sigModelB0bar_f_->getnAmp(); //const UInt_t nAmpB0_f = sigModelB0_f_->getnAmp(); //const UInt_t nAmpB0bar_fbar = sigModelB0bar_fbar_->getnAmp(); //const UInt_t nAmpB0_fbar = sigModelB0_fbar_->getnAmp(); const UInt_t nAmpB0bar_f = sigModelB0bar_f_->getnTotAmp(); const UInt_t nAmpB0_f = sigModelB0_f_->getnTotAmp(); const UInt_t nAmpB0bar_fbar = sigModelB0bar_fbar_->getnTotAmp(); const UInt_t nAmpB0_fbar = sigModelB0_fbar_->getnTotAmp(); if ( nAmpB0bar_f != nAmpB0_f ){ std::cerr << "ERROR in LauTimeDepNonFlavModel::initialiseDPModels : Unequal number of signal DP components in the particle and anti-particle models: " << nAmpB0bar_f << " != " << nAmpB0_f << std::endl; gSystem->Exit(EXIT_FAILURE); } else if ( nAmpB0bar_fbar != nAmpB0_fbar ) { std::cerr << "ERROR in LauTimeDepNonFlavModel::initialiseDPModels : Unequal number of signal DP components in the particle and anti-particle models: " << nAmpB0bar_fbar << " != " << nAmpB0_fbar << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( nAmpB0bar_f != nSigComp_ ) { std::cerr << "ERROR in LauTimeDepNonFlavModel::initialiseDPModels : Number of signal DP components in the model (" << nAmpB0bar_f << ") not equal to number of coefficients supplied (" << nSigComp_ << ")." 
<< std::endl; gSystem->Exit(EXIT_FAILURE); } std::cout<<"INFO in LauTimeDepNonFlavModel::initialiseDPModels : Initialising signal DP model"<initialise(coeffsB0bar_f_); sigModelB0_f_->initialise(coeffsB0_f_); sigModelB0bar_fbar_->initialise(coeffsB0bar_fbar_); sigModelB0_fbar_->initialise(coeffsB0_fbar_); fifjEffSum_f_.clear(); fifjEffSum_fbar_.clear(); fifjEffSum_f_.resize(nSigComp_); fifjEffSum_fbar_.resize(nSigComp_); for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { fifjEffSum_f_[iAmp].resize(nSigComp_); fifjEffSum_fbar_[iAmp].resize(nSigComp_); } // calculate the integrals of the A*Abar terms this->calcInterferenceTermIntegrals(); this->calcInterTermNorm(); } void LauTimeDepNonFlavModel::calcInterferenceTermIntegrals() { const std::vector& integralInfoListB0bar_f = sigModelB0bar_f_->getIntegralInfos(); const std::vector& integralInfoListB0_f = sigModelB0_f_->getIntegralInfos(); const std::vector& integralInfoListB0bar_fbar = sigModelB0bar_fbar_->getIntegralInfos(); const std::vector& integralInfoListB0_fbar = sigModelB0_fbar_->getIntegralInfos(); LauComplex A_f, Abar_f, A_fbar, Abar_fbar, fifjEffSumTerm_f, fifjEffSumTerm_fbar; for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) { fifjEffSum_f_[iAmp][jAmp].zero(); fifjEffSum_fbar_[iAmp][jAmp].zero(); } } const UInt_t nIntegralRegions_f = integralInfoListB0bar_f.size(); for ( UInt_t iRegion(0); iRegion < nIntegralRegions_f; ++iRegion ) { const LauDPPartialIntegralInfo* integralInfoB0bar_f = integralInfoListB0bar_f[iRegion]; const LauDPPartialIntegralInfo* integralInfoB0_f = integralInfoListB0_f[iRegion]; const UInt_t nm13Points = integralInfoB0bar_f->getnm13Points(); const UInt_t nm23Points = integralInfoB0bar_f->getnm23Points(); for (UInt_t m13 = 0; m13 < nm13Points; ++m13) { for (UInt_t m23 = 0; m23 < nm23Points; ++m23) { const Double_t weight_f = integralInfoB0bar_f->getWeight(m13,m23); const Double_t eff_f = integralInfoB0bar_f->getEfficiency(m13,m23); const 
Double_t effWeight_f = eff_f*weight_f; for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { A_f = integralInfoB0_f->getAmplitude(m13, m23, iAmp); for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) { Abar_f = integralInfoB0bar_f->getAmplitude(m13, m23, jAmp); fifjEffSumTerm_f = Abar_f*A_f.conj(); fifjEffSumTerm_f.rescale(effWeight_f); fifjEffSum_f_[iAmp][jAmp] += fifjEffSumTerm_f; } } } } } const UInt_t nIntegralRegions_fbar = integralInfoListB0bar_fbar.size(); for ( UInt_t iRegion(0); iRegion < nIntegralRegions_fbar; ++iRegion ) { const LauDPPartialIntegralInfo* integralInfoB0bar_fbar = integralInfoListB0bar_fbar[iRegion]; const LauDPPartialIntegralInfo* integralInfoB0_fbar = integralInfoListB0_fbar[iRegion]; const UInt_t nm13Points = integralInfoB0bar_fbar->getnm13Points(); const UInt_t nm23Points = integralInfoB0bar_fbar->getnm23Points(); for (UInt_t m13 = 0; m13 < nm13Points; ++m13) { for (UInt_t m23 = 0; m23 < nm23Points; ++m23) { const Double_t weight_fbar = integralInfoB0bar_fbar->getWeight(m13,m23); const Double_t eff_fbar = integralInfoB0bar_fbar->getEfficiency(m13,m23); const Double_t effWeight_fbar = eff_fbar*weight_fbar; for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { A_fbar = integralInfoB0_fbar->getAmplitude(m13, m23, iAmp); for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) { Abar_fbar = integralInfoB0bar_fbar->getAmplitude(m13, m23, jAmp); fifjEffSumTerm_fbar = Abar_fbar*A_fbar.conj(); fifjEffSumTerm_fbar.rescale(effWeight_fbar); fifjEffSum_fbar_[iAmp][jAmp] += fifjEffSumTerm_fbar; } } } } } } void LauTimeDepNonFlavModel::calcInterTermNorm() { const std::vector fNormB0bar_f = sigModelB0bar_f_->getFNorm(); const std::vector fNormB0_f = sigModelB0_f_->getFNorm(); const std::vector fNormB0bar_fbar = sigModelB0bar_fbar_->getFNorm(); const std::vector fNormB0_fbar = sigModelB0_fbar_->getFNorm(); LauComplex norm_f; LauComplex norm_fbar; for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) { LauComplex coeffTerm_f = 
coeffsB0bar_f_[jAmp]*coeffsB0_f_[iAmp].conj(); LauComplex coeffTerm_fbar = coeffsB0bar_fbar_[jAmp]*coeffsB0_fbar_[iAmp].conj(); coeffTerm_f *= fifjEffSum_f_[iAmp][jAmp]; coeffTerm_fbar *= fifjEffSum_fbar_[iAmp][jAmp]; coeffTerm_f.rescale(fNormB0bar_f[jAmp] * fNormB0_f[iAmp]); coeffTerm_fbar.rescale(fNormB0bar_fbar[jAmp] * fNormB0_fbar[iAmp]); norm_f += coeffTerm_f; norm_fbar += coeffTerm_fbar; } } norm_f *= phiMixComplex_; norm_fbar *= phiMixComplex_; interTermReNorm_f_ = 2.0*norm_f.re(); interTermImNorm_f_ = 2.0*norm_f.im(); interTermReNorm_fbar_ = 2.0*norm_fbar.re(); interTermImNorm_fbar_ = 2.0*norm_fbar.im(); } void LauTimeDepNonFlavModel::setAmpCoeffSet(LauAbsCoeffSet* coeffSet) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::setAmpCoeffSet : Set of coefficients for B0/B0bar -> f,fbar contains only component for f final state \""<name()<<"\"."<name(); TString compName_B0fbar_B0barf = coeffSet_B0fbar_B0barf->name(); TString conjName_B0f_B0barfbar = sigModelB0bar_fbar_->getConjResName(compName_B0f_B0barfbar); TString conjName_B0fbar_B0barf = sigModelB0bar_f_->getConjResName(compName_B0fbar_B0barf); std::cout << "Values are: " << std::endl; std::cout << "CompName: " << compName_B0f_B0barfbar << " " << compName_B0fbar_B0barf << std::endl; std::cout << "ComjName: " << conjName_B0f_B0barfbar << " " << conjName_B0fbar_B0barf << std::endl; // Define each daughter configuration const LauDaughters* daughtersB0bar_f = sigModelB0bar_f_->getDaughters(); const LauDaughters* daughtersB0_f = sigModelB0_f_->getDaughters(); const LauDaughters* daughtersB0bar_fbar = sigModelB0bar_fbar_->getDaughters(); const LauDaughters* daughtersB0_fbar = sigModelB0_fbar_->getDaughters(); const Bool_t conjugateB0_f = daughtersB0_f->isConjugate( daughtersB0bar_fbar ); const Bool_t conjugateB0_fbar = daughtersB0_fbar->isConjugate( daughtersB0bar_f ); std::cout << "I am here 1 " << std::endl; if ( ! sigModelB0_f_->hasResonance(compName_B0f_B0barfbar) ) { if ( ! 
sigModelB0_f_->hasResonance(conjName_B0f_B0barfbar) ) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::setAmpCoeffSet : B0 -> f signal DP model doesn't contain component \""<< compName_B0f_B0barfbar <<"\"."<hasResonance(compName_B0fbar_B0barf) ) { std::cout << "Checked: " << compName_B0fbar_B0barf << std::endl; if ( ! sigModelB0_fbar_->hasResonance(conjName_B0fbar_B0barf) ) { std::cout << "Checked: " << conjName_B0fbar_B0barf << std::endl; std::cerr<<"ERROR in LauTimeDepNonFlavModel::setAmpCoeffSet : B0 -> fbar signal DP model doesn't contain component \""<< compName_B0fbar_B0barf<<"\"."<hasResonance(conjName_B0f_B0barfbar) ) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::setAmpCoeffSet : signal DP model doesn't contain component \""<hasResonance(conjName_B0fbar_B0barf) ) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::setAmpCoeffSet : signal DP model doesn't contain component \""<hasResonance(compName_B0f_B0barfbar) ) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::setAmpCoeffSet : signal DP model doesn't contain component \""<hasResonance(compName_B0fbar_B0barf) ) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::setAmpCoeffSet : signal DP model doesn't contain component \""<::const_iterator iter_B0f_B0barfbar=coeffPars_B0f_B0barfbar_.begin(); iter_B0f_B0barfbar!=coeffPars_B0f_B0barfbar_.end(); ++iter_B0f_B0barfbar) { if ((*iter_B0f_B0barfbar)->name() == compName_B0f_B0barfbar) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::setAmpCoeffSet : Have already set coefficients for \""<::const_iterator iter_B0fbar_B0barf=coeffPars_B0fbar_B0barf_.begin(); iter_B0fbar_B0barf!=coeffPars_B0fbar_B0barf_.end(); ++iter_B0fbar_B0barf) { if ((*iter_B0fbar_B0barf)->name() == compName_B0fbar_B0barf) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::setAmpCoeffSet : Have already set coefficients for \""<index(nSigComp_); coeffSet_B0fbar_B0barf->index(nSigComp_); coeffPars_B0f_B0barfbar_.push_back(coeffSet_B0f_B0barfbar); coeffPars_B0fbar_B0barf_.push_back(coeffSet_B0fbar_B0barf); TString 
parName_B0f_B0barfbar = coeffSet_B0f_B0barfbar->baseName(); parName_B0f_B0barfbar += "FitFracAsym"; TString parName_B0fbar_B0barf = coeffSet_B0fbar_B0barf->baseName(); parName_B0fbar_B0barf += "FitFracAsym"; fitFracAsymm_B0f_B0barfbar_.push_back(LauParameter(parName_B0f_B0barfbar, 0.0, -1.0, 1.0)); fitFracAsymm_B0fbar_B0barf_.push_back(LauParameter(parName_B0fbar_B0barf, 0.0, -1.0, 1.0)); acp_B0f_B0barfbar_.push_back(coeffSet_B0f_B0barfbar->acp()); acp_B0fbar_B0barf_.push_back(coeffSet_B0fbar_B0barf->acp()); ++nSigComp_; std::cout<<"INFO in LauTimeDepNonFlavModel::setAmpCoeffSet : Added coefficients for components \""<f, B0bar->fbar) and \""<fbar, B0bar->f)"<acp(); acp_B0fbar_B0barf_[i] = coeffPars_B0fbar_B0barf_[i]->acp(); LauAsymmCalc asymmCalc_B0f_B0barfbar(fitFracB0bar_fbar_[i][i].value(), fitFracB0_f_[i][i].value()); LauAsymmCalc asymmCalc_B0fbar_B0barf(fitFracB0bar_f_[i][i].value(), fitFracB0_fbar_[i][i].value()); Double_t asym_B0f_B0barfbar = asymmCalc_B0f_B0barfbar.getAsymmetry(); Double_t asym_B0fbar_B0barf = asymmCalc_B0fbar_B0barf.getAsymmetry(); fitFracAsymm_B0f_B0barfbar_[i].value(asym_B0f_B0barfbar); fitFracAsymm_B0fbar_B0barf_[i].value(asym_B0fbar_B0barf); if (initValues) { fitFracAsymm_B0f_B0barfbar_[i].genValue(asym_B0f_B0barfbar); fitFracAsymm_B0fbar_B0barf_[i].genValue(asym_B0fbar_B0barf); fitFracAsymm_B0f_B0barfbar_[i].initValue(asym_B0f_B0barfbar); fitFracAsymm_B0fbar_B0barf_[i].initValue(asym_B0fbar_B0barf); } } } void LauTimeDepNonFlavModel::setSignalDPParameters() { // Set the fit parameters for the signal model. nSigDPPar_ = 0; if ( ! this->useDP() ) { return; } std::cout << "INFO in LauTimeDepNonFlavModel::setSignalDPParameters : Setting the initial fit parameters for the signal DP model." 
<< std::endl; // Place isobar coefficient parameters in vector of fit variables LauParameterPList& fitVars = this->fitPars(); for (UInt_t i = 0; i < nSigComp_; i++) { LauParameterPList pars_B0f_B0barfbar = coeffPars_B0f_B0barfbar_[i]->getParameters(); LauParameterPList pars_B0fbar_B0barf = coeffPars_B0fbar_B0barf_[i]->getParameters(); for (LauParameterPList::iterator iter_B0f_B0barfbar = pars_B0f_B0barfbar.begin(); iter_B0f_B0barfbar != pars_B0f_B0barfbar.end(); ++iter_B0f_B0barfbar) { if ( !(*iter_B0f_B0barfbar)->clone() ) { fitVars.push_back(*iter_B0f_B0barfbar); ++nSigDPPar_; } } for (LauParameterPList::iterator iter_B0fbar_B0barf = pars_B0fbar_B0barf.begin(); iter_B0fbar_B0barf != pars_B0fbar_B0barf.end(); ++iter_B0fbar_B0barf) { if ( !(*iter_B0fbar_B0barf)->clone() ) { fitVars.push_back(*iter_B0fbar_B0barf); ++nSigDPPar_; } } } // Obtain the resonance parameters and place them in the vector of fit variables and in a separate vector // Need to make sure that they are unique because some might appear in both DP models LauParameterPSet& resVars = this->resPars(); resVars.clear(); LauParameterPList& sigDPParsB0bar_f = sigModelB0bar_f_->getFloatingParameters(); LauParameterPList& sigDPParsB0_f = sigModelB0_f_->getFloatingParameters(); LauParameterPList& sigDPParsB0bar_fbar = sigModelB0bar_fbar_->getFloatingParameters(); LauParameterPList& sigDPParsB0_fbar = sigModelB0_fbar_->getFloatingParameters(); for ( LauParameterPList::iterator iter = sigDPParsB0bar_f.begin(); iter != sigDPParsB0bar_f.end(); ++iter ) { if ( resVars.insert(*iter).second ) { fitVars.push_back(*iter); ++nSigDPPar_; } } for ( LauParameterPList::iterator iter = sigDPParsB0_f.begin(); iter != sigDPParsB0_f.end(); ++iter ) { if ( resVars.insert(*iter).second ) { fitVars.push_back(*iter); ++nSigDPPar_; } } for ( LauParameterPList::iterator iter = sigDPParsB0bar_fbar.begin(); iter != sigDPParsB0bar_fbar.end(); ++iter ) { if ( resVars.insert(*iter).second ) { fitVars.push_back(*iter); ++nSigDPPar_; } } 
for ( LauParameterPList::iterator iter = sigDPParsB0_fbar.begin(); iter != sigDPParsB0_fbar.end(); ++iter ) { if ( resVars.insert(*iter).second ) { fitVars.push_back(*iter); ++nSigDPPar_; } } } UInt_t LauTimeDepNonFlavModel::addParametersToFitList(LauTagCatDtPdfMap& theMap) { UInt_t counter(0); LauParameterPList& fitVars = this->fitPars(); // loop through the map for (LauTagCatDtPdfMap::iterator iter = theMap.begin(); iter != theMap.end(); ++iter) { // grab the pdf and then its parameters LauDecayTimePdf* thePdf = (*iter).second; // The first one is the tagging category LauAbsRValuePList& rvalues = thePdf->getParameters(); // loop through the parameters for (LauAbsRValuePList::iterator pars_iter = rvalues.begin(); pars_iter != rvalues.end(); ++pars_iter) { LauParameterPList params = (*pars_iter)->getPars(); for (LauParameterPList::iterator params_iter = params.begin(); params_iter != params.end(); ++params_iter) { // for each "original" parameter add it to the list of fit parameters and increment the counter if ( !(*params_iter)->clone() && ( !(*params_iter)->fixed() || (this->twoStageFit() && (*params_iter)->secondStage()) ) ) { fitVars.push_back(*params_iter); ++counter; } } } } return counter; } UInt_t LauTimeDepNonFlavModel::addParametersToFitList(LauTagCatPdfMap& theMap) { UInt_t counter(0); // loop through the map for (LauTagCatPdfMap::iterator iter = theMap.begin(); iter != theMap.end(); ++iter) { counter += this->addFitParameters(iter->second); // first is the tagging category } return counter; } void LauTimeDepNonFlavModel::setDecayTimeParameters() { nDecayTimePar_ = 0; // Loop over the Dt PDFs nDecayTimePar_ += this->addParametersToFitList(signalDecayTimePdfs_); LauParameterPList& fitVars = this->fitPars(); if (useSinCos_) { fitVars.push_back(&sinPhiMix_); fitVars.push_back(&cosPhiMix_); nDecayTimePar_ += 2; } else { fitVars.push_back(&phiMix_); ++nDecayTimePar_; } } void LauTimeDepNonFlavModel::setExtraPdfParameters() { // Include the parameters of the 
PDF for each tagging category in the fit // NB all of them are passed to the fit, even though some have been fixed through parameter.fixed(kTRUE) // With the new "cloned parameter" scheme only "original" parameters are passed to the fit. // Their clones are updated automatically when the originals are updated. nExtraPdfPar_ = 0; nExtraPdfPar_ += this->addParametersToFitList(sigExtraPdf_); } void LauTimeDepNonFlavModel::setFitNEvents() { nNormPar_ = 0; // Initialise the total number of events to be the sum of all the hypotheses Double_t nTotEvts = signalEvents_->value(); this->eventsPerExpt(TMath::FloorNint(nTotEvts)); LauParameterPList& fitVars = this->fitPars(); // if doing an extended ML fit add the signal fraction into the fit parameters if (this->doEMLFit()) { std::cout<<"INFO in LauTimeDepNonFlavModel::setFitNEvents : Initialising number of events for signal and background components..."<useDP() == kFALSE) { fitVars.push_back(signalAsym_); ++nNormPar_; } // tagging-category fractions for signal events for (LauTagCatParamMap::iterator iter = signalTagCatFrac_.begin(); iter != signalTagCatFrac_.end(); ++iter) { if (iter == signalTagCatFrac_.begin()) { continue; } LauParameter* par = &((*iter).second); fitVars.push_back(par); ++nNormPar_; } } void LauTimeDepNonFlavModel::setExtraNtupleVars() { // Set-up other parameters derived from the fit results, e.g. fit fractions. 
if (this->useDP() != kTRUE) { return; } // First clear the vectors so we start from scratch this->clearExtraVarVectors(); LauParameterList& extraVars = this->extraPars(); // Add the B0 (f/fbar) and B0bar (f/fbar) fit fractions for each signal component fitFracB0bar_f_ = sigModelB0bar_f_->getFitFractions(); if (fitFracB0bar_f_.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetFitFractions(); if (fitFracB0_f_.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetFitFractions(); if (fitFracB0bar_fbar_.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetFitFractions(); if (fitFracB0_fbar_.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); icalcAsymmetries(kTRUE); // Add the Fit Fraction asymmetry for each signal component for (UInt_t i = 0; i < nSigComp_; i++) { extraVars.push_back(fitFracAsymm_B0f_B0barfbar_[i]); extraVars.push_back(fitFracAsymm_B0fbar_B0barf_[i]); } // Add the calculated CP asymmetry for each signal component for (UInt_t i = 0; i < nSigComp_; i++) { extraVars.push_back(acp_B0f_B0barfbar_[i]); extraVars.push_back(acp_B0fbar_B0barf_[i]); } // Now add in the DP efficiency values Double_t initMeanEffB0bar_f = sigModelB0bar_f_->getMeanEff().initValue(); meanEffB0bar_f_.value(initMeanEffB0bar_f); meanEffB0bar_f_.initValue(initMeanEffB0bar_f); 
meanEffB0bar_f_.genValue(initMeanEffB0bar_f); extraVars.push_back(meanEffB0bar_f_); Double_t initMeanEffB0_f = sigModelB0_f_->getMeanEff().initValue(); meanEffB0_f_.value(initMeanEffB0_f); meanEffB0_f_.initValue(initMeanEffB0_f); meanEffB0_f_.genValue(initMeanEffB0_f); extraVars.push_back(meanEffB0_f_); Double_t initMeanEffB0bar_fbar = sigModelB0bar_fbar_->getMeanEff().initValue(); meanEffB0bar_fbar_.value(initMeanEffB0bar_fbar); meanEffB0bar_fbar_.initValue(initMeanEffB0bar_fbar); meanEffB0bar_fbar_.genValue(initMeanEffB0bar_fbar); extraVars.push_back(meanEffB0bar_fbar_); Double_t initMeanEffB0_fbar = sigModelB0_fbar_->getMeanEff().initValue(); meanEffB0_fbar_.value(initMeanEffB0_fbar); meanEffB0_fbar_.initValue(initMeanEffB0_fbar); meanEffB0_fbar_.genValue(initMeanEffB0_fbar); extraVars.push_back(meanEffB0_fbar_); // Also add in the DP rates Double_t initDPRateB0bar_f = sigModelB0bar_f_->getDPRate().initValue(); DPRateB0bar_f_.value(initDPRateB0bar_f); DPRateB0bar_f_.initValue(initDPRateB0bar_f); DPRateB0bar_f_.genValue(initDPRateB0bar_f); extraVars.push_back(DPRateB0bar_f_); Double_t initDPRateB0_f = sigModelB0_f_->getDPRate().initValue(); DPRateB0_f_.value(initDPRateB0_f); DPRateB0_f_.initValue(initDPRateB0_f); DPRateB0_f_.genValue(initDPRateB0_f); extraVars.push_back(DPRateB0_f_); Double_t initDPRateB0bar_fbar = sigModelB0bar_fbar_->getDPRate().initValue(); DPRateB0bar_fbar_.value(initDPRateB0bar_fbar); DPRateB0bar_fbar_.initValue(initDPRateB0bar_fbar); DPRateB0bar_fbar_.genValue(initDPRateB0bar_fbar); extraVars.push_back(DPRateB0bar_fbar_); Double_t initDPRateB0_fbar = sigModelB0_fbar_->getDPRate().initValue(); DPRateB0_fbar_.value(initDPRateB0_fbar); DPRateB0_fbar_.initValue(initDPRateB0_fbar); DPRateB0_fbar_.genValue(initDPRateB0_fbar); extraVars.push_back(DPRateB0_fbar_); } void LauTimeDepNonFlavModel::finaliseFitResults(const TString& tablePrefixName) { // Retrieve parameters from the fit results for calculations and toy generation // and eventually store 
these in output root ntuples/text files // Now take the fit parameters and update them as necessary // i.e. to make mag > 0.0, phase in the right range. // This function will also calculate any other values, such as the // fit fractions, using any errors provided by fitParErrors as appropriate. // Also obtain the pull values: (measured - generated)/(average error) if (this->useDP() == kTRUE) { for (UInt_t i = 0; i < nSigComp_; ++i) { // Check whether we have "a > 0.0", and phases in the right range coeffPars_B0f_B0barfbar_[i]->finaliseValues(); coeffPars_B0fbar_B0barf_[i]->finaliseValues(); } } // update the pulls on the event fractions and asymmetries if (this->doEMLFit()) { signalEvents_->updatePull(); } if (this->useDP() == kFALSE) { signalAsym_->updatePull(); } // Finalise the pulls on the decay time parameters for (LauTagCatDtPdfMap::iterator iter = signalDecayTimePdfs_.begin(); iter != signalDecayTimePdfs_.end(); ++iter) { LauDecayTimePdf* pdf = (*iter).second; pdf->updatePulls(); } if (useSinCos_) { cosPhiMix_.updatePull(); sinPhiMix_.updatePull(); } else { this->checkMixingPhase(); } // Update the pulls on all the extra PDFs' parameters for (LauTagCatPdfMap::iterator iter = sigExtraPdf_.begin(); iter != sigExtraPdf_.end(); ++iter) { this->updateFitParameters(iter->second); } // Tagging-category fractions for signal and background events Double_t firstCatFrac(1.0); Int_t firstCat(0); for (LauTagCatParamMap::iterator iter = signalTagCatFrac_.begin(); iter != signalTagCatFrac_.end(); ++iter) { if (iter == signalTagCatFrac_.begin()) { firstCat = iter->first; continue; } LauParameter& par = (*iter).second; firstCatFrac -= par.value(); // update the parameter pull par.updatePull(); } signalTagCatFrac_[firstCat].value(firstCatFrac); signalTagCatFrac_[firstCat].updatePull(); // Fill the fit results to the ntuple // update the coefficients and then calculate the fit fractions and ACP's if (this->useDP() == kTRUE) { this->updateCoeffs(); 
sigModelB0bar_f_->updateCoeffs(coeffsB0bar_f_); sigModelB0bar_f_->calcExtraInfo(); sigModelB0_f_->updateCoeffs(coeffsB0_f_); sigModelB0_f_->calcExtraInfo(); sigModelB0bar_fbar_->updateCoeffs(coeffsB0bar_fbar_); sigModelB0bar_fbar_->calcExtraInfo(); sigModelB0_fbar_->updateCoeffs(coeffsB0_fbar_); sigModelB0_fbar_->calcExtraInfo(); LauParArray fitFracB0bar_f = sigModelB0bar_f_->getFitFractions(); if (fitFracB0bar_f.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::finaliseFitResults : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } LauParArray fitFracB0_f = sigModelB0_f_->getFitFractions(); if (fitFracB0_f.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::finaliseFitResults : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } LauParArray fitFracB0bar_fbar = sigModelB0bar_fbar_->getFitFractions(); if (fitFracB0bar_fbar.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::finaliseFitResults : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } LauParArray fitFracB0_fbar = sigModelB0_fbar_->getFitFractions(); if (fitFracB0_fbar.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::finaliseFitResults : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetMeanEff().value()); meanEffB0_f_.value(sigModelB0_f_->getMeanEff().value()); meanEffB0bar_fbar_.value(sigModelB0bar_fbar_->getMeanEff().value()); meanEffB0_fbar_.value(sigModelB0_fbar_->getMeanEff().value()); DPRateB0bar_f_.value(sigModelB0bar_f_->getDPRate().value()); DPRateB0_f_.value(sigModelB0_f_->getDPRate().value()); DPRateB0bar_fbar_.value(sigModelB0bar_fbar_->getDPRate().value()); DPRateB0_fbar_.value(sigModelB0_fbar_->getDPRate().value()); this->calcAsymmetries(); // Then store the final fit 
parameters, and any extra parameters for // the signal model (e.g. fit fractions, FF asymmetries, ACPs, mean efficiency and DP rate) this->clearExtraVarVectors(); LauParameterList& extraVars = this->extraPars(); for (UInt_t i(0); iprintFitFractions(std::cout); this->printAsymmetries(std::cout); } const LauParameterPList& fitVars = this->fitPars(); const LauParameterList& extraVars = this->extraPars(); LauFitNtuple* ntuple = this->fitNtuple(); ntuple->storeParsAndErrors(fitVars, extraVars); // find out the correlation matrix for the parameters ntuple->storeCorrMatrix(this->iExpt(), this->fitStatus(), this->covarianceMatrix()); // Fill the data into ntuple ntuple->updateFitNtuple(); // Print out the partial fit fractions, phases and the // averaged efficiency, reweighted by the dynamics (and anything else) if (this->writeLatexTable()) { TString sigOutFileName(tablePrefixName); sigOutFileName += "_"; sigOutFileName += this->iExpt(); sigOutFileName += "Expt.tex"; this->writeOutTable(sigOutFileName); } } void LauTimeDepNonFlavModel::printFitFractions(std::ostream& output) { // Print out Fit Fractions, total DP rate and mean efficiency // B0 -> f events for (UInt_t i = 0; i < nSigComp_; i++) { const TString compName(coeffPars_B0f_B0barfbar_[i]->name()); output<<"B0bar FitFraction for component "< f overall DP rate (integral of matrix element squared) = "< f average efficiency weighted by whole DP dynamics = "< fbar sample for (UInt_t i = 0; i < nSigComp_; i++) { const TString compName(coeffPars_B0f_B0barfbar_[i]->name()); const TString conjName(sigModelB0_f_->getConjResName(compName)); output<<"B0 FitFraction for component "< fbar overall DP rate (integral of matrix element squared) = "< fbar average efficiency weighted by whole DP dynamics = "< fbar events for (UInt_t i = 0; i < nSigComp_; i++) { const TString compName(coeffPars_B0fbar_B0barf_[i]->name()); output<<"B0bar FitFraction for component "< fbar overall DP rate (integral of matrix element squared) = "< fbar 
average efficiency weighted by whole DP dynamics = "< f sample for (UInt_t i = 0; i < nSigComp_; i++) { const TString compName(coeffPars_B0fbar_B0barf_[i]->name()); const TString conjName(sigModelB0_fbar_->getConjResName(compName)); output<<"B0 FitFraction for component "< f overall DP rate (integral of matrix element squared) = "< f average efficiency weighted by whole DP dynamics = "<name()); output<<"Fit Fraction for B0(B0bar) -> f(fbar) asymmetry for component "< fbar(f) asymmetry for component "< f(fbar) component "<name()); output<<"ACP for B0(B0bar) -> fbar(f) component "<useDP() == kTRUE) { // print the fit coefficients in one table coeffPars_B0f_B0barfbar_.front()->printTableHeading(fout); for (UInt_t i = 0; i < nSigComp_; i++) { coeffPars_B0f_B0barfbar_[i]->printTableRow(fout); } fout<<"\\hline"<printTableHeading(fout); for (UInt_t i = 0; i < nSigComp_; i++) { coeffPars_B0fbar_B0barf_[i]->printTableRow(fout); } fout<<"\\hline"< f(fbar) fout<<"\\begin{tabular}{|l|c|c|c|c|}"< fbar \\ Fit Fraction & \\Bz ->f \\ Fit Fraction & Fit Fraction Asymmetry & $A_{\\CP}$ \\\\"<name(); resName = resName.ReplaceAll("_", "\\_"); fout< =$ & $"; print.printFormat(fout, meanEffB0bar_fbar_.value()); fout << "$ & $"; print.printFormat(fout, meanEffB0_f_.value()); fout << "$ & & \\\\" << std::endl; fout << "$ & & & & & & & \\\\" << std::endl; if (useSinCos_) { fout << "$\\sinPhiMix =$ & $"; print.printFormat(fout, sinPhiMix_.value()); fout << " \\pm "; print.printFormat(fout, sinPhiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; fout << "$\\cosPhiMix =$ & $"; print.printFormat(fout, cosPhiMix_.value()); fout << " \\pm "; print.printFormat(fout, cosPhiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; } else { fout << "$\\phiMix =$ & $"; print.printFormat(fout, phiMix_.value()); fout << " \\pm "; print.printFormat(fout, phiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; } fout << "\\hline \n\\end{tabular}" << std::endl; // Another combination 
for B0(B0bar) -> fbar(f) fout<<"\\begin{tabular}{|l|c|c|c|c|}"< f \\ Fit Fraction & \\Bz ->fbar \\ Fit Fraction & Fit Fraction Asymmetry & $A_{\\CP}$ \\\\"<name(); resName = resName.ReplaceAll("_", "\\_"); fout< =$ & $"; print.printFormat(fout, meanEffB0bar_f_.value()); fout << "$ & $"; print.printFormat(fout, meanEffB0_fbar_.value()); fout << "$ & & \\\\" << std::endl; fout << "$ & & & & & & & \\\\" << std::endl; if (useSinCos_) { fout << "$\\sinPhiMix =$ & $"; print.printFormat(fout, sinPhiMix_.value()); fout << " \\pm "; print.printFormat(fout, sinPhiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; fout << "$\\cosPhiMix =$ & $"; print.printFormat(fout, cosPhiMix_.value()); fout << " \\pm "; print.printFormat(fout, cosPhiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; } else { fout << "$\\phiMix =$ & $"; print.printFormat(fout, phiMix_.value()); fout << " \\pm "; print.printFormat(fout, phiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; } fout << "\\hline \n\\end{tabular}" << std::endl; } if (!sigExtraPdf_.empty()) { fout<<"\\begin{tabular}{|l|c|}"<printFitParameters(iter->second, fout); } fout<<"\\hline \n\\end{tabular}"<updateSigEvents(); // Check whether we want to have randomised initial fit parameters for the signal model if (this->useRandomInitFitPars() == kTRUE) { this->randomiseInitFitPars(); } } void LauTimeDepNonFlavModel::randomiseInitFitPars() { // Only randomise those parameters that are not fixed! 
std::cout<<"INFO in LauTimeDepNonFlavModel::randomiseInitFitPars : Randomising the initial values of the coefficients of the DP components (and phiMix)..."<randomiseInitValues(); coeffPars_B0fbar_B0barf_[i]->randomiseInitValues(); } phiMix_.randomiseValue(-LauConstants::pi, LauConstants::pi); if (useSinCos_) { sinPhiMix_.initValue(TMath::Sin(phiMix_.initValue())); cosPhiMix_.initValue(TMath::Cos(phiMix_.initValue())); } } LauTimeDepNonFlavModel::LauGenInfo LauTimeDepNonFlavModel::eventsToGenerate() { // TODO : Check whether in this bit we keep the same procedure or not // Determine the number of events to generate for each hypothesis // If we're smearing then smear each one individually // NB this individual smearing has to be done individually per tagging category as well LauGenInfo nEvtsGen; LauTagCatGenInfo eventsB0, eventsB0bar; // Signal // If we're including the DP and decay time we can't decide on the tag // yet, since it depends on the whole DP+dt PDF, however, if // we're not then we need to decide. 
Double_t evtWeight(1.0); Double_t nEvts = signalEvents_->genValue(); if ( nEvts < 0.0 ) { evtWeight = -1.0; nEvts = TMath::Abs( nEvts ); } Double_t sigAsym(0.0); if (this->useDP() == kFALSE) { sigAsym = signalAsym_->genValue(); for (LauTagCatParamMap::const_iterator iter = signalTagCatFrac_.begin(); iter != signalTagCatFrac_.end(); ++iter) { const LauParameter& par = iter->second; Double_t eventsbyTagCat = par.value() * nEvts; Double_t eventsB0byTagCat = TMath::Nint(eventsbyTagCat/2.0 * (1.0 - sigAsym)); Double_t eventsB0barbyTagCat = TMath::Nint(eventsbyTagCat/2.0 * (1.0 + sigAsym)); if (this->doPoissonSmearing()) { eventsB0byTagCat = LauRandom::randomFun()->Poisson(eventsB0byTagCat); eventsB0barbyTagCat = LauRandom::randomFun()->Poisson(eventsB0barbyTagCat); } eventsB0[iter->first] = std::make_pair( TMath::Nint(eventsB0byTagCat), evtWeight ); eventsB0bar[iter->first] = std::make_pair( TMath::Nint(eventsB0barbyTagCat), evtWeight ); } nEvtsGen[std::make_pair("signal",-1)] = eventsB0; nEvtsGen[std::make_pair("signal",+1)] = eventsB0bar; } else { Double_t rateB0bar = sigModelB0bar_f_->getDPRate().value(); Double_t rateB0 = sigModelB0_f_->getDPRate().value(); if ( rateB0bar+rateB0 > 1e-30) { sigAsym = (rateB0bar-rateB0)/(rateB0bar+rateB0); } for (LauTagCatParamMap::const_iterator iter = signalTagCatFrac_.begin(); iter != signalTagCatFrac_.end(); ++iter) { const LauParameter& par = iter->second; Double_t eventsbyTagCat = par.value() * nEvts; if (this->doPoissonSmearing()) { eventsbyTagCat = LauRandom::randomFun()->Poisson(eventsbyTagCat); } eventsB0[iter->first] = std::make_pair( TMath::Nint(eventsbyTagCat), evtWeight ); } nEvtsGen[std::make_pair("signal",0)] = eventsB0; // generate signal event, decide tag later. 
} std::cout<<"INFO in LauTimeDepNonFlavModel::eventsToGenerate : Generating toy MC with:"<setGenNtupleIntegerBranchValue("genSig",1); // All the generate*Event() methods have to fill in curEvtDecayTime_ and curEvtDecayTimeErr_ // In addition, generateSignalEvent has to decide on the tag and fill in curEvtTagFlv_ genOK = this->generateSignalEvent(); } else { genOK = kFALSE; } if (!genOK) { // If there was a problem with the generation then break out and return. // The problem model will have adjusted itself so that all should be OK next time. break; } if (this->useDP() == kTRUE) { this->setDPDtBranchValues(); // store DP, decay time and tagging variables in the ntuple } // Store the event's tag and tagging category this->setGenNtupleIntegerBranchValue("cpEigenvalue", cpEigenValue_); this->setGenNtupleIntegerBranchValue("tagCat",curEvtTagCat_); this->setGenNtupleIntegerBranchValue("tagFlv",curEvtTagFlv_); // Store the event number (within this experiment) // and then increment it this->setGenNtupleIntegerBranchValue("iEvtWithinExpt",evtNum); ++evtNum; // Write the values into the tree this->fillGenNtupleBranches(); // Print an occasional progress message if (iEvt%1000 == 0) {std::cout<<"INFO in LauTimeDepNonFlavModel::genExpt : Generated event number "<useDP() && genOK) { sigModelB0bar_f_->checkToyMC(kTRUE); sigModelB0_f_->checkToyMC(kTRUE); sigModelB0bar_fbar_->checkToyMC(kTRUE); sigModelB0_fbar_->checkToyMC(kTRUE); std::cout<<"aSqMaxSet = "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } LauParArray fitFracB0_f = sigModelB0_f_->getFitFractions(); if (fitFracB0_f.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::generate : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } LauParArray fitFracB0bar_fbar = sigModelB0bar_fbar_->getFitFractions(); if (fitFracB0bar_fbar.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::generate : Fit Fraction array of 
unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } LauParArray fitFracB0_fbar = sigModelB0_fbar_->getFitFractions(); if (fitFracB0_fbar.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::generate : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetMeanEff().value()); meanEffB0_f_.value(sigModelB0_f_->getMeanEff().value()); meanEffB0bar_fbar_.value(sigModelB0bar_fbar_->getMeanEff().value()); meanEffB0_fbar_.value(sigModelB0_fbar_->getMeanEff().value()); DPRateB0bar_f_.value(sigModelB0bar_f_->getDPRate().value()); DPRateB0_f_.value(sigModelB0_f_->getDPRate().value()); DPRateB0bar_fbar_.value(sigModelB0bar_fbar_->getDPRate().value()); DPRateB0_fbar_.value(sigModelB0_fbar_->getDPRate().value()); } } // If we're reusing embedded events or if the generation is being // reset then clear the lists of used events //if (!signalTree_.empty() && (reuseSignal_ || !genOK)) { if (reuseSignal_ || !genOK) { for(LauTagCatEmbDataMap::const_iterator iter = signalTree_.begin(); iter != signalTree_.end(); ++iter) { (iter->second)->clearUsedList(); } } return genOK; } Bool_t LauTimeDepNonFlavModel::generateSignalEvent() { // Generate signal event, including SCF if necessary. // DP:DecayTime generation follows. // If it's ok, we then generate mES, DeltaE, Fisher/NN... Bool_t genOK(kTRUE); Bool_t generatedEvent(kFALSE); Bool_t doSquareDP = kinematicsB0bar_f_->squareDP(); doSquareDP &= kinematicsB0_f_->squareDP(); doSquareDP &= kinematicsB0bar_fbar_->squareDP(); doSquareDP &= kinematicsB0_fbar_->squareDP(); LauKinematics* kinematics = 0; //(kinematicsB0bar_); // find the right decay time PDF for the current tagging category LauTagCatDtPdfMap::const_iterator dt_iter = signalDecayTimePdfs_.find(curEvtTagCat_); LauDecayTimePdf* decayTimePdf = (dt_iter != signalDecayTimePdfs_.end()) ? 
dt_iter->second : 0; // find the right embedded data for the current tagging category LauTagCatEmbDataMap::const_iterator emb_iter = signalTree_.find(curEvtTagCat_); LauEmbeddedData* embeddedData = (emb_iter != signalTree_.end()) ? emb_iter->second : 0; // find the right extra PDFs for the current tagging category LauTagCatPdfMap::iterator extra_iter = sigExtraPdf_.find(curEvtTagCat_); LauPdfList* extraPdfs = (extra_iter != sigExtraPdf_.end()) ? &(extra_iter->second) : 0; if (this->useDP()) { if (embeddedData) { // TODO : correct the kinematic term to the two possible final state // This option is not allowed in the moment kinematics = kinematicsB0bar_f_; embeddedData->getEmbeddedEvent(kinematics); curEvtTagFlv_ = TMath::Nint(embeddedData->getValue("tagFlv")); curEvtDecayTimeErr_ = embeddedData->getValue(decayTimePdf->varErrName()); curEvtDecayTime_ = embeddedData->getValue(decayTimePdf->varName()); if (embeddedData->haveBranch("mcMatch")) { Int_t match = TMath::Nint(embeddedData->getValue("mcMatch")); if (match) { this->setGenNtupleIntegerBranchValue("genTMSig",1); this->setGenNtupleIntegerBranchValue("genSCFSig",0); } else { this->setGenNtupleIntegerBranchValue("genTMSig",0); this->setGenNtupleIntegerBranchValue("genSCFSig",1); } } } else { nGenLoop_ = 0; // generate the decay time error (NB the kTRUE forces the generation of a new value) curEvtDecayTimeErr_ = decayTimePdf->generateError(kTRUE); while (generatedEvent == kFALSE && nGenLoop_ < iterationsMax_) { // Calculate the unnormalised truth-matched signal likelihood // First let define the tag flavour Double_t randNo = LauRandom::randomFun()->Rndm(); if (randNo < 0.5) { curEvtTagFlv_ = +1; // B0 tag } else { curEvtTagFlv_ = -1; // B0bar tag } // Calculate event quantities that depend only on the tagCat and tagFlv qD_ = curEvtTagFlv_*dilution_[curEvtTagCat_].unblindValue(); qDDo2_ = curEvtTagFlv_*0.5*deltaDilution_[curEvtTagCat_].unblindValue(); // Generate decay time const Double_t tMin = 
decayTimePdf->minAbscissa(); const Double_t tMax = decayTimePdf->maxAbscissa(); curEvtDecayTime_ = LauRandom::randomFun()->Rndm()*(tMax-tMin) + tMin; // Calculate all the decay time info decayTimePdf->calcLikelihoodInfo(curEvtDecayTime_, curEvtDecayTimeErr_); // Calculate the relevant amplitude normalisation for the two DP's this->calculateAmplitudeNorm(decayTimePdf); // DP variables Double_t m13Sq(0.0), m23Sq(0.0); Double_t randNo_finalState = LauRandom::randomFun()->Rndm(); if (randNo_finalState < normTimeDP_f_/(normTimeDP_f_+normTimeDP_fbar_)) { finalState_ = +1; // A(Abar) -> f // Generate DP position kinematicsB0bar_f_->genFlatPhaseSpace(m13Sq, m23Sq); kinematicsB0_f_->updateKinematics(kinematicsB0bar_f_->getm13Sq(), kinematicsB0bar_f_->getm23Sq() ); // Calculate the total A and Abar for the given DP position sigModelB0_f_->calcLikelihoodInfo(m13Sq, m23Sq); sigModelB0bar_f_->calcLikelihoodInfo(m13Sq, m23Sq); // Calculate DP terms this->calculateDPterms(decayTimePdf, sigModelB0bar_f_, sigModelB0_f_); } else { finalState_ = -1; // A(Abar) -> fbar // Generate DP position kinematicsB0bar_fbar_->genFlatPhaseSpace(m13Sq, m23Sq); kinematicsB0_fbar_->updateKinematics(kinematicsB0bar_fbar_->getm13Sq(), kinematicsB0bar_fbar_->getm23Sq() ); // Calculate the total A and Abar for the given DP position sigModelB0_fbar_->calcLikelihoodInfo(m13Sq, m23Sq); sigModelB0bar_fbar_->calcLikelihoodInfo(m13Sq, m23Sq); // Calculate DP terms this->calculateDPterms(decayTimePdf, sigModelB0bar_fbar_, sigModelB0_fbar_); } //Finally we throw the dice to see whether this event should be generated //We make a distinction between the likelihood of TM and SCF to tag the SCF events as such randNo = LauRandom::randomFun()->Rndm(); if (randNo <= ASq_/aSqMaxSet_ ) { generatedEvent = kTRUE; nGenLoop_ = 0; if (ASq_ > aSqMaxVar_) {aSqMaxVar_ = ASq_;} } else { nGenLoop_++; } } // end of while !generatedEvent loop } // end of if (embeddedData) else control } else { if ( embeddedData ) { 
embeddedData->getEmbeddedEvent(0); curEvtTagFlv_ = TMath::Nint(embeddedData->getValue("tagFlv")); curEvtDecayTimeErr_ = embeddedData->getValue(decayTimePdf->varErrName()); curEvtDecayTime_ = embeddedData->getValue(decayTimePdf->varName()); } } // Check whether we have generated the toy MC OK. if (nGenLoop_ >= iterationsMax_) { aSqMaxSet_ = 1.01 * aSqMaxVar_; genOK = kFALSE; std::cerr<<"WARNING in LauTimeDepNonFlavModel::generateSignalEvent : Hit max iterations: setting aSqMaxSet_ to "< aSqMaxSet_) { aSqMaxSet_ = 1.01 * aSqMaxVar_; genOK = kFALSE; std::cerr<<"WARNING in LauTimeDepNonFlavModel::generateSignalEvent : Found a larger ASq value: setting aSqMaxSet_ to "<generateExtraPdfValues(extraPdfs, embeddedData); } // Check for problems with the embedding if (embeddedData && (embeddedData->nEvents() == embeddedData->nUsedEvents())) { std::cerr<<"WARNING in LauTimeDepNonFlavModel::generateSignalEvent : Source of embedded signal events used up, clearing the list of used events."<clearUsedList(); } return genOK; } void LauTimeDepNonFlavModel::calculateDPterms(LauDecayTimePdf* decayTimePdf, LauIsobarDynamics* sigModelB0bar, LauIsobarDynamics* sigModelB0) { // Retrieve the amplitudes and efficiency from the dynamics const LauComplex& Abar = sigModelB0bar->getEvtDPAmp(); const LauComplex& A = sigModelB0->getEvtDPAmp(); Double_t eff = sigModelB0bar->getEvtEff(); // Calculate the DP terms Double_t aSqSum = A.abs2() + Abar.abs2(); Double_t aSqDif = A.abs2() - Abar.abs2(); LauComplex inter = Abar * A.conj() * phiMixComplex_; Double_t interTermIm = 2.0 * inter.im(); Double_t interTermRe = 2.0 * inter.re(); // Decay time pdf terms Double_t dtCos = decayTimePdf->getCosTerm(); Double_t dtSin = decayTimePdf->getSinTerm(); Double_t dtCosh = decayTimePdf->getCoshTerm(); Double_t dtSinh = decayTimePdf->getSinhTerm(); // Combine all terms Double_t cosTerm = dtCos * qD_ * aSqDif; Double_t sinTerm = dtSin * qD_ * interTermIm; Double_t coshTerm = dtCosh * (1.0 + qDDo2_) * aSqSum; Double_t 
sinhTerm = dtSinh * (1.0 + qDDo2_) * interTermRe; if ( cpEigenValue_ == CPOdd ) { sinTerm *= -1.0; sinhTerm *= -1.0; } // Total amplitude and multiply by the efficiency ASq_ = coshTerm + cosTerm - sinTerm + sinhTerm; ASq_ *= eff; } void LauTimeDepNonFlavModel::calculateAmplitudeNorm(LauDecayTimePdf* decayTimePdf) { // Integrals of the sum of the ampltudes to the f(fbar) integral( |A|^2 + |Abar|^2 ) dP Double_t normTermNonDep_f = sigModelB0bar_f_->getDPNorm() + sigModelB0_f_->getDPNorm(); Double_t normTermNonDep_fbar = sigModelB0bar_fbar_->getDPNorm() + sigModelB0_fbar_->getDPNorm(); // Integrals of cross terms |Abar|*|Aconj| Double_t normTermDep_f = interTermReNorm_f_; Double_t normTermDep_fbar = interTermReNorm_fbar_; // Decay time constant integrals Double_t normTermCosh = decayTimePdf->getNormTermCosh(); Double_t normTermSinh = decayTimePdf->getNormTermSinh(); // Time-dependent DP normalisation terms normTimeDP_f_ = normTermNonDep_f*normTermCosh + normTermDep_f*normTermSinh; normTimeDP_fbar_ = normTermNonDep_fbar*normTermCosh + normTermDep_fbar*normTermSinh; } void LauTimeDepNonFlavModel::setupGenNtupleBranches() { // Setup the required ntuple branches this->addGenNtupleDoubleBranch("evtWeight"); this->addGenNtupleIntegerBranch("genSig"); this->addGenNtupleIntegerBranch("cpEigenvalue"); this->addGenNtupleIntegerBranch("tagFlv"); this->addGenNtupleIntegerBranch("tagCat"); if (this->useDP() == kTRUE) { // Let's add the decay time variables. 
if (signalDecayTimePdfs_.begin() != signalDecayTimePdfs_.end()) { LauDecayTimePdf* pdf = signalDecayTimePdfs_.begin()->second; this->addGenNtupleDoubleBranch(pdf->varName()); this->addGenNtupleDoubleBranch(pdf->varErrName()); } this->addGenNtupleDoubleBranch("m12_f"); this->addGenNtupleDoubleBranch("m23_f"); this->addGenNtupleDoubleBranch("m13_f"); this->addGenNtupleDoubleBranch("m12Sq_f"); this->addGenNtupleDoubleBranch("m23Sq_f"); this->addGenNtupleDoubleBranch("m13Sq_f"); this->addGenNtupleDoubleBranch("cosHel12_f"); this->addGenNtupleDoubleBranch("cosHel23_f"); this->addGenNtupleDoubleBranch("cosHel13_f"); if (kinematicsB0bar_f_->squareDP() && kinematicsB0_f_->squareDP()) { this->addGenNtupleDoubleBranch("mPrime_f"); this->addGenNtupleDoubleBranch("thPrime_f"); } this->addGenNtupleDoubleBranch("m12_fbar"); this->addGenNtupleDoubleBranch("m23_fbar"); this->addGenNtupleDoubleBranch("m13_fbar"); this->addGenNtupleDoubleBranch("m12Sq_fbar"); this->addGenNtupleDoubleBranch("m23Sq_fbar"); this->addGenNtupleDoubleBranch("m13Sq_fbar"); this->addGenNtupleDoubleBranch("cosHel12_fbar"); this->addGenNtupleDoubleBranch("cosHel23_fbar"); this->addGenNtupleDoubleBranch("cosHel13_fbar"); if (kinematicsB0bar_fbar_->squareDP() && kinematicsB0_fbar_->squareDP()) { this->addGenNtupleDoubleBranch("mPrime_fbar"); this->addGenNtupleDoubleBranch("thPrime_fbar"); } // Can add the real and imaginary parts of the B0 and B0bar total // amplitudes seen in the generation (restrict this with a flag // that defaults to false) if ( storeGenAmpInfo_ ) { this->addGenNtupleDoubleBranch("reB0fAmp"); this->addGenNtupleDoubleBranch("imB0fAmp"); this->addGenNtupleDoubleBranch("reB0barfAmp"); this->addGenNtupleDoubleBranch("imB0barfAmp"); this->addGenNtupleDoubleBranch("reB0fbarAmp"); this->addGenNtupleDoubleBranch("imB0fbarAmp"); this->addGenNtupleDoubleBranch("reB0barfbarAmp"); this->addGenNtupleDoubleBranch("imB0barfbarAmp"); } } // Let's look at the extra variables for signal in one of the tagging 
categories if ( ! sigExtraPdf_.empty() ) { LauPdfList oneTagCatPdfList = sigExtraPdf_.begin()->second; for (LauPdfList::const_iterator pdf_iter = oneTagCatPdfList.begin(); pdf_iter != oneTagCatPdfList.end(); ++pdf_iter) { for ( std::vector::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) { if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { this->addGenNtupleDoubleBranch( (*var_iter) ); } } } } } void LauTimeDepNonFlavModel::setDPDtBranchValues() { // Store the decay time variables. if (signalDecayTimePdfs_.begin() != signalDecayTimePdfs_.end()) { LauDecayTimePdf* pdf = signalDecayTimePdfs_.begin()->second; this->setGenNtupleDoubleBranchValue(pdf->varName(),curEvtDecayTime_); this->setGenNtupleDoubleBranchValue(pdf->varErrName(),curEvtDecayTimeErr_); } LauKinematics* kinematics_f(0); LauKinematics* kinematics_fbar(0); if (curEvtTagFlv_<0) { kinematics_f = kinematicsB0_f_; kinematics_fbar = kinematicsB0_fbar_; } else { kinematics_f = kinematicsB0bar_f_; kinematics_fbar = kinematicsB0bar_fbar_; } // Store all the DP information this->setGenNtupleDoubleBranchValue("m12_f", kinematics_f->getm12()); this->setGenNtupleDoubleBranchValue("m23_f", kinematics_f->getm23()); this->setGenNtupleDoubleBranchValue("m13_f", kinematics_f->getm13()); this->setGenNtupleDoubleBranchValue("m12Sq_f", kinematics_f->getm12Sq()); this->setGenNtupleDoubleBranchValue("m23Sq_f", kinematics_f->getm23Sq()); this->setGenNtupleDoubleBranchValue("m13Sq_f", kinematics_f->getm13Sq()); this->setGenNtupleDoubleBranchValue("cosHel12_f", kinematics_f->getc12()); this->setGenNtupleDoubleBranchValue("cosHel23_f", kinematics_f->getc23()); this->setGenNtupleDoubleBranchValue("cosHel13_f", kinematics_f->getc13()); if (kinematics_f->squareDP()) { this->setGenNtupleDoubleBranchValue("mPrime_f", kinematics_f->getmPrime()); this->setGenNtupleDoubleBranchValue("thPrime_f", kinematics_f->getThetaPrime()); } 
this->setGenNtupleDoubleBranchValue("m12_fbar", kinematics_fbar->getm12()); this->setGenNtupleDoubleBranchValue("m23_fbar", kinematics_fbar->getm23()); this->setGenNtupleDoubleBranchValue("m13_fbar", kinematics_fbar->getm13()); this->setGenNtupleDoubleBranchValue("m12Sq_fbar", kinematics_fbar->getm12Sq()); this->setGenNtupleDoubleBranchValue("m23Sq_fbar", kinematics_fbar->getm23Sq()); this->setGenNtupleDoubleBranchValue("m13Sq_fbar", kinematics_fbar->getm13Sq()); this->setGenNtupleDoubleBranchValue("cosHel12_fbar", kinematics_fbar->getc12()); this->setGenNtupleDoubleBranchValue("cosHel23_fbar", kinematics_fbar->getc23()); this->setGenNtupleDoubleBranchValue("cosHel13_fbar", kinematics_fbar->getc13()); if (kinematics_fbar->squareDP()) { this->setGenNtupleDoubleBranchValue("mPrime_fbar", kinematics_fbar->getmPrime()); this->setGenNtupleDoubleBranchValue("thPrime_fbar", kinematics_fbar->getThetaPrime()); } // Can add the real and imaginary parts of the B0 and B0bar total // amplitudes seen in the generation (restrict this with a flag // that defaults to false) if ( storeGenAmpInfo_ ) { if ( this->getGenNtupleIntegerBranchValue("genSig")==1 ) { LauComplex Abar_f = sigModelB0bar_f_->getEvtDPAmp(); LauComplex A_f = sigModelB0_f_->getEvtDPAmp(); LauComplex Abar_fbar = sigModelB0bar_fbar_->getEvtDPAmp(); LauComplex A_fbar = sigModelB0_fbar_->getEvtDPAmp(); this->setGenNtupleDoubleBranchValue("reB0fAmp", A_f.re()); this->setGenNtupleDoubleBranchValue("imB0fAmp", A_f.im()); this->setGenNtupleDoubleBranchValue("reB0barfAmp", Abar_f.re()); this->setGenNtupleDoubleBranchValue("imB0barfAmp", Abar_f.im()); this->setGenNtupleDoubleBranchValue("reB0fbarAmp", A_fbar.re()); this->setGenNtupleDoubleBranchValue("imB0fbarAmp", A_fbar.im()); this->setGenNtupleDoubleBranchValue("reB0barfbarAmp", Abar_fbar.re()); this->setGenNtupleDoubleBranchValue("imB0barfbarAmp", Abar_fbar.im()); } else { this->setGenNtupleDoubleBranchValue("reB0fAmp", 0.0); 
this->setGenNtupleDoubleBranchValue("imB0fAmp", 0.0); this->setGenNtupleDoubleBranchValue("reB0barfAmp", 0.0); this->setGenNtupleDoubleBranchValue("imB0barfAmp", 0.0); this->setGenNtupleDoubleBranchValue("reB0fbarAmp", 0.0); this->setGenNtupleDoubleBranchValue("imB0fbarAmp", 0.0); this->setGenNtupleDoubleBranchValue("reB0barfbarAmp", 0.0); this->setGenNtupleDoubleBranchValue("imB0barfbarAmp", 0.0); } } } void LauTimeDepNonFlavModel::generateExtraPdfValues(LauPdfList* extraPdfs, LauEmbeddedData* embeddedData) { // TODO : need to add the additional DP LauKinematics* kinematics_f(0); //LauKinematics* kinematics_fbar(0); if (curEvtTagFlv_<0) { kinematics_f = kinematicsB0_f_; //kinematics_fbar = kinematicsB0_fbar_; } else { kinematics_f = kinematicsB0bar_f_; //kinematics_fbar = kinematicsB0bar_fbar_; } // Generate from the extra PDFs if (extraPdfs) { for (LauPdfList::iterator pdf_iter = extraPdfs->begin(); pdf_iter != extraPdfs->end(); ++pdf_iter) { LauFitData genValues; if (embeddedData) { genValues = embeddedData->getValues( (*pdf_iter)->varNames() ); } else { genValues = (*pdf_iter)->generate(kinematics_f); } for ( LauFitData::const_iterator var_iter = genValues.begin(); var_iter != genValues.end(); ++var_iter ) { TString varName = var_iter->first; if ( varName != "m13Sq" && varName != "m23Sq" ) { Double_t value = var_iter->second; this->setGenNtupleDoubleBranchValue(varName,value); } } } } } void LauTimeDepNonFlavModel::propagateParUpdates() { // Update the complex mixing phase if (useSinCos_) { phiMixComplex_.setRealPart(cosPhiMix_.unblindValue()); phiMixComplex_.setImagPart(-1.0*sinPhiMix_.unblindValue()); } else { phiMixComplex_.setRealPart(TMath::Cos(-1.0*phiMix_.unblindValue())); phiMixComplex_.setImagPart(TMath::Sin(-1.0*phiMix_.unblindValue())); } // Update the total normalisation for the signal likelihood if (this->useDP() == kTRUE) { this->updateCoeffs(); sigModelB0bar_f_->updateCoeffs(coeffsB0bar_f_); sigModelB0_f_->updateCoeffs(coeffsB0_f_); 
sigModelB0bar_fbar_->updateCoeffs(coeffsB0bar_fbar_); sigModelB0_fbar_->updateCoeffs(coeffsB0_fbar_); this->calcInterTermNorm(); } // Update the signal events from the background numbers if not doing an extended fit if (!this->doEMLFit()) { this->updateSigEvents(); } } void LauTimeDepNonFlavModel::updateSigEvents() { // The background parameters will have been set from Minuit. // We need to update the signal events using these. Double_t nTotEvts = this->eventsPerExpt(); Double_t signalEvents = nTotEvts; // tagging-category fractions for signal events this->setFirstTagCatFrac(signalTagCatFrac_); signalEvents_->range(-2.0*nTotEvts,2.0*nTotEvts); if ( ! signalEvents_->fixed() ) { signalEvents_->value(signalEvents); } } void LauTimeDepNonFlavModel::setFirstTagCatFrac(LauTagCatParamMap& theMap) { Double_t firstCatFrac = 1.0; Int_t firstCat(0); for (LauTagCatParamMap::iterator iter = theMap.begin(); iter != theMap.end(); ++iter) { if (iter == theMap.begin()) { firstCat = iter->first; continue; } LauParameter& par = iter->second; firstCatFrac -= par.unblindValue(); } theMap[firstCat].value(firstCatFrac); } void LauTimeDepNonFlavModel::cacheInputFitVars() { // Fill the internal data trees of the signal and background models. // Note that we store the events of both charges in both the // negative and the positive models. It's only later, at the stage // when the likelihood is being calculated, that we separate them. LauFitDataTree* inputFitData = this->fitData(); // Start by caching the tagging and CP-eigenstate information evtTagCatVals_.clear(); evtTagFlvVals_.clear(); evtCPEigenVals_.clear(); if ( ! inputFitData->haveBranch( tagCatVarName_ ) ) { std::cerr << "ERROR in LauTimeDepNonFlavModel::cacheInputFitVars : Input data does not contain branch \"" << tagCatVarName_ << "\"." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( ! 
inputFitData->haveBranch( tagVarName_ ) ) { std::cerr << "ERROR in LauTimeDepNonFlavModel::cacheInputFitVars : Input data does not contain branch \"" << tagVarName_ << "\"." << std::endl; gSystem->Exit(EXIT_FAILURE); } const Bool_t hasCPEV = ( (cpevVarName_ != "") && inputFitData->haveBranch( cpevVarName_ ) ); UInt_t nEvents = inputFitData->nEvents(); evtTagCatVals_.reserve( nEvents ); evtTagFlvVals_.reserve( nEvents ); evtCPEigenVals_.reserve( nEvents ); LauFitData::const_iterator fitdata_iter; for (UInt_t iEvt = 0; iEvt < nEvents; iEvt++) { const LauFitData& dataValues = inputFitData->getData(iEvt); fitdata_iter = dataValues.find( tagCatVarName_ ); curEvtTagCat_ = static_cast( fitdata_iter->second ); if ( ! this->validTagCat( curEvtTagCat_ ) ) { std::cerr << "WARNING in LauTimeDepNonFlavModel::cacheInputFitVars : Invalid tagging category " << curEvtTagCat_ << " for event " << iEvt << ", setting it to untagged" << std::endl; curEvtTagCat_ = 0; } evtTagCatVals_.push_back( curEvtTagCat_ ); fitdata_iter = dataValues.find( tagVarName_ ); curEvtTagFlv_ = static_cast( fitdata_iter->second ); if ( TMath::Abs( curEvtTagFlv_ ) != 1 ) { if ( curEvtTagFlv_ > 0 ) { std::cerr << "WARNING in LauTimeDepNonFlavModel::cacheInputFitVars : Invalid tagging output " << curEvtTagFlv_ << " for event " << iEvt << ", setting it to +1" << std::endl; curEvtTagFlv_ = +1; } else { std::cerr << "WARNING in LauTimeDepNonFlavModel::cacheInputFitVars : Invalid tagging output " << curEvtTagFlv_ << " for event " << iEvt << ", setting it to -1" << std::endl; curEvtTagFlv_ = -1; } } evtTagFlvVals_.push_back( curEvtTagFlv_ ); // if the CP-eigenvalue is in the data use those, otherwise use the default if ( hasCPEV ) { fitdata_iter = dataValues.find( cpevVarName_ ); const Int_t cpEV = static_cast( fitdata_iter->second ); if ( cpEV == 1 ) { cpEigenValue_ = CPEven; } else if ( cpEV == -1 ) { cpEigenValue_ = CPOdd; } else { std::cerr<<"WARNING in LauTimeDepNonFlavModel::cacheInputFitVars : Unknown value: 
"<useDP() == kTRUE) { // DecayTime and SigmaDecayTime for (LauTagCatDtPdfMap::iterator dt_iter = signalDecayTimePdfs_.begin(); dt_iter != signalDecayTimePdfs_.end(); ++dt_iter) { (*dt_iter).second->cacheInfo(*inputFitData); } } // ...and then the extra PDFs for (LauTagCatPdfMap::iterator pdf_iter = sigExtraPdf_.begin(); pdf_iter != sigExtraPdf_.end(); ++pdf_iter) { this->cacheInfo(pdf_iter->second, *inputFitData); } if (this->useDP() == kTRUE) { sigModelB0bar_f_->fillDataTree(*inputFitData); sigModelB0_f_->fillDataTree(*inputFitData); sigModelB0bar_fbar_->fillDataTree(*inputFitData); sigModelB0_fbar_->fillDataTree(*inputFitData); } } Double_t LauTimeDepNonFlavModel::getTotEvtLikelihood(const UInt_t iEvt) { // Find out whether the tag-side B was a B0 or a B0bar. curEvtTagFlv_ = evtTagFlvVals_[iEvt]; // Also get the tagging category. curEvtTagCat_ = evtTagCatVals_[iEvt]; // Get the CP eigenvalue of the current event cpEigenValue_ = evtCPEigenVals_[iEvt]; // Get the DP and DecayTime likelihood for signal this->getEvtDPDtLikelihood(iEvt); // Get the combined extra PDFs likelihood for signal this->getEvtExtraLikelihoods(iEvt); // Construct the total likelihood for signal, qqbar and BBbar backgrounds Double_t sigLike = sigDPLike_ * sigExtraLike_; Double_t signalEvents = signalEvents_->unblindValue(); if (this->useDP() == kFALSE) { signalEvents *= 0.5 * (1.0 + curEvtTagFlv_ * signalAsym_->unblindValue()); } // Construct the total event likelihood Double_t likelihood(sigLike*signalTagCatFrac_[curEvtTagCat_].unblindValue()); if ( ! signalEvents_->fixed() ) { likelihood *= signalEvents; } return likelihood; } Double_t LauTimeDepNonFlavModel::getEventSum() const { Double_t eventSum(0.0); eventSum += signalEvents_->unblindValue(); return eventSum; } void LauTimeDepNonFlavModel::getEvtDPDtLikelihood(const UInt_t iEvt) { // Function to return the signal and background likelihoods for the // Dalitz plot for the given event evtNo. 
sigDPLike_ = 1.0; //There's always a likelihood term for signal, so we better not zero it. if ( this->useDP() == kFALSE ) { return; } // Mistag probabilities. Defined as: omega = prob of the tagging B0 being reported as B0bar // Whether we want omega or omegaBar depends on q_tag, hence curEvtTagFlv_*... in the previous lines //Double_t misTagFrac = 0.5 * (1.0 - dilution_[curEvtTagCat_] - qDDo2); //Double_t misTagFracBar = 0.5 * (1.0 - dilution_[curEvtTagCat_] + qDDo2); // Calculate event quantities qD_ = curEvtTagFlv_*dilution_[curEvtTagCat_].unblindValue(); qDDo2_ = curEvtTagFlv_*0.5*deltaDilution_[curEvtTagCat_].unblindValue(); //LauDecayTimePdf* signalDtPdf = signalDecayTimePdfs_[curEvtTagCat_]; LauDecayTimePdf* decayTimePdf = signalDecayTimePdfs_[curEvtTagCat_]; - decayTimePdf->calcLikelihoodInfo(iEvt); + decayTimePdf->calcLikelihoodInfo(static_cast(iEvt)); // Calculate the relevant amplitude normalisation for the two DP's this->calculateAmplitudeNorm(decayTimePdf); Double_t randNo = LauRandom::randomFun()->Rndm(); if (randNo < normTimeDP_f_/(normTimeDP_f_+normTimeDP_fbar_)) { finalState_ = +1; // A(Abar) -> f // Calculate the likelihood for the f final state sigModelB0bar_f_->calcLikelihoodInfo(iEvt); sigModelB0_f_->calcLikelihoodInfo(iEvt); // Calculate DP terms this->calculateDPterms(decayTimePdf, sigModelB0bar_f_, sigModelB0_f_); } else { finalState_ = -1; // A(Abar) -> fbar // Calculate the likelihood for the fbar final state sigModelB0bar_fbar_->calcLikelihoodInfo(iEvt); sigModelB0_fbar_->calcLikelihoodInfo(iEvt); // Calculate DP terms this->calculateDPterms(decayTimePdf, sigModelB0bar_fbar_, sigModelB0_fbar_); } // Calculate the normalised signal likelihood sigDPLike_ = ASq_ / (normTimeDP_f_+normTimeDP_fbar_); } void LauTimeDepNonFlavModel::getEvtExtraLikelihoods(const UInt_t iEvt) { // Function to return the signal and background likelihoods for the // extra variables for the given event evtNo. 
sigExtraLike_ = 1.0; //There's always a likelihood term for signal, so we better not zero it. // First, those independent of the tagging of the event: // signal LauTagCatPdfMap::iterator sig_iter = sigExtraPdf_.find(curEvtTagCat_); LauPdfList* pdfList = (sig_iter != sigExtraPdf_.end())? &(sig_iter->second) : 0; if (pdfList) { sigExtraLike_ = this->prodPdfValue( *pdfList, iEvt ); } } void LauTimeDepNonFlavModel::updateCoeffs() { coeffsB0bar_f_.clear(); coeffsB0_f_.clear(); coeffsB0bar_fbar_.clear(); coeffsB0_fbar_.clear(); coeffsB0bar_f_.reserve(nSigComp_); coeffsB0_f_.reserve(nSigComp_); coeffsB0bar_fbar_.reserve(nSigComp_); coeffsB0_fbar_.reserve(nSigComp_); for (UInt_t i = 0; i < nSigComp_; ++i) { coeffsB0bar_f_.push_back(coeffPars_B0fbar_B0barf_[i]->antiparticleCoeff()); coeffsB0_f_.push_back(coeffPars_B0f_B0barfbar_[i]->particleCoeff()); coeffsB0bar_fbar_.push_back(coeffPars_B0f_B0barfbar_[i]->antiparticleCoeff()); coeffsB0_fbar_.push_back(coeffPars_B0fbar_B0barf_[i]->particleCoeff()); } } Bool_t LauTimeDepNonFlavModel::validTagCat(Int_t tagCat) const { return (validTagCats_.find(tagCat) != validTagCats_.end()); } Bool_t LauTimeDepNonFlavModel::checkTagCatFracMap(const LauTagCatParamMap& theMap) const { // First check that there is an entry for each tagging category. // NB an entry won't have been added if it isn't a valid category // so don't need to check for that here. 
// NOTE(review): This chunk of the LauTimeDepNonFlavModel implementation has been
// corrupted in transit: the original line breaks were collapsed, and every span
// between a '<' and a following '>' appears to have been stripped (as if HTML
// tags were removed).  Visible symptoms: "std::vector::const_iterator" has lost
// its "<TString>" template argument; std::cerr/std::cout chains are truncated
// right after the string literal (the "<<std::endl; ..." tails are gone, e.g.
// '..."< 1E-10)' fuses an error message with a later comparison); and because
// the lines were collapsed, each mid-line "//" comment now comments out the
// remainder of its physical line.  Do NOT hand-edit this text — recover the
// original file from version control.  The code below is preserved byte-for-byte;
// the comments before each collapsed line describe what it visibly contains.
//
// Collapsed line 1 of 5:
//  - tail of checkTagCatFracMap(): checks theMap.size() against
//    signalTagCatFrac_.size() and (apparently — the comparison's left operand
//    was eaten) that the tagging-category fractions sum to unity within 1E-10,
//    emitting ERROR messages otherwise.
//  - checkMixingPhase(): wraps the mixing phase into (-pi, pi) by adding or
//    subtracting LauConstants::twoPi, then — to avoid artificially large pulls
//    when the generated phase sits near +/-pi — unwraps the phase back toward
//    genPhase before storing it via phiMix_.value(phase) and phiMix_.updatePull().
//  - embedSignal(tagCat, fileName, treeName, ...): guards against re-embedding
//    for a tagging category, builds the embedded-data tree (the construction
//    call was eaten; only "findBranches()" survives), and deletes/zeroes
//    signalTree_[tagCat] on failure.
//  - start of setupSPlotNtupleBranches(): books the iExpt and iEvtWithinExpt
//    integer branches, plus an "efficiency" branch when storeDPEff() is set.
if (theMap.size() != signalTagCatFrac_.size()) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::checkTagCatFracMap : Not all tagging categories present."< 1E-10) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::checkTagCatFracMap : Tagging category event fractions do not sum to unity."< -LauConstants::pi && phase < LauConstants::pi) { withinRange = kTRUE; } else { // Not within the specified range if (phase > LauConstants::pi) { phase -= LauConstants::twoPi; } else if (phase < -LauConstants::pi) { phase += LauConstants::twoPi; } } } // A further problem can occur when the generated phase is close to -pi or pi. // The phase can wrap over to the other end of the scale - // this leads to artificially large pulls so we wrap it back. Double_t diff = phase - genPhase; if (diff > LauConstants::pi) { phase -= LauConstants::twoPi; } else if (diff < -LauConstants::pi) { phase += LauConstants::twoPi; } // finally store the new value in the parameter // and update the pull phiMix_.value(phase); phiMix_.updatePull(); } void LauTimeDepNonFlavModel::embedSignal(Int_t tagCat, const TString& fileName, const TString& treeName, Bool_t reuseEventsWithinEnsemble, Bool_t reuseEventsWithinExperiment) { if (signalTree_[tagCat]) { std::cerr<<"ERROR in LauTimeDepNonFlavModel::embedSignal : Already embedding signal from file for tagging category "<findBranches(); if (!dataOK) { delete signalTree_[tagCat]; signalTree_[tagCat] = 0; std::cerr<<"ERROR in LauTimeDepNonFlavModel::embedSignal : Problem creating data tree for embedding."<addSPlotNtupleIntegerBranch("iExpt"); this->addSPlotNtupleIntegerBranch("iEvtWithinExpt"); // Store the efficiency of the event (for inclusive BF calculations). if (this->storeDPEff()) { this->addSPlotNtupleDoubleBranch("efficiency"); } // Store the total event likelihood for each species.
// Collapsed line 2 of 5:
//  - end of setupSPlotNtupleBranches(): books "sigTotalLike", "sigDPLike"
//    (DP mode only), and the extra-PDF branches for the first tag-cat's PDF
//    list via addSPlotNtupleBranches(pdfList, "sig").
//  - addSPlotNtupleBranches(extraPdfs, prefix): for each PDF, counts input
//    variables other than the DP coordinates m13Sq/m23Sq; books one
//    "<prefix><var>Like" branch for 1-variable PDFs, per-variable branches plus
//    a combined "<prefix><allVars>Like" branch for 2-variable PDFs, and emits a
//    WARNING that 3D PDFs are not yet handled.  The iterator type here reads
//    "std::vector::const_iterator" — presumably std::vector<TString> originally.
//  - start of setSPlotNtupleBranchValues(): accumulates totalLike over the
//    per-event likelihoods from (*pdf_iter)->calcLikelihoodInfo(iEvt) /
//    getLikelihood().
this->addSPlotNtupleDoubleBranch("sigTotalLike"); // Store the DP likelihoods if (this->useDP()) { this->addSPlotNtupleDoubleBranch("sigDPLike"); } // Store the likelihoods for each extra PDF const LauPdfList* pdfList( &(sigExtraPdf_.begin()->second) ); this->addSPlotNtupleBranches(pdfList, "sig"); } void LauTimeDepNonFlavModel::addSPlotNtupleBranches(const LauPdfList* extraPdfs, const TString& prefix) { if (!extraPdfs) { return; } // Loop through each of the PDFs for (LauPdfList::const_iterator pdf_iter = extraPdfs->begin(); pdf_iter != extraPdfs->end(); ++pdf_iter) { // Count the number of input variables that are not // DP variables (used in the case where there is DP // dependence for e.g. MVA) UInt_t nVars(0); for ( std::vector::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) { if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { ++nVars; } } if ( nVars == 1 ) { // If the PDF only has one variable then // simply add one branch for that variable TString varName = (*pdf_iter)->varName(); TString name(prefix); name += varName; name += "Like"; this->addSPlotNtupleDoubleBranch(name); } else if ( nVars == 2 ) { // If the PDF has two variables then we // need a branch for them both together and // branches for each TString allVars(""); for ( std::vector::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) { allVars += (*var_iter); TString name(prefix); name += (*var_iter); name += "Like"; this->addSPlotNtupleDoubleBranch(name); } TString name(prefix); name += allVars; name += "Like"; this->addSPlotNtupleDoubleBranch(name); } else { std::cerr<<"WARNING in LauTimeDepNonFlavModel::addSPlotNtupleBranches : Can't yet deal with 3D PDFs."<begin(); pdf_iter != extraPdfs->end(); ++pdf_iter) { // calculate the likelihood for this event (*pdf_iter)->calcLikelihoodInfo(iEvt); extraLike = (*pdf_iter)->getLikelihood(); totalLike *= extraLike; //
// Collapsed line 3 of 5:
//  - body of setSPlotNtupleBranchValues(): mirrors addSPlotNtupleBranches()
//    above — for 1-variable PDFs stores extraLike in "<prefix><var>Like"; for
//    2-variable PDFs stores per-variable likelihoods from
//    getLikelihood(varName) plus the combined value; warns for 3D PDFs.
//    NOTE(review): the 3D warning message here says "LauAllFitModel::
//    setSPlotNtupleBranchValues" while every other message in this chunk says
//    "LauTimeDepNonFlavModel" — looks like a copy-paste leftover; confirm and
//    fix in the restored source.
//  - start of variableNames(): inserts "DP" into the name set when useDP().
Count the number of input variables that are not // DP variables (used in the case where there is DP // dependence for e.g. MVA) UInt_t nVars(0); for ( std::vector::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) { if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { ++nVars; } } if ( nVars == 1 ) { // If the PDF only has one variable then // simply store the value for that variable TString varName = (*pdf_iter)->varName(); TString name(prefix); name += varName; name += "Like"; this->setSPlotNtupleDoubleBranchValue(name, extraLike); } else if ( nVars == 2 ) { // If the PDF has two variables then we // store the value for them both together // and for each on their own TString allVars(""); for ( std::vector::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) { allVars += (*var_iter); TString name(prefix); name += (*var_iter); name += "Like"; Double_t indivLike = (*pdf_iter)->getLikelihood( (*var_iter) ); this->setSPlotNtupleDoubleBranchValue(name, indivLike); } TString name(prefix); name += allVars; name += "Like"; this->setSPlotNtupleDoubleBranchValue(name, extraLike); } else { std::cerr<<"WARNING in LauAllFitModel::setSPlotNtupleBranchValues : Can't yet deal with 3D PDFs."<useDP()) { nameSet.insert("DP"); } LauPdfList pdfList( (sigExtraPdf_.begin()->second) ); for (LauPdfList::const_iterator pdf_iter = pdfList.begin(); pdf_iter != pdfList.end(); ++pdf_iter) { // Loop over the variables involved in each PDF for ( std::vector::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) { // If they are not DP coordinates then add them if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { nameSet.insert( (*var_iter) ); } } } return nameSet; } LauSPlot::NumbMap LauTimeDepNonFlavModel::freeSpeciesNames() const { LauSPlot::NumbMap numbMap; if (!signalEvents_->fixed() &&
// Collapsed line 4 of 5:
//  - freeSpeciesNames() / fixdSpeciesNames(): report the "sig" yield
//    (signalEvents_->genValue()) as a free or fixed species respectively,
//    only under an extended ML fit (doEMLFit()).
//  - twodimPDFs(): for each extra PDF with exactly two non-DP variables,
//    registers the ("sig", (var0, var1)) pair for sPlot 2D handling.
//  - start of storePerEvtLlhds(): fetches the cached fit data and, when the DP
//    model was not used but DP efficiency storage is requested, initialises and
//    fills the four isobar-dynamics models (B0bar_f / B0_f / B0bar_fbar /
//    B0_fbar) so the efficiency can be read back; then begins the per-event
//    loop, reading curEvtTagFlv_ / curEvtTagCat_ from the cached vectors.
this->doEMLFit()) { numbMap["sig"] = signalEvents_->genValue(); } return numbMap; } LauSPlot::NumbMap LauTimeDepNonFlavModel::fixdSpeciesNames() const { LauSPlot::NumbMap numbMap; if (signalEvents_->fixed() && this->doEMLFit()) { numbMap["sig"] = signalEvents_->genValue(); } return numbMap; } LauSPlot::TwoDMap LauTimeDepNonFlavModel::twodimPDFs() const { LauSPlot::TwoDMap twodimMap; const LauPdfList* pdfList = &(sigExtraPdf_.begin()->second); for (LauPdfList::const_iterator pdf_iter = pdfList->begin(); pdf_iter != pdfList->end(); ++pdf_iter) { // Count the number of input variables that are not DP variables UInt_t nVars(0); for ( std::vector::const_iterator var_iter = (*pdf_iter)->varNames().begin(); var_iter != (*pdf_iter)->varNames().end(); ++var_iter ) { if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { ++nVars; } } if ( nVars == 2 ) { twodimMap.insert( std::make_pair( "sig", std::make_pair( (*pdf_iter)->varNames()[0], (*pdf_iter)->varNames()[1] ) ) ); } } return twodimMap; } void LauTimeDepNonFlavModel::storePerEvtLlhds() { std::cout<<"INFO in LauTimeDepNonFlavModel::storePerEvtLlhds : Storing per-event likelihood values..."<fitData(); // if we've not been using the DP model then we need to cache all // the info here so that we can get the efficiency from it if (!this->useDP() && this->storeDPEff()) { sigModelB0bar_f_->initialise(coeffsB0bar_f_); sigModelB0_f_->initialise(coeffsB0_f_); sigModelB0bar_fbar_->initialise(coeffsB0bar_fbar_); sigModelB0_fbar_->initialise(coeffsB0_fbar_); sigModelB0bar_f_->fillDataTree(*inputFitData); sigModelB0_f_->fillDataTree(*inputFitData); sigModelB0bar_fbar_->fillDataTree(*inputFitData); sigModelB0_fbar_->fillDataTree(*inputFitData); } UInt_t evtsPerExpt(this->eventsPerExpt()); LauIsobarDynamics* sigModel(sigModelB0bar_f_); for (UInt_t iEvt = 0; iEvt < evtsPerExpt; ++iEvt) { // Find out whether we have B0bar or B0 curEvtTagFlv_ = evtTagFlvVals_[iEvt]; curEvtTagCat_ = evtTagCatVals_[iEvt]; LauTagCatPdfMap::iterator
// Collapsed line 5 of 5:
//  - rest of the storePerEvtLlhds() event loop: looks up the extra-PDF list
//    for the event's tagging category; computes the DP likelihood via
//    getEvtDPDtLikelihood(iEvt); stores "efficiency" (recomputing
//    calcLikelihoodInfo(iEvt) when the DP model was not in the fit);
//    stores "sigDPLike" and accumulates sigTotalLike_ (seeded with the DP
//    likelihood, or 1.0 without the DP), multiplies in the extra-PDF values
//    from setSPlotNtupleBranchValues(sigPdfs, "sig", iEvt), stores
//    "sigTotalLike", and fills the sPlot ntuple.  The closing of the method
//    (and anything after it) was truncated by the corruption.
sig_iter = sigExtraPdf_.find(curEvtTagCat_); LauPdfList* sigPdfs = (sig_iter != sigExtraPdf_.end())? &(sig_iter->second) : 0; // the DP information this->getEvtDPDtLikelihood(iEvt); if (this->storeDPEff()) { if (!this->useDP()) { sigModel->calcLikelihoodInfo(iEvt); } this->setSPlotNtupleDoubleBranchValue("efficiency",sigModel->getEvtEff()); } if (this->useDP()) { sigTotalLike_ = sigDPLike_; this->setSPlotNtupleDoubleBranchValue("sigDPLike",sigDPLike_); } else { sigTotalLike_ = 1.0; } // the signal PDF values sigTotalLike_ *= this->setSPlotNtupleBranchValues(sigPdfs, "sig", iEvt); // the total likelihoods this->setSPlotNtupleDoubleBranchValue("sigTotalLike",sigTotalLike_); // fill the tree this->fillSPlotNtupleBranches(); } std::cout<<"INFO in LauTimeDepNonFlavModel::storePerEvtLlhds : Finished storing per-event likelihood values."<