diff --git a/examples/GenFitTimeDep_Bd2Dpipi.cc b/examples/GenFitTimeDep_Bd2Dpipi.cc index 38a40ff..3494eac 100644 --- a/examples/GenFitTimeDep_Bd2Dpipi.cc +++ b/examples/GenFitTimeDep_Bd2Dpipi.cc @@ -1,1084 +1,1082 @@ #include #include #include #include #include #include #include "TFile.h" #include "TH2.h" #include "TRandom.h" #include "TString.h" #include "TSystem.h" #include "TF1.h" #include "TCanvas.h" #include "Lau1DHistPdf.hh" #include "Lau1DCubicSpline.hh" #include "LauBinnedDecayTimeEfficiency.hh" #include "LauBkgndDPModel.hh" #include "LauCrystalBallPdf.hh" #include "LauDaughters.hh" #include "LauDecayTime.hh" #include "LauDecayTimePdf.hh" #include "LauDecayTimePhysicsModel.hh" #include "LauDecayTimeResolution.hh" #include "LauEffModel.hh" #include "LauExponentialPdf.hh" #include "LauFlavTag.hh" #include "LauGounarisSakuraiRes.hh" #include "LauIsobarDynamics.hh" #include "LauLHCbNR.hh" #include "LauMagPhaseCoeffSet.hh" #include "LauRandom.hh" #include "LauRealImagCoeffSet.hh" #include "LauResonanceMaker.hh" #include "LauSplineDecayTimeEfficiency.hh" #include "LauSumPdf.hh" #include "LauTimeDepFitModel.hh" #include "LauVetoes.hh" #include "TimeDep_Dpipi_ProgOpts.hh" // Helper function for creating Double-Crystal-Ball PDFs LauAbsPdf* makeDCBPDF( const TString& varName, const Double_t varMin, const Double_t varMax, const TString& componentName, const std::map paramVals ); // Helper function for creating sets of DTA splines // TODO - not yet used using LauCubicSplineDecayTimeEfficiencyPtr = std::unique_ptr>; using LauSixthSplineDecayTimeEfficiencyPtr = std::unique_ptr>; auto createSplineDTASet( const TString& jsonFileName, const TString& controlSplineName, const TString& signalSplineName, const std::vector& bkgndSplineNames ) { auto controlSpline = Lau1DCubicSpline::readFromJson( jsonFileName, controlSplineName ); LauCubicSplineDecayTimeEfficiencyPtr controlEff { std::make_unique>( controlSplineName, std::move( controlSpline ) ) }; 
LauSixthSplineDecayTimeEfficiencyPtr signalEff; if ( signalSplineName != "" ) { auto signalSpline = Lau1DCubicSpline::readFromJson( jsonFileName, signalSplineName ); signalEff = std::make_unique>( signalSplineName, *controlEff, std::move( signalSpline ) ); } std::vector bkgndEffs; bkgndEffs.reserve( bkgndSplineNames.size() ); for ( auto& bkgndSplineName : bkgndSplineNames ) { auto bkgndSpline = Lau1DCubicSpline::readFromJson( jsonFileName, bkgndSplineName ); bkgndEffs.push_back( std::make_unique>( bkgndSplineName, *controlEff, std::move( bkgndSpline ) ) ); } return std::make_tuple( std::move(controlEff), std::move(signalEff), std::move(bkgndEffs) ); } class BgTagDecayFlvAsym { private: const Double_t eff_decayB0_taggedB0 {0.}; const Double_t eff_decayB0_taggedB0bar {0.}; const Double_t eff_decayB0bar_taggedB0 {0.}; const Double_t eff_decayB0bar_taggedB0bar {0.}; const Double_t eff_decayB0_ave {0.}; const Double_t eff_decayB0bar_ave {0.}; const Double_t eff_decayB0_delta {0.}; const Double_t eff_decayB0bar_delta {0.}; public: BgTagDecayFlvAsym() = default; BgTagDecayFlvAsym( //This constructor assumes a tagging efficincy of 100% const Double_t n_decayB0_taggedB0, const Double_t n_decayB0_taggedB0bar, const Double_t n_decayB0bar_taggedB0, const Double_t n_decayB0bar_taggedB0bar ): eff_decayB0_taggedB0 {n_decayB0_taggedB0 /(n_decayB0_taggedB0+n_decayB0_taggedB0bar)}, eff_decayB0_taggedB0bar {n_decayB0_taggedB0bar/(n_decayB0_taggedB0+n_decayB0_taggedB0bar)}, eff_decayB0bar_taggedB0 {n_decayB0bar_taggedB0 /(n_decayB0bar_taggedB0+n_decayB0bar_taggedB0bar)}, eff_decayB0bar_taggedB0bar{n_decayB0bar_taggedB0bar/(n_decayB0bar_taggedB0+n_decayB0bar_taggedB0bar)}, eff_decayB0_ave {0.5*(eff_decayB0_taggedB0 + eff_decayB0_taggedB0bar)}, eff_decayB0bar_ave {0.5*(eff_decayB0bar_taggedB0 + eff_decayB0bar_taggedB0bar)}, eff_decayB0_delta {eff_decayB0_taggedB0 - eff_decayB0_taggedB0bar}, eff_decayB0bar_delta {eff_decayB0bar_taggedB0 - eff_decayB0bar_taggedB0bar} {} std::pair 
decayB0() const {return std::make_pair(eff_decayB0_taggedB0 ,eff_decayB0_taggedB0bar );} std::pair decayB0bar() const {return std::make_pair(eff_decayB0bar_taggedB0,eff_decayB0bar_taggedB0bar);} std::pair aveDeltaB0() const {return std::make_pair(eff_decayB0_ave , eff_decayB0_delta) ;} std::pair aveDeltaB0bar() const {return std::make_pair(eff_decayB0bar_ave, eff_decayB0bar_delta);} //TODO add constructor for if tagging eff isn't 100%? }; int main(const int argc, const char ** argv) { const TestDpipi_ProgramSettings settings{argc,argv}; if ( ! settings.parsedOK ) { return EXIT_FAILURE; } if ( settings.helpRequested ) { return EXIT_SUCCESS; } if(settings.dataFit and settings.command == Command::Generate) { std::cerr << "I can't generate data! Wait for Run 3" << std::endl; return EXIT_FAILURE; } if(settings.dataFit and not settings.blindFit and not settings.fixPhiMix) { std::cerr << "We don't have permission to do unblinded fits yet!" << std::endl; return EXIT_FAILURE; } if(settings.run == 0 or settings.run > 2) { std::cerr << "The Run number must be either 1 or 2" << std::endl; return EXIT_FAILURE; } const UInt_t runNo = settings.run; LauRandom::setSeed(settings.RNGseed); LauDaughters* daughtersB0bar = new LauDaughters("B0_bar", "pi+", "pi-", "D0", kTRUE); LauDaughters* daughtersB0 = new LauDaughters("B0", "pi+", "pi-", "D0_bar",kTRUE); // efficiency - LauVetoes* vetoes = new LauVetoes(); - vetoes->addMassVeto( 1, 2.0, 2.1 ); //D* veto - vetoes->addMassVeto( 2, 2.0, 2.1 ); //D* veto - LauEffModel* effModelB0bar = new LauEffModel(daughtersB0bar, vetoes); - LauEffModel* effModelB0 = new LauEffModel(daughtersB0, vetoes); + auto vetoes { LauVetoes::readFromJson( "Bd2D0pipi_DP_Vetoes.json", "Bd2D0pipi_Vetoes" ) }; + LauEffModel* effModelB0bar = new LauEffModel(daughtersB0bar, vetoes.get()); + LauEffModel* effModelB0 = new LauEffModel(daughtersB0, vetoes.get()); TFile* effFile(nullptr); TH2* effHist(nullptr); if (settings.directory == "B2Dhh_Kpi"){ effFile = 
TFile::Open(Form("%s/%s/eff_map_mprime_thprime_model_d2kpi_run%u.root",settings.eosRoot.c_str(),settings.eosConfigDir.c_str(),runNo)); effHist = dynamic_cast(effFile->Get(Form("eff_map_mprime_thprime_model_d2kpi_run%u",runNo))); } else if (settings.directory == "B2Dhh_KK"){ effFile = TFile::Open(Form("%s/%s/eff_map_mprime_thprime_model_d2kk_run%u.root",settings.eosRoot.c_str(),settings.eosConfigDir.c_str(),runNo)); effHist = dynamic_cast(effFile->Get(Form("eff_map_mprime_thprime_model_d2kk_run%u",runNo))); } else if (settings.directory == "B2Dhh_pipi"){ effFile = TFile::Open(Form("%s/%s/eff_map_mprime_thprime_model_d2pipi_run%u.root",settings.eosRoot.c_str(),settings.eosConfigDir.c_str(),runNo)); effHist = dynamic_cast(effFile->Get(Form("eff_map_mprime_thprime_model_d2pipi_run%u",runNo))); } else {std::cerr << "bad dir given!" << std::endl; return EXIT_FAILURE;} effModelB0->setEffHisto(effHist, kTRUE, kFALSE, 0.0, 0.0, kFALSE, kTRUE); effModelB0bar->setEffHisto(effHist, kTRUE, kFALSE, 0.0, 0.0, kFALSE, kTRUE); // background types std::map BkgndTypes { {"comb", LauFlavTag::BkgndType::Combinatorial}, {"Bs2DKpi", LauFlavTag::BkgndType::FlavourSpecific}, {"Bd2DKpi", LauFlavTag::BkgndType::FlavourSpecific}, {"Lb2Dppi", LauFlavTag::BkgndType::FlavourSpecific} }; auto notInBkgndList = [&settings](const std::map::value_type& e) { return ( settings.bkgndList.find( e.first.Data() ) == settings.bkgndList.end() ); }; std::vector toRemove; for ( const auto& elem : BkgndTypes ) { if ( notInBkgndList(elem) ) { toRemove.push_back( elem.first ); } } for ( const auto& name : toRemove ) { BkgndTypes.erase( name ); } // setup flavour tagging const Bool_t useAveDeltaCalibVals { settings.useAveDeltaCalibVals }; const Bool_t useEtaPrime { kFALSE }; //LauFlavTag* flavTag = new LauFlavTag(useAveDeltaCalibVals,useEtaPrime); LauFlavTag* flavTag = new LauFlavTag(useAveDeltaCalibVals,useEtaPrime,BkgndTypes); if (settings.dType == LauTimeDepFitModel::CPEigenvalue::QFS) { 
flavTag->setDecayFlvVarName("decayFlv"); } if(not settings.dataFit) { flavTag->setTrueTagVarName("trueTag"); } TFile* etaFile = TFile::Open(Form("%s/%s/ETAhists.root",settings.eosRoot.c_str(),settings.eosConfigDir.c_str())); TH1* etaHist{nullptr}; TH1* etaHistB0{nullptr}; TH1* etaHistB0bar{nullptr}; if (settings.directory == "B2Dhh_Kpi"){ etaHistB0 = dynamic_cast(etaFile->Get(Form("signalMC_Kpi_Run%u_etaHist_dcyB",runNo))); etaHistB0bar = dynamic_cast(etaFile->Get(Form("signalMC_Kpi_Run%u_etaHist_dcyBbar",runNo))); etaHist = dynamic_cast(etaHistB0->Clone(Form("signalMC_Kpi_Run%u_etaHist",runNo))); etaHist->Add(etaHistB0bar); } else if (settings.directory == "B2Dhh_KK"){ etaHist = dynamic_cast(etaFile->Get(Form("signalMC_KK_Run%u_etaHist",runNo))); } else if (settings.directory == "B2Dhh_pipi"){ etaHist = dynamic_cast(etaFile->Get(Form("signalMC_pipi_Run%u_etaHist",runNo))); } else {std::cerr << "bad dir given!" << std::endl; return EXIT_FAILURE;} // Crude check as to whether we're doing perfect vs realistic mis-tag // - in the former case all entries should be in the first bin // If the tagging is perfect then don't interpolate the eta histogram // and also make it perfectly efficient, otherwise do interpolate and // make it 50% efficient Lau1DHistPdf* etaHistPdf = new Lau1DHistPdf( "eta", etaHist, 0.0, 0.5, kTRUE, kFALSE ); const Double_t meanEta { etaHistPdf->getMean() }; const Double_t tagEffVal{1.0}; std::pair tagEff {tagEffVal, useAveDeltaCalibVals ? 0.0 : tagEffVal}; // use a null calibration for the time being, so p0 = and p1 = 1 std::pair calib0 {meanEta, useAveDeltaCalibVals ? 0.0 : meanEta}; std::pair calib1 {1.0, useAveDeltaCalibVals ? 
0.0 : 1.0}; if(settings.addTaggers) { flavTag->addTagger(Form("IFT_Run%u",runNo), "IFT_TAGDEC", "IFT_TAGETA", etaHistPdf, tagEff, calib0, calib1); if (settings.floatCalibPars) { flavTag->floatAllCalibPars(); } } // flavour tagging setup for backgrounds std::map< TString, std::pair > BkgndEtas; TH1* bkgndEtaHist1{nullptr}; Lau1DHistPdf* bkgndEtaHistPDF1{nullptr}; TH1* bkgndEtaHistMinus1{nullptr}; Lau1DHistPdf* bkgndEtaHistPDFMinus1{nullptr}; for(auto& [name, _] : BkgndTypes) { const char* decayName = name == "comb" ? "sideband" : name.Data(); if (settings.directory == "B2Dhh_Kpi"){ bkgndEtaHist1 = dynamic_cast(etaFile->Get( Form("%s_Kpi_Run%u_etaHist_dcyB" ,decayName,runNo) )); bkgndEtaHistPDF1 = new Lau1DHistPdf("eta",bkgndEtaHist1 ,0.0,0.5,kTRUE,kFALSE); bkgndEtaHistMinus1 = dynamic_cast(etaFile->Get( Form("%s_Kpi_Run%u_etaHist_dcyBbar",decayName,runNo) )); bkgndEtaHistPDFMinus1 = new Lau1DHistPdf("eta",bkgndEtaHistMinus1,0.0,0.5,kTRUE,kFALSE); } else if (settings.directory == "B2Dhh_KK"){ bkgndEtaHist1 = dynamic_cast(etaFile->Get( Form("%s_KK_Run%u_etaHist" ,decayName,runNo) )); bkgndEtaHistPDF1 = new Lau1DHistPdf("eta",bkgndEtaHist1 ,0.0,0.5,kTRUE,kFALSE); } else if (settings.directory == "B2Dhh_pipi"){ bkgndEtaHist1 = dynamic_cast(etaFile->Get( Form("%s_pipi_Run%u_etaHist" ,decayName,runNo) )); bkgndEtaHistPDF1 = new Lau1DHistPdf("eta",bkgndEtaHist1 ,0.0,0.5,kTRUE,kFALSE); } else {std::cerr << "bad dir given!" << std::endl; return EXIT_FAILURE;} BkgndEtas.emplace( std::make_pair(name, std::make_pair(bkgndEtaHistPDF1,bkgndEtaHistPDFMinus1) ) ); } for(auto& [name,hists] : BkgndEtas) { if (settings.dType == LauTimeDepFitModel::CPEigenvalue::QFS){ if(name == "comb") { const BgTagDecayFlvAsym bgt = runNo == 1 ? BgTagDecayFlvAsym(535.,576.,456.,588.) 
: BgTagDecayFlvAsym(1514.,1497.,1220.,1575.); if(useAveDeltaCalibVals) { flavTag->setBkgndParams(name, Form("IFT_Run%u",runNo), hists.first , bgt.aveDeltaB0(), hists.second , bgt.aveDeltaB0bar() ); } else { flavTag->setBkgndParams(name, Form("IFT_Run%u",runNo), hists.first , bgt.decayB0(), hists.second , bgt.decayB0bar()); } } else{flavTag->setBkgndParams(name, Form("IFT_Run%u",runNo), hists.first, tagEff, hists.second, tagEff );} //flavTag->setBkgndParams(name, Form("IFT_Run%u",runNo), hist, name == "comb" ? std::make_pair(0.5,0.038) : tagEff ); } else if (settings.dType == LauTimeDepFitModel::CPEigenvalue::CPEven){ if(name == "comb") { BgTagDecayFlvAsym* bgt = nullptr; if(settings.directory == "B2Dhh_KK") { bgt = runNo == 1 ? new BgTagDecayFlvAsym(344.,442.,344.,442.) : new BgTagDecayFlvAsym(710.,814.,710.,814.); } else { bgt = runNo == 1 ? new BgTagDecayFlvAsym(55.,68.,55.,68.) : new BgTagDecayFlvAsym(210.,241.,210.,241.); } std::pair effs{bgt->decayB0()}; if(useAveDeltaCalibVals){effs = bgt->aveDeltaB0();} flavTag->setBkgndParams(name, Form("IFT_Run%u",runNo), hists.first, effs); delete bgt; } else{flavTag->setBkgndParams(name, Form("IFT_Run%u",runNo), hists.first, tagEff);} // else{flavTag->setBkgndParams(name, Form("IFT_Run%u",runNo), hists.first, tagEff, hists.second, tagEff );} FIXME once we have hists split by decay flv, uncomment this line and remove the above... actually don't FIXME, this is fine as above since the data isn't flavour specific in the CP modes. Keep it combined } } // signal dynamics if ( not std::filesystem::exists("Bd2D0pipi_DP_Model_Coeffs.json") ) { std::cerr << "Warning : couldn't find the json file of DP model coeffs; attempting to fetch from eos ..." << std::endl; int r = system( Form("xrdcp %s/%s/Bd2D0pipi_DP_Model_Coeffs.json .", settings.eosRoot.c_str(), settings.eosConfigDir.c_str()) ); if (r != 0) { std::cerr << "FATAL : couldn't get the json file from remote!" 
<< std::endl; return 1;} } LauIsobarDynamics* sigModelB0bar = new LauIsobarDynamics(daughtersB0bar, effModelB0bar); if (settings.directory == "B2Dhh_Kpi"){ sigModelB0bar->setIntFileName(Form("integ_Kpi_Run%u_B0bar.dat",runNo)); } else if (settings.directory == "B2Dhh_KK"){ sigModelB0bar->setIntFileName(Form("integ_KK_Run%u_B0bar.dat",runNo)); } else if (settings.directory == "B2Dhh_pipi"){ sigModelB0bar->setIntFileName(Form("integ_pipi_Run%u_B0bar.dat",runNo)); } sigModelB0bar->constructModelFromJson( "Bd2D0pipi_DP_Model_Coeffs.json", "Bd2D0pipi_B0bar_Model" ); LauIsobarDynamics* sigModelB0 = new LauIsobarDynamics(daughtersB0, effModelB0); if (settings.directory == "B2Dhh_Kpi"){ sigModelB0->setIntFileName(Form("integ_Kpi_Run%u_B0.dat",runNo)); } else if (settings.directory == "B2Dhh_KK"){ sigModelB0->setIntFileName(Form("integ_KK_Run%u_B0.dat",runNo)); } else if (settings.directory == "B2Dhh_pipi"){ sigModelB0->setIntFileName(Form("integ_pipi_Run%u_B0.dat",runNo)); } sigModelB0->constructModelFromJson( "Bd2D0pipi_DP_Model_Coeffs.json", "Bd2D0pipi_B0_Model" ); // fit model LauTimeDepFitModel* fitModel = new LauTimeDepFitModel(sigModelB0bar,sigModelB0,flavTag); fitModel->breakOnBadLikelihood(kFALSE); fitModel->sendFixedYieldsToFitter(kTRUE); std::vector> coeffset { LauAbsCoeffSet::readFromJson( "Bd2D0pipi_DP_Model_Coeffs.json", "Bd2D0pipi_Coeffs" ) }; for ( auto& coeff : coeffset ) { fitModel->setAmpCoeffSet( std::move(coeff) ); } // background DP models LauBkgndDPModel* BkgndModel{nullptr}; LauBkgndDPModel* BkgndModelbar{nullptr}; TFile* BkgndHistFile = TFile::Open(Form("%s/%s/BG_SDPs.root",settings.eosRoot.c_str(),settings.eosConfigDir.c_str())); TFile* BkgndHistFile2 = TFile::Open(Form("%s/%s/sidebandSDP.root",settings.eosRoot.c_str(),settings.eosConfigDir.c_str())); TH2* BkgndHistB0{nullptr}; TH2* BkgndHistB0bar{nullptr}; for(auto& [name, _] : BkgndTypes) { //Only have Kpi histos for now for misid if( name=="comb"){ if( settings.directory=="B2Dhh_Kpi"){ 
BkgndHistB0 = dynamic_cast(BkgndHistFile2-> Get(Form("sideband_Run%u_Kpi_SDP_decayFlv1",runNo))); BkgndHistB0bar = dynamic_cast(BkgndHistFile2->Get(Form("sideband_Run%u_Kpi_SDP_decayFlvMinus1",runNo))); } else if( settings.directory=="B2Dhh_KK"){ BkgndHistB0 = dynamic_cast(BkgndHistFile2->Get(Form("sideband_Run%u_KK_SDP",runNo))); } else if( settings.directory=="B2Dhh_pipi"){ BkgndHistB0 = dynamic_cast(BkgndHistFile2->Get(Form("sideband_Run%u_pipi_SDP",runNo))); } } else { TString mode = settings.directory; mode.ReplaceAll("B2Dhh_",""); TString histName = Form("%s_%s_Run%u_SDP_decFlv",name.Data(),mode.Data(),runNo); BkgndHistB0 = dynamic_cast(BkgndHistFile->Get(histName+"1")); BkgndHistB0bar = dynamic_cast(BkgndHistFile->Get(histName+"Minus1")); } - BkgndModel = new LauBkgndDPModel(daughtersB0, vetoes); + BkgndModel = new LauBkgndDPModel(daughtersB0, vetoes.get()); BkgndHistB0->Smooth(); BkgndModel->setBkgndSpline(BkgndHistB0, kFALSE, kFALSE, kTRUE ); //For CP comb we don't split based on decay flavour since it's impossible if(name == "comb" and settings.directory!="B2Dhh_Kpi") {BkgndModelbar=nullptr;} else { - BkgndModelbar = new LauBkgndDPModel(daughtersB0bar, vetoes); + BkgndModelbar = new LauBkgndDPModel(daughtersB0bar, vetoes.get()); BkgndHistB0bar->Smooth(); BkgndModelbar->setBkgndSpline(BkgndHistB0bar, kFALSE, kFALSE, kTRUE ); } fitModel->setBkgndDPModels( name, BkgndModel, BkgndModelbar ); } // decay type and mixing parameter const Bool_t fixPhiMix{ settings.fixPhiMix || settings.dType == LauTimeDepFitModel::CPEigenvalue::QFS }; const Bool_t useSinCos{ settings.useSinCos }; fitModel->setCPEigenvalue( settings.dType ); fitModel->setPhiMix( 2.0*LauConstants::beta, fixPhiMix, useSinCos ); if ( useSinCos ) { if ( settings.blindFit ) { //Blinding strings generated from: https://www.thewordfinder.com/random-sentence-generator/ const TString sinBlindString{settings.dataFit ? "Tom realized he could be making a big mistake." 
: "Don't Look a Gift Horse In The Mouth"}; const TString cosBlindString{settings.dataFit ? "He admitted defeat." : "Long In The Tooth"}; fitModel->blindPhiMix( sinBlindString, cosBlindString ); } std::cout << "Is sin(phiMix) blinded? " << std::boolalpha << fitModel->sinPhiMixBlinded() << std::endl; std::cout << "Is cos(phiMix) blinded? " << std::boolalpha << fitModel->cosPhiMixBlinded() << std::endl; } else { if ( settings.blindFit ) { const TString blindString{"A Cut Above"}; fitModel->blindPhiMix( blindString ); } std::cout << "Is phiMix blinded? " << std::boolalpha << fitModel->phiMixBlinded() << std::endl; } // production asymmetries fitModel->setAsymmetries( 0.0, kTRUE ); for(auto& [name, _] : BkgndTypes) { fitModel->setBkgndAsymmetries( name, 0.0, kTRUE ); } // decay time PDFs //const Double_t minDt(0.200103); //const Double_t maxDt(14.9998); const Double_t minDt(0.2); const Double_t maxDt(15.0); const Double_t minDtErr(0.0); const Double_t maxDtErr(0.15); LauParameter * tauB0 = new LauParameter("dt_tau", 1.519, 0.5, 5.0, settings.fixLifetime); LauParameter * deltaMd = new LauParameter("dt_deltaM", 0.5065, 0.0, 1.0, settings.fixDeltaM); std::vector dtPhysPars { tauB0, deltaMd }; auto dtPhysModel = std::make_unique( LauDecayTime::FuncType::ExpTrig, dtPhysPars ); const std::vector scale { false, false, false // settings.perEventTimeErr && true, // settings.perEventTimeErr && true, }; const std::size_t nGauss{scale.size()}; LauParameter * mean0 = new LauParameter("dt_mean_2", runNo == 1 ? -0.00174035 : -0.00218543 , -0.01, 0.01, kTRUE ); LauParameter * mean1 = new LauParameter("dt_mean_1", runNo == 1 ? 0.043133 : 0.046862 , -0.10, 0.10, kTRUE ); LauParameter * mean2 = new LauParameter("dt_mean_0", runNo == 1 ? -0.00108159 : -0.00159348 , -0.01, 0.01, kTRUE ); LauParameter * sigma0 = new LauParameter("dt_sigma_2", runNo == 1 ? 0.046272 : 0.046064 , 0.0, 2.0, kTRUE ); LauParameter * sigma1 = new LauParameter("dt_sigma_1", runNo == 1 ? 
0.11815 : 0.11870 , 0.0, 2.5, kTRUE ); LauParameter * sigma2 = new LauParameter("dt_sigma_0", runNo == 1 ? 0.025127 : 0.025077 , 0.0, 2.5, kTRUE ); LauParameter * frac0 = new LauParameter("dt_frac_2", runNo == 1 ? 0.44359 : 0.43327 , 0.0, 1.0, kTRUE ); LauParameter * frac1 = new LauParameter("dt_frac_1", runNo == 1 ? 0.045545 : 0.045310 , 0.0, 1.0, kTRUE ); std::vector dtResoPars { mean0, mean1, mean2, sigma0, sigma1, sigma2, frac0, frac1 }; auto dtResoModel = std::make_unique( nGauss, dtResoPars, scale ); //Bs backgrounds LauParameter * tauBs = new LauParameter("dt_tau_Bs", 1.527, 1.526, 1.528, kTRUE); LauParameter * deltaMs = new LauParameter("dt_deltaM_Bs", 17.765, 17.60, 17.80, kTRUE); LauParameter * deltaGs = new LauParameter("dt_deltaGamma_Bs", 0.084, 0.080, 0.090, kTRUE); //Lb backgrounds LauParameter * tauLb = new LauParameter("dt_tau_Lb", 1.464, 1.450, 1.470, kTRUE); // Decay time error histogram // (always set this so that it gets generated properly, whether we're using it in the PDF or not) TFile* dtErrFile = TFile::Open(Form("%s/%s/dte-hist.root",settings.eosRoot.c_str(),settings.eosConfigDir.c_str())); TH1* dtErrHist = dynamic_cast(dtErrFile->Get("dte_hist")); LauDecayTimePdf * dtPdf{nullptr}; if ( settings.timeResolution ) { if ( settings.perEventTimeErr ) { dtPdf = new LauDecayTimePdf( "decayTime", minDt, maxDt, "decayTimeErr", minDtErr, maxDtErr, std::move(dtPhysModel), std::move(dtResoModel), dtErrHist ); } else { dtPdf = new LauDecayTimePdf( "decayTime", minDt, maxDt, std::move(dtPhysModel), std::move(dtResoModel) ); } } else { dtPdf = new LauDecayTimePdf( "decayTime", minDt, maxDt, std::move(dtPhysModel) ); } // Decay time acceptance histogram TFile* dtaFile = TFile::Open(Form("%s/%s/DTAhists.root",settings.eosRoot.c_str(),settings.eosConfigDir.c_str())); TH1* dtaHist{nullptr}; if (settings.directory == "B2Dhh_Kpi"){ dtaHist = dynamic_cast(dtaFile->Get(Form("signalMC_Kpi_Run%u_DTAhist",runNo))); } else if (settings.directory == "B2Dhh_KK"){ 
dtaHist = dynamic_cast(dtaFile->Get(Form("signalMC_KK_Run%u_DTAhist",runNo))); } else if (settings.directory == "B2Dhh_pipi"){ dtaHist = dynamic_cast(dtaFile->Get(Form("signalMC_pipi_Run%u_DTAhist",runNo))); } else {std::cerr << "bad dir given!" << std::endl; return EXIT_FAILURE;} if( not std::filesystem::exists("run1DTAsplines.json") and runNo == 1 ) { std::cerr << "Warning : couldn't find the json file of splines in Run 1; attempting to fetch from eos ..." << std::endl; int r = system( Form("xrdcp %s/%s/run1DTAsplines.json .", settings.eosRoot.c_str(), settings.eosConfigDir.c_str()) ); if(r != 0){ std::cerr << "FATAL : couldn't get the json file from remote!" << std::endl; return 1;} } if( not std::filesystem::exists("run2DTAsplines.json") and runNo == 2 ) { std::cerr << "Warning : couldn't find the json file of splines in Run 2; attempting to fetch from eos ..." << std::endl; int r = system( Form("xrdcp %s/%s/run2DTAsplines.json .", settings.eosRoot.c_str(), settings.eosConfigDir.c_str()) ); if(r != 0){ std::cerr << "FATAL : couldn't get the json file from remote!" 
<< std::endl; return 1;} } //The below is replaced with the new Lau1DCubicSpline::readFromJson // Create the spline knot positions and // starting Y values, to be fit to dtaHist //const std::vector dtvals { 0.0, 0.5, 1.0, 2.0, 3.0, 4.0, 7.0, 10.0, 15.0}; //const std::vector effvals {0.000, 0.010, 0.022, 0.035, 0.042, 0.050, 0.051, 0.052, 0.055}; //const std::vector corvals { 1.10, 1.08, 1.06, 1.04, 1.02, 1.00, 0.98, 0.96, 0.94}; std::unique_ptr dtEffSpline = nullptr; Lau1DCubicSpline* dtEffSplinePtr = nullptr; std::unique_ptr dtCorrSpline = nullptr; Lau1DCubicSpline* dtCorrSplinePtr = nullptr; LauSplineDecayTimeEfficiency* sigDTA = nullptr; std::unique_ptr> dtaModel; switch(settings.timeEffModel) { case LauDecayTime::EfficiencyMethod::Spline: { fitModel->setASqMaxValue(0.01); if ( settings.dType == LauTimeDepFitModel::CPEigenvalue::QFS ) { fitModel->setASqMaxValue(0.009); } //auto dtEffSpline = std::make_unique( dtvals, effvals, Lau1DCubicSpline::SplineType::AkimaSpline ); TString jsonFilename = Form("run%uDTAsplines.json",runNo); dtEffSpline = Lau1DCubicSpline::readFromJson(jsonFilename, Form("signalMC_Kpi_Run%u_DTAhist",runNo)); dtEffSplinePtr = dtEffSpline.get(); dtaModel = std::make_unique>( Form("dteff_QFS_Run%u",runNo), std::move(dtEffSpline) ); dynamic_cast(dtaModel->getParameters().front())->maxValue(1e-1); //dtaModel->fitToTH1(dtaHist); // Set which knots to float and which to fix (at least 1 knot must be fixed, not the first one) // Knots should only be floating if requested AND the B lifetime is fixed! 
if ( settings.fixSplineKnots or not settings.fixLifetime ) { dtaModel->fixKnots(); } else { dtaModel->floatKnots(); dtaModel->fixKnot( 0, true ); dtaModel->fixKnot( 3, true ); } sigDTA = dtaModel.get(); if ( settings.dType == LauTimeDepFitModel::CPEigenvalue::QFS ) { // For the QFS mode we just use the cubic model as it is sigDTA = dtaModel.get(); dtPdf->setSplineEfficiency( std::move(dtaModel) ); } else { // For the CP modes we modify it using a corrective spline // Json available? //auto dtCorrSpline = std::make_unique( dtvals, corvals, Lau1DCubicSpline::SplineType::AkimaSpline ); std::unique_ptr> dtaCPModel; if (settings.directory == "B2Dhh_KK"){ dtCorrSpline = Lau1DCubicSpline::readFromJson(jsonFilename, Form("signalMC_KK_Run%u_DTAhist",runNo)); dtCorrSplinePtr = dtCorrSpline.get(); dtaCPModel = std::make_unique>( Form("dteff_CPKK_Run%u",runNo), *dtaModel, std::move( dtCorrSpline ) ); } else if (settings.directory == "B2Dhh_pipi"){ dtCorrSpline = Lau1DCubicSpline::readFromJson(jsonFilename, Form("signalMC_pipi_Run%u_DTAhist",runNo)); dtCorrSplinePtr = dtCorrSpline.get(); dtaCPModel = std::make_unique>( Form("dteff_CPpipi_Run%u",runNo), *dtaModel, std::move( dtCorrSpline ) ); } dtPdf->setSplineEfficiency( std::move(dtaCPModel) ); } break; } case LauDecayTime::EfficiencyMethod::Binned: { fitModel->setASqMaxValue(0.06); auto dtaBinnedModel = std::make_unique( *dtaHist ); dtPdf->setBinnedEfficiency( std::move(dtaBinnedModel) ); break; } case LauDecayTime::EfficiencyMethod::Uniform: { fitModel->setASqMaxValue(4.45); break; } } fitModel->setSignalDtPdf( dtPdf ); std::map bkgndDecayTimeTypes = { {"comb", LauDecayTime::FuncType::Hist}, {"Bd2DKpi", LauDecayTime::FuncType::ExpTrig}, {"Bs2DKpi", LauDecayTime::FuncType::ExpHypTrig}, {"Lb2Dppi", LauDecayTime::FuncType::Exp} }; TH1* bgDTA{nullptr}; std::map > decayFlvYields = { {"comb" , runNo == 1 ? std::make_pair( 1111, 1044) : std::make_pair( 3011, 2795)}, {"Bd2DKpi", runNo == 1 ? 
std::make_pair( 9552, 8933) : std::make_pair(13842,12036)}, {"Bs2DKpi", runNo == 1 ? std::make_pair(16989,15634) : std::make_pair(18294,17005)}, {"Lb2Dppi", runNo == 1 ? std::make_pair( 3695, 3431) : std::make_pair( 2389, 2501)} }; TString DTAnameSuffix{""}; if (settings.directory == "B2Dhh_Kpi"){ DTAnameSuffix = Form("_Kpi_Run%u_DTAhist",runNo); } else if (settings.directory == "B2Dhh_KK"){ DTAnameSuffix = Form("_KK_Run%u_DTAhist",runNo); } else if (settings.directory == "B2Dhh_pipi"){ DTAnameSuffix = Form("_pipi_Run%u_DTAhist",runNo); } for (auto& [bg, _] : BkgndTypes) { if ( bkgndDecayTimeTypes[bg] == LauDecayTime::FuncType::Hist ) { TString histname = bg + DTAnameSuffix; if(histname.Contains("comb")) { histname = "Sideband" + DTAnameSuffix; histname.ReplaceAll("DTAhist","DThist"); } TH1* dt_hist = dynamic_cast(dtaFile->Get(histname.Data())); TH1* dt_err_hist = nullptr; // TODO get these! if ( not dt_hist ) { std::cerr << "PROBLEM FINDING THE LIFETIME HIST:" << histname << std::endl; return EXIT_FAILURE; } if ( settings.perEventTimeErr and not dt_err_hist ) { std::cerr << "PROBLEM FINDING THE LIFETIME ERROR HIST:" << bg << std::endl; return EXIT_FAILURE; } LauDecayTimePdf* backgroundPdf {nullptr}; if ( settings.perEventTimeErr ) { backgroundPdf = new LauDecayTimePdf("decayTime", minDt, maxDt, "decayTimeErr", minDtErr, maxDtErr, dt_hist, dt_err_hist ); } else { backgroundPdf = new LauDecayTimePdf("decayTime", minDt, maxDt, dt_hist ); } fitModel->setBkgndDtPdf(bg,backgroundPdf); } else { std::vector bgPhysParams; if (bg == "Bd2DKpi") { bgPhysParams = { tauB0->createClone(), deltaMd->createClone() }; } else if (bg == "Bs2DKpi") { bgPhysParams = { tauBs, deltaGs, deltaMs }; } else if (bg == "Lb2Dppi") { bgPhysParams = { tauLb }; } auto bgPhysModel = std::make_unique( bkgndDecayTimeTypes[bg], bgPhysParams ); std::vector bgResoParams { mean0->createClone(), mean1->createClone(), mean2->createClone(), sigma0->createClone(), sigma1->createClone(), sigma2->createClone(), 
frac0->createClone(), frac1->createClone() }; auto bgResoModel = std::make_unique( nGauss, bgResoParams, scale ); TH1* dt_err_hist = nullptr; // TODO get these! if ( settings.perEventTimeErr and not dt_err_hist ) { std::cerr << "PROBLEM FINDING THE LIFETIME ERROR HIST:" << bg << std::endl; return EXIT_FAILURE; } LauDecayTimePdf* backgroundPdf {nullptr}; if ( settings.timeResolution ) { if ( settings.perEventTimeErr ) { backgroundPdf = new LauDecayTimePdf( "decayTime", minDt, maxDt, "decayTimeErr", minDtErr, maxDtErr, std::move(bgPhysModel), std::move(bgResoModel), dt_err_hist ); } else { backgroundPdf = new LauDecayTimePdf( "decayTime", minDt, maxDt, std::move(bgPhysModel), std::move(bgResoModel) ); } } else { backgroundPdf = new LauDecayTimePdf( "decayTime", minDt, maxDt, std::move(bgPhysModel) ); } // background decay time acceptance histogram TString histname = bg + DTAnameSuffix; bgDTA = dynamic_cast(dtaFile->Get(histname)); if ( not bgDTA and bg!="comb" ) { std::cerr << "Couldn't find DTA histogram for background: " << bg << std::endl; return EXIT_FAILURE; } //XXX if we decide to rebin, do it here bgDTA->Divide(dtaHist); for (Int_t bin = 1; bin <= bgDTA->GetNbinsX(); ++bin) { if( bgDTA->GetBinContent(bin)==0. 
){bgDTA->SetBinContent(bin,1.);} } TString jsonFilename = Form("run%iDTAsplines.json",runNo); auto dtBkgndCorrSpline = Lau1DCubicSpline::readFromJson(jsonFilename,histname); std::unique_ptr> bgDTAModel; if (sigDTA==nullptr){ std::cout << "Null pointer" << std::endl; } if (settings.directory == "B2Dhh_Kpi"){ bgDTAModel = std::make_unique>( Form("%s_DTA_ratio_Run%u_Kpi",bg.Data(), runNo), *sigDTA, std::move( dtBkgndCorrSpline ) ); } else if (settings.directory == "B2Dhh_KK"){ bgDTAModel = std::make_unique>( Form("%s_DTA_ratio_Run%u_KK",bg.Data(), runNo), *sigDTA, std::move( dtBkgndCorrSpline ) ); } else if (settings.directory == "B2Dhh_pipi"){ bgDTAModel = std::make_unique>( Form("%s_DTA_ratio_Run%u_pipi",bg.Data(),runNo), *sigDTA, std::move( dtBkgndCorrSpline ) ); } //bgDTAModel->fitToTH1(bgDTA, LauOutputLevel::Quiet, true); backgroundPdf->setSplineEfficiency( std::move(bgDTAModel) ); fitModel->setBkgndDtPdf(bg,backgroundPdf); } } // Signal and bkgnd yields Double_t nSigEvents{0}; Double_t nCombEvents{0}; std::map bkgndYields; if (settings.directory == "B2Dhh_Kpi") { nSigEvents = (runNo == 1) ? 28362 : 115032; nCombEvents = (runNo == 1) ? 2415 : 7637; if ( ! settings.scaleYields ) { nCombEvents += (runNo == 1) ? (303 + 111 + 123 + 143 + 2) : (974 + 390 + 256 + 389 + 4); } bkgndYields = {{"comb", nCombEvents }, {"Bs2DKpi", runNo == 1 ? 796 : 1713 }, {"Bd2DKpi", runNo == 1 ? 254 : 639 }, {"Lb2Dppi", runNo == 1 ? 245 : 304 }}; } else if (settings.directory == "B2Dhh_KK"){ if (settings.setNegToZero and runNo == 1) { // Mass fit with mis-ID yields fixed to 0 nSigEvents = 3098; nCombEvents = 800; if( ! settings.scaleYields){ nCombEvents += 20 + 22 + 53 + 39 + 0; } bkgndYields = {{"comb", nCombEvents }, {"Bs2DKpi", 0 }, {"Bd2DKpi", 0 }, {"Lb2Dppi", 0 }}; } else if (settings.constrainedYields and runNo == 1) { // Mass fit with mis-ID yields constrained based on // Run1/Run2 in Kpi and pipi and Run2 yield of KK nSigEvents = 3093; nCombEvents = 777; if( ! 
settings.scaleYields){ nCombEvents += 19 + 21 + 52 + 41 + 0; } bkgndYields = {{"comb", nCombEvents }, {"Bs2DKpi", 19 }, {"Bd2DKpi", 6 }, {"Lb2Dppi", 7 }}; } else { // Original mass fit nSigEvents = runNo == 1 ? 3140 : 12099; nCombEvents = (runNo == 1) ? 1016 : 1501; if ( ! settings.scaleYields ) { nCombEvents += (runNo == 1) ? (63 + 23 + 26 + 13 + 1) : (107 + 43 + 28 + 5 + 1); } bkgndYields = {{"comb", nCombEvents }, {"Bs2DKpi", runNo == 1 ? -172 : 167 }, {"Bd2DKpi", runNo == 1 ? -64 : 73 }, {"Lb2Dppi", runNo == 1 ? -61 : 33 }}; } } else if (settings.directory == "B2Dhh_pipi"){ nSigEvents = runNo == 1 ? 1175 : 4566; nCombEvents = (runNo == 1) ? 115 : 301; if ( ! settings.scaleYields ) { nCombEvents += (runNo == 1) ? (7 + 3 + 3 + 79 + 0) : (36 + 15 + 10 + 138 + 1); } bkgndYields = {{"comb", nCombEvents }, {"Bs2DKpi", runNo == 1 ? 57 : 99 }, {"Bd2DKpi", runNo == 1 ? 18 : 41 }, {"Lb2Dppi", runNo == 1 ? 21 : 19 }}; } else {std::cerr << "bad dir given!" << std::endl; return EXIT_FAILURE;} if(settings.command == Command::Generate) { for(auto& [mode, yield] : bkgndYields) { if(yield < 0.) 
{ std::cerr << "WARNING : setting negative yield for mode " << mode << " to 0 for generation" << std::endl; yield = 0.; } } } const Bool_t fixYields { settings.bkgndList.empty() or not settings.floatYields }; TString yieldName; for (auto& [bg, _] : BkgndTypes) { const Double_t nBkg = bkgndYields[bg]; std::cout<setNBkgndEvents( nBkgndEvents ); } std::cout<<"nSigEvents = "<setNSigEvents(nSigPar); if ( settings.command == Command::Generate or not fixYields ) { // mB PDFs // PDFs for B+ and B- are set to be identical here but could be different in general const TString mBName { "mB" }; const Double_t mBMin { 5.200 }; const Double_t mBMax { 5.700 }; // Signal PDF is a double Crystal Ball function const std::map sigParVals { {"mB_sig_mean", 5.2802}, {"mB_sig_sigma", 0.01125}, {"mB_sig_alphaL", 1.226}, {"mB_sig_alphaR", -1.722}, {"mB_sig_orderL", 1.817}, {"mB_sig_orderR", 2.861}, {"mB_sig_frac", 0.480} }; fitModel->setSignalPdfs( makeDCBPDF( mBName, mBMin, mBMax, "sig", sigParVals ) ); // Combinatorial PDF is an exponential if ( BkgndTypes.find("comb") != BkgndTypes.end() ) { LauParameter* mB_comb_slope = new LauParameter("mB_comb_slope", -0.00419); LauAbsPdf* mB_comb_pdf = new LauExponentialPdf(mBName, {mB_comb_slope}, mBMin, mBMax); fitModel->setBkgndPdf("comb", mB_comb_pdf); } // Bd2DKpi PDF is double Crystal Ball function if ( BkgndTypes.find("Bd2DKpi") != BkgndTypes.end() ) { const std::map Bd2DKpiParVals { {"mB_Bd2DKpi_mean", 5.2401}, {"mB_Bd2DKpi_sigma", 0.01580}, {"mB_Bd2DKpi_alphaL", 0.560}, {"mB_Bd2DKpi_alphaR", -2.212}, {"mB_Bd2DKpi_orderL", 1.076}, {"mB_Bd2DKpi_orderR", 2.453}, {"mB_Bd2DKpi_frac", 0.544} }; fitModel->setBkgndPdf("Bd2DKpi", makeDCBPDF( mBName, mBMin, mBMax, "Bd2DKpi", Bd2DKpiParVals ) ); } // Bs2DKpi PDF is double Crystal Ball function if ( BkgndTypes.find("Bs2DKpi") != BkgndTypes.end() ) { const std::map Bs2DKpiParVals { {"mB_Bs2DKpi_mean", 5.3244}, {"mB_Bs2DKpi_sigma", 0.01827}, {"mB_Bs2DKpi_alphaL", 0.865}, {"mB_Bs2DKpi_alphaR", -0.001}, 
{"mB_Bs2DKpi_orderL", 10.000}, {"mB_Bs2DKpi_orderR", 10.000}, {"mB_Bs2DKpi_frac", 0.954} }; fitModel->setBkgndPdf("Bs2DKpi", makeDCBPDF( mBName, mBMin, mBMax, "Bs2DKpi", Bs2DKpiParVals ) ); } // Lb2Dppi PDF is double Crystal Ball function if ( BkgndTypes.find("Lb2Dppi") != BkgndTypes.end() ) { const std::map Lb2DppiParVals { {"mB_Lb2Dppi_mean", 5.4884}, {"mB_Lb2Dppi_sigma", 0.02561}, {"mB_Lb2Dppi_alphaL", 0.091}, {"mB_Lb2Dppi_alphaR", -18.053}, {"mB_Lb2Dppi_orderL", 2.906}, {"mB_Lb2Dppi_orderR", 6.087}, {"mB_Lb2Dppi_frac", 0.968} }; fitModel->setBkgndPdf("Lb2Dppi", makeDCBPDF( mBName, mBMin, mBMax, "Lb2Dppi", Lb2DppiParVals ) ); } } // set the number of experiments if (settings.command == Command::Generate) { fitModel->setNExpts(settings.nExptGen, settings.firstExptGen); } else { fitModel->setNExpts(settings.nExptFit, settings.firstExptFit); } fitModel->useAsymmFitErrors(kFALSE); fitModel->useRandomInitFitPars(kFALSE); fitModel->writeLatexTable(kFALSE); const Bool_t haveBkgnds = ( fitModel->nBkgndClasses() > 0 ); fitModel->doPoissonSmearing(haveBkgnds); fitModel->doEMLFit(haveBkgnds); TString dTypeStr; switch (settings.dType) { case LauTimeDepFitModel::CPEigenvalue::CPEven : dTypeStr = "CPEven"; break; case LauTimeDepFitModel::CPEigenvalue::CPOdd : dTypeStr = "CPOdd"; break; case LauTimeDepFitModel::CPEigenvalue::QFS : dTypeStr = "QFS"; break; } TString dataFile(""); TString treeName("fitTree"); TString rootFileName(""); TString tableFileName(""); TString fitToyFileName(""); TString splotFileName(""); dataFile = "TEST-Dpipi"; dataFile += "_"+dTypeStr+"_"+settings.directory; switch(settings.timeEffModel) { case LauDecayTime::EfficiencyMethod::Spline: dataFile += "_Spline"; break; case LauDecayTime::EfficiencyMethod::Binned: dataFile += "_Binned"; break; case LauDecayTime::EfficiencyMethod::Uniform: dataFile += "_Uniform"; break; } if (settings.timeResolution) { if (settings.perEventTimeErr) { dataFile += "_DTRperevt"; } else { dataFile += "_DTRavg"; } } else { 
dataFile += "_DTRoff"; } dataFile += "_expts"; dataFile += settings.firstExptGen; dataFile += "-"; dataFile += settings.firstExptGen+settings.nExptGen-1; dataFile += Form("-Run%u",runNo); dataFile += ".root"; if (settings.dataFit) { if(runNo == 2) { dataFile = Form("%s/%s/Run2/B2Dhh-CollisionCombined-MagCombined-Stripping24r2-withDNN-withPIDcorr-withMVA-withPIDMVA-WithDpiMatching-withSwappedMassHypotheses-withKKvetoes-withSelection-LauraPrepped.root",settings.eosRoot.c_str(),settings.eosDataDir.c_str()); } else { dataFile = Form("%s/%s/Run1/B2Dhh-CollisionCombined-MagCombined-Stripping21r1p2-withDNN-withPIDcorr-withMVA-withPIDMVA-WithDpiMatching-withSwappedMassHypotheses-withKKvetoes-withSelection-LauraPrepped.root",settings.eosRoot.c_str(),settings.eosDataDir.c_str()); } treeName = Form("%s/B2DhhReco",settings.directory.data()); } if (settings.command == Command::Generate) { rootFileName = "dummy.root"; tableFileName = "genResults"; } else { rootFileName = "fit"; rootFileName += settings.iFit; rootFileName += "_Results_"; rootFileName += dTypeStr; rootFileName += "_"; rootFileName += settings.directory; rootFileName += "_expts"; rootFileName += settings.firstExptFit; rootFileName += "-"; rootFileName += settings.firstExptFit+settings.nExptFit-1; rootFileName += Form("_Run%u",runNo); rootFileName += ".root"; tableFileName = "fit"; tableFileName += settings.iFit; tableFileName += "_Results_"; tableFileName += dTypeStr; tableFileName += "_"; tableFileName += settings.directory; tableFileName += "_expts"; tableFileName += settings.firstExptFit; tableFileName += "-"; tableFileName += settings.firstExptFit+settings.nExptFit-1; tableFileName += Form("_Run%u",runNo); fitToyFileName = "fit"; fitToyFileName += settings.iFit; fitToyFileName += "_ToyMC_"; fitToyFileName += dTypeStr; fitToyFileName += "_"; fitToyFileName += settings.directory; fitToyFileName += "_expts"; fitToyFileName += settings.firstExptFit; fitToyFileName += "-"; fitToyFileName += 
settings.firstExptFit+settings.nExptFit-1; fitToyFileName += Form("_Run%u",runNo); fitToyFileName += ".root"; splotFileName = "fit"; splotFileName += settings.iFit; splotFileName += "_sPlot_"; splotFileName += dTypeStr; splotFileName += "_"; splotFileName += settings.directory; splotFileName += "_expts"; splotFileName += settings.firstExptFit; splotFileName += "-"; splotFileName += settings.firstExptFit+settings.nExptFit-1; splotFileName += Form("_Run%u",runNo); splotFileName += ".root"; } if( settings.genToy and not settings.blindFit ) { fitModel->compareFitData(50, fitToyFileName); } // Generate toy from the fitted parameters //fitModel->compareFitData(1, fitToyFileName); // Write out per-event likelihoods and sWeights //fitModel->writeSPlotData(splotFileName, "splot", kFALSE); // Execute the generation/fit switch (settings.command) { case Command::Generate : fitModel->run( "gen", dataFile, treeName, rootFileName, tableFileName ); break; case Command::Fit : fitModel->run( "fit", dataFile, treeName, rootFileName, tableFileName ); break; case Command::SimFit : fitModel->runTask( dataFile, treeName, rootFileName, tableFileName, "localhost", settings.port ); break; } return EXIT_SUCCESS; } LauAbsPdf* makeDCBPDF( const TString& varName, const Double_t varMin, const Double_t varMax, const TString& componentName, const std::map paramVals ) { const TString meanName { Form( "%s_%s_mean", varName.Data(), componentName.Data() ) }; const TString sigmaName { Form( "%s_%s_sigma", varName.Data(), componentName.Data() ) }; const TString alphaLName { Form( "%s_%s_alphaL", varName.Data(), componentName.Data() ) }; const TString alphaRName { Form( "%s_%s_alphaR", varName.Data(), componentName.Data() ) }; const TString orderLName { Form( "%s_%s_orderL", varName.Data(), componentName.Data() ) }; const TString orderRName { Form( "%s_%s_orderR", varName.Data(), componentName.Data() ) }; const TString fracName { Form( "%s_%s_frac", varName.Data(), componentName.Data() ) }; LauParameter* 
mean = new LauParameter( meanName, paramVals.at( meanName ) ); LauParameter* sigma = new LauParameter( sigmaName, paramVals.at( sigmaName ) ); LauParameter* alphaL = new LauParameter( alphaLName, paramVals.at( alphaLName ) ); LauParameter* alphaR = new LauParameter( alphaRName, paramVals.at( alphaRName ) ); LauParameter* orderL = new LauParameter( orderLName, paramVals.at( orderLName ) ); LauParameter* orderR = new LauParameter( orderRName, paramVals.at( orderRName ) ); LauParameter* frac = new LauParameter( fracName, paramVals.at( fracName ) ); std::vector pars; pars.reserve(4); pars.clear(); pars.push_back(mean); pars.push_back(sigma); pars.push_back(alphaL); pars.push_back(orderL); LauAbsPdf* pdf_L = new LauCrystalBallPdf(varName, pars, varMin, varMax); pars.clear(); pars.push_back(mean); pars.push_back(sigma); pars.push_back(alphaR); pars.push_back(orderR); LauAbsPdf* pdf_R = new LauCrystalBallPdf(varName, pars, varMin, varMax); LauAbsPdf* pdf = new LauSumPdf(pdf_L, pdf_R, frac); return pdf; } diff --git a/examples/runTimeDepTest.sh b/examples/runTimeDepTest.sh index 99d2fdd..383690e 100755 --- a/examples/runTimeDepTest.sh +++ b/examples/runTimeDepTest.sh @@ -1,242 +1,248 @@ #!/bin/bash #SBATCH --partition=epp #SBATCH --ntasks=1 #SBATCH --cpus-per-task=6 #SBATCH --mem-per-cpu=3997 #SBATCH --time=02:00:00 #Run with sbatch runTimeDepTest.sh # Copyright 2020 University of Warwick # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# Laura++ package authors: # John Back # Paul Harrison # Thomas Latham processes=() # trap ^C and call ctrl_c() trap ctrl_c INT function ctrl_c() { echo " ** Caught ^C event!" for process in ${processes[@]} do if ps -u $USER | grep -q "$process" then echo "Killing : $process" kill $process fi done exit 1 } # Check that we have a kerberos token, since nothing will work without one! klist -s > /dev/null 2>&1 if [ $? -ne 0 ] then echo "You don't have a valid Kerberos ticket!" exit 1 fi iFit=0 dta_model="spline" dtr=1 dtr_perevent=0 fixTau=1 fixDeltaM=1 seed=140279 # $(pwd | xargs basename) dataFit=1 runNo=12 # Modify these if you want to run more experiments nExpt=1 firstExpt=0 logName_genQFS_run1=Logs/gen-QFS-run1-dtamodel_$dta_model-dtr_$dtr-dtrperevent_$dtr_perevent.log logName_genQFS_run2=Logs/gen-QFS-run2-dtamodel_$dta_model-dtr_$dtr-dtrperevent_$dtr_perevent.log logName_genCPE_KK_run1=Logs/gen-CPEvenKK-run1-dtamodel_$dta_model-dtr_$dtr-dtrperevent_$dtr_perevent.log logName_genCPE_KK_run2=Logs/gen-CPEvenKK-run2-dtamodel_$dta_model-dtr_$dtr-dtrperevent_$dtr_perevent.log logName_genCPE_pipi_run1=Logs/gen-CPEvenpipi-run1-dtamodel_$dta_model-dtr_$dtr-dtrperevent_$dtr_perevent.log logName_genCPE_pipi_run2=Logs/gen-CPEvenpipi-run2-dtamodel_$dta_model-dtr_$dtr-dtrperevent_$dtr_perevent.log logName_coord=Logs/coordinator-dtamodel_$dta_model-dtr_$dtr-dtrperevent_$dtr_perevent-$iFit.log logName_taskQFS_run1=Logs/task-QFS-run1-dtamodel_$dta_model-dtr_$dtr-dtrperevent_$dtr_perevent-$iFit.log logName_taskQFS_run2=Logs/task-QFS-run2-dtamodel_$dta_model-dtr_$dtr-dtrperevent_$dtr_perevent-$iFit.log logName_taskCPE_KK_run1=Logs/task-CPEvenKK-run1-dtamodel_$dta_model-dtr_$dtr-dtrperevent_$dtr_perevent-$iFit.log logName_taskCPE_KK_run2=Logs/task-CPEvenKK-run2-dtamodel_$dta_model-dtr_$dtr-dtrperevent_$dtr_perevent-$iFit.log logName_taskCPE_pipi_run1=Logs/task-CPEvenpipi-run1-dtamodel_$dta_model-dtr_$dtr-dtrperevent_$dtr_perevent-$iFit.log 
logName_taskCPE_pipi_run2=Logs/task-CPEvenpipi-run2-dtamodel_$dta_model-dtr_$dtr-dtrperevent_$dtr_perevent-$iFit.log mkdir -p Logs # make sure we have the most up-to-date json files eosArea="root://eoslhcb.cern.ch//eos/lhcb/wg/b2oc/B2D0pipi/timedep_fit_histos" -rm -f run1DTAsplines.json run2DTAsplines.json Bd2D0pipi_DP_Model_Coeffs.json +rm -f run1DTAsplines.json run2DTAsplines.json Bd2D0pipi_DP_Model_Coeffs.json Bd2D0pipi_DP_Vetoes.json xrdcp $eosArea/run1DTAsplines.json . if [ $? -ne 0 ] then echo "Problem downloading json files from EOS, exiting" exit 1 fi xrdcp $eosArea/run2DTAsplines.json . if [ $? -ne 0 ] then echo "Problem downloading json files from EOS, exiting" exit 1 fi xrdcp $eosArea/Bd2D0pipi_DP_Model_Coeffs.json . if [ $? -ne 0 ] then echo "Problem downloading json files from EOS, exiting" exit 1 fi +xrdcp $eosArea/Bd2D0pipi_DP_Vetoes.json . +if [ $? -ne 0 ] +then + echo "Problem downloading json files from EOS, exiting" + exit 1 +fi if [ $iFit == 0 -a $dataFit == 0 ] then #run generation of the modes in parallel (put the tasks in the bg) echo "Generating samples..." if [ $runNo -eq 1 -o $runNo -eq 12 ] then GenFitTimeDep_Bd2Dpipi gen --dtype QFS --sigOnly $sigOnly --fixTau $fixTau --seed $seed --dta-model $dta_model --dtr $dtr --dtr-perevent $dtr_perevent --firstExptGen $firstExpt --nExptGen $nExpt --run 1 --dir B2Dhh_Kpi > $logName_genQFS_run1 2>&1 & task=$! processes+=( $task ) GenFitTimeDep_Bd2Dpipi gen --dtype CPEven --sigOnly $sigOnly --fixTau $fixTau --seed $seed --dta-model $dta_model --dtr $dtr --dtr-perevent $dtr_perevent --firstExptGen $firstExpt --nExptGen $nExpt --run 1 --dir B2Dhh_KK > $logName_genCPE_KK_run1 2>&1 & task=$! processes+=( $task ) GenFitTimeDep_Bd2Dpipi gen --dtype CPEven --sigOnly $sigOnly --fixTau $fixTau --seed $seed --dta-model $dta_model --dtr $dtr --dtr-perevent $dtr_perevent --firstExptGen $firstExpt --nExptGen $nExpt --run 1 --dir B2Dhh_pipi > $logName_genCPE_pipi_run1 2>&1 & task=$! 
processes+=( $task ) fi if [ $runNo -eq 2 -o $runNo -eq 12 ] then GenFitTimeDep_Bd2Dpipi gen --dtype QFS --sigOnly $sigOnly --fixTau $fixTau --seed $seed --dta-model $dta_model --dtr $dtr --dtr-perevent $dtr_perevent --firstExptGen $firstExpt --nExptGen $nExpt --run 2 --dir B2Dhh_Kpi > $logName_genQFS_run2 2>&1 & task=$! processes+=( $task ) GenFitTimeDep_Bd2Dpipi gen --dtype CPEven --sigOnly $sigOnly --fixTau $fixTau --seed $seed --dta-model $dta_model --dtr $dtr --dtr-perevent $dtr_perevent --firstExptGen $firstExpt --nExptGen $nExpt --run 2 --dir B2Dhh_KK > $logName_genCPE_KK_run2 2>&1 & task=$! processes+=( $task ) GenFitTimeDep_Bd2Dpipi gen --dtype CPEven --sigOnly $sigOnly --fixTau $fixTau --seed $seed --dta-model $dta_model --dtr $dtr --dtr-perevent $dtr_perevent --firstExptGen $firstExpt --nExptGen $nExpt --run 2 --dir B2Dhh_pipi > $logName_genCPE_pipi_run2 2>&1 & task=$! processes+=( $task ) fi wait #wait for the generation to complete fi echo "Running fit $iFit" if [ $runNo -eq 12 ] then nTasks=6 else nTasks=3 fi SimFitCoordinator $iFit $nExpt $firstExpt $nTasks > $logName_coord 2>&1 & task=$! echo "SimFitCoordinator process : $task" processes+=( $task ) echo "Initialised coordinator process for fit $iFit" sleep 5 port="" while [ "x$port" == "x" ] do sleep 2 port=`tail $logName_coord | grep "Waiting for connection" | awk '{print $NF}'` done echo "Coordinator is listening on port $port" echo "Initialising tasks..." if [ $runNo -eq 1 -o $runNo -eq 12 ] then GenFitTimeDep_Bd2Dpipi simfit --blindFit $dataFit --dataFit $dataFit --dtype QFS --seed $seed --port $port --iFit $iFit --fixTau $fixTau --dta-model $dta_model --dtr $dtr --dtr-perevent $dtr_perevent --firstExpt $firstExpt --nExpt $nExpt --firstExptGen $firstExpt --nExptGen $nExpt --run 1 --dir B2Dhh_Kpi --fixDm $fixDeltaM > $logName_taskQFS_run1 2>&1 & task=$! 
echo "QFS Run 1 process : $task" processes+=( $task ) sleep 5 GenFitTimeDep_Bd2Dpipi simfit --blindFit $dataFit --dataFit $dataFit --dtype CPEven --seed $seed --port $port --iFit $iFit --fixTau $fixTau --dta-model $dta_model --dtr $dtr --dtr-perevent $dtr_perevent --firstExpt $firstExpt --nExpt $nExpt --firstExptGen $firstExpt --nExptGen $nExpt --run 1 --dir B2Dhh_KK --fixDm $fixDeltaM > $logName_taskCPE_KK_run1 2>&1 & task=$! echo "CP KK Run 1 process : $task" processes+=( $task ) sleep 5 GenFitTimeDep_Bd2Dpipi simfit --blindFit $dataFit --dataFit $dataFit --dtype CPEven --seed $seed --port $port --iFit $iFit --fixTau $fixTau --dta-model $dta_model --dtr $dtr --dtr-perevent $dtr_perevent --firstExpt $firstExpt --nExpt $nExpt --firstExptGen $firstExpt --nExptGen $nExpt --run 1 --dir B2Dhh_pipi --fixDm $fixDeltaM > $logName_taskCPE_pipi_run1 2>&1 & task=$! echo "CP pipi Run 1 process : $task" processes+=( $task ) fi if [ $runNo -eq 2 -o $runNo -eq 12 ] then GenFitTimeDep_Bd2Dpipi simfit --blindFit $dataFit --dataFit $dataFit --dtype QFS --seed $seed --port $port --iFit $iFit --fixTau $fixTau --dta-model $dta_model --dtr $dtr --dtr-perevent $dtr_perevent --firstExpt $firstExpt --nExpt $nExpt --firstExptGen $firstExpt --nExptGen $nExpt --run 2 --dir B2Dhh_Kpi --fixDm $fixDeltaM > $logName_taskQFS_run2 2>&1 & task=$! echo "QFS Run 2 process : $task" processes+=( $task ) sleep 5 GenFitTimeDep_Bd2Dpipi simfit --blindFit $dataFit --dataFit $dataFit --dtype CPEven --seed $seed --port $port --iFit $iFit --fixTau $fixTau --dta-model $dta_model --dtr $dtr --dtr-perevent $dtr_perevent --firstExpt $firstExpt --nExpt $nExpt --firstExptGen $firstExpt --nExptGen $nExpt --run 2 --dir B2Dhh_KK --fixDm $fixDeltaM > $logName_taskCPE_KK_run2 2>&1 & task=$! 
echo "CP KK Run 2 process : $task" processes+=( $task ) sleep 5 GenFitTimeDep_Bd2Dpipi simfit --blindFit $dataFit --dataFit $dataFit --dtype CPEven --seed $seed --port $port --iFit $iFit --fixTau $fixTau --dta-model $dta_model --dtr $dtr --dtr-perevent $dtr_perevent --firstExpt $firstExpt --nExpt $nExpt --firstExptGen $firstExpt --nExptGen $nExpt --run 2 --dir B2Dhh_pipi --fixDm $fixDeltaM > $logName_taskCPE_pipi_run2 2>&1 & task=$! echo "CP pipi Run 2 process : $task" processes+=( $task ) fi wait #wait for the fits and coordinator to terminate echo "Fit completed" echo "Warnings/Errors from coordinator:" grep -e ERROR -e WARNING -e Error -e Warning $logName_coord echo "Warnings/Errors from CPeven KK tasks:" if [ $runNo -eq 1 -o $runNo -eq 12 ] then grep -e ERROR -e WARNING -e Error -e Warning $logName_taskCPE_KK_run1 fi if [ $runNo -eq 2 -o $runNo -eq 12 ] then grep -e ERROR -e WARNING -e Error -e Warning $logName_taskCPE_KK_run2 fi echo "Warnings/Errors from CPeven pipi tasks:" if [ $runNo -eq 1 -o $runNo -eq 12 ] then grep -e ERROR -e WARNING -e Error -e Warning $logName_taskCPE_pipi_run1 fi if [ $runNo -eq 2 -o $runNo -eq 12 ] then grep -e ERROR -e WARNING -e Error -e Warning $logName_taskCPE_pipi_run2 fi echo "Warnings/Errors from QFS tasks:" if [ $runNo -eq 1 -o $runNo -eq 12 ] then grep -e ERROR -e WARNING -e Error -e Warning $logName_taskQFS_run1 fi if [ $runNo -eq 2 -o $runNo -eq 12 ] then grep -e ERROR -e WARNING -e Error -e Warning $logName_taskQFS_run2 fi diff --git a/inc/LauAbsBkgndDPModel.hh b/inc/LauAbsBkgndDPModel.hh index d111925..c14da6f 100644 --- a/inc/LauAbsBkgndDPModel.hh +++ b/inc/LauAbsBkgndDPModel.hh @@ -1,193 +1,187 @@ /* Copyright 2004 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauAbsBkgndDPModel.hh \brief File containing declaration of LauAbsBkgndDPModel class. */ /*! \class LauAbsBkgndDPModel \brief The abstract interface for a background Dalitz plot model Class which defines the abstract interface for a background Dalitz plot model */ #ifndef LAU_ABS_BKGND_DP_MODEL #define LAU_ABS_BKGND_DP_MODEL #include "Rtypes.h" class LauDaughters; class LauKinematics; class LauFitDataTree; class LauVetoes; class LauAbsBkgndDPModel { public: //! Constructor /*! \param [in] daughters the daughter particles \param [in] vetoes the vetoes within the Dalitz plot */ - LauAbsBkgndDPModel(LauDaughters* daughters, LauVetoes* vetoes); + LauAbsBkgndDPModel(LauDaughters* daughters, const LauVetoes* vetoes); //! Copy constructor (deleted) LauAbsBkgndDPModel(const LauAbsBkgndDPModel& rhs) = delete; //! Copy assignment operator (deleted) LauAbsBkgndDPModel& operator=(const LauAbsBkgndDPModel& rhs) = delete; //! Move constructor LauAbsBkgndDPModel(LauAbsBkgndDPModel&& rhs) = default; //! Move assignment operator LauAbsBkgndDPModel& operator=(LauAbsBkgndDPModel&& rhs) = default; //! Destructor virtual ~LauAbsBkgndDPModel() = default; //! Initialise the model virtual void initialise() = 0; //! Generate a toy MC event from the model /*! \return the kinematics object */ virtual LauKinematics* generate() = 0; //! Generate a point based on a uniform distribution in the regular/square DP /*! \return the kinematics object */ virtual LauKinematics* genUniformPoint() = 0; //! Get PDF normalisation constant /*! 
\return the PDF normalisation constant */ virtual Double_t getPdfNorm() const = 0; //! Get maximum PDF height /*! \return the PDF maximum height */ virtual Double_t getMaxHeight() const = 0; //! Get the raw unnormalised function value for the current event /*! Specifically, compared with getUnNormValue, this value does not account for any transform Jacobian \return the raw unnormalised function value */ virtual Double_t getRawValue() const = 0; //! Get the unnormalised likelihood value for the current event /*! \return the unnormalised likelihood values */ virtual Double_t getUnNormValue() const = 0; //! Get unnormalised likelihood for a given event /*! \param [in] iEvt the event number \return the unnormalised likelihood value */ virtual Double_t getUnNormValue(const UInt_t iEvt) = 0; //! Get likelihood (unnormalised likelihood value / PDF normalisation) for a given event /*! \param [in] iEvt the event number \return the likelihood value */ virtual Double_t getLikelihood(const UInt_t iEvt) = 0; //! Calculate the likelihood value for a given set of Dalitz plot coordinates /*! \param [in] kinematics the DP kinematics */ virtual void calcLikelihoodInfo(const LauKinematics* kinematics) = 0; //! Cache the input data and (if appropriate) the per-event likelihood values /*! \param [in] fitDataTree the input data */ virtual void fillDataTree(const LauFitDataTree& fitDataTree) = 0; //! Get the daughter particles /*! \return the daughters */ const LauDaughters* getDaughters() const {return daughters_;} protected: //! Set data event number /*! \param [in] iEvt the event number */ virtual void setDataEventNo(const UInt_t iEvt) = 0; //! Get the daughter particles /*! \return the daughters */ LauDaughters* getDaughters() {return daughters_;} //! Get the Dalitz plot kinematics /*! \return the kinematics */ const LauKinematics* getKinematics() const {return kinematics_;} //! Get the Dalitz plot kinematics /*! 
\return the kinematics */ LauKinematics* getKinematics() {return kinematics_;} //! Get vetoes in the Dalitz plot /*! \return the vetoes */ const LauVetoes* getVetoes() const {return vetoes_;} - //! Get vetoes in the Dalitz plot - /*! - \return the vetoes - */ - LauVetoes* getVetoes() {return vetoes_;} - private: //! The daughter particles LauDaughters* daughters_{nullptr}; //! Dalitz plot kinematics LauKinematics* kinematics_{nullptr}; //! Vetoes within the Dalitz plot - LauVetoes* vetoes_{nullptr}; + const LauVetoes* vetoes_{nullptr}; ClassDef(LauAbsBkgndDPModel,0) // Abstract DP background model }; #endif diff --git a/inc/LauBkgndDPModel.hh b/inc/LauBkgndDPModel.hh index 4711674..59e030f 100644 --- a/inc/LauBkgndDPModel.hh +++ b/inc/LauBkgndDPModel.hh @@ -1,210 +1,210 @@ /* Copyright 2004 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauBkgndDPModel.hh \brief File containing declaration of LauBkgndDPModel class. */ /*! \class LauBkgndDPModel \brief Class for defining a histogram-based background Dalitz plot model Class for defining a histogram-based background Dalitz plot model */ #ifndef LAU_BKGND_DP_MODEL #define LAU_BKGND_DP_MODEL #include "LauAbsBkgndDPModel.hh" #include #include class TH2; class Lau2DAbsDPPdf; class LauDaughters; class LauFitDataTree; class LauVetoes; class LauBkgndDPModel : public LauAbsBkgndDPModel { public: //! Constructor /*! 
\param [in] daughters the daughters in the decay \param [in] vetoes the vetoes in the Dalitz plot */ - LauBkgndDPModel(LauDaughters* daughters, LauVetoes* vetoes); + LauBkgndDPModel(LauDaughters* daughters, const LauVetoes* vetoes); //! Set background histogram /*! \param [in] histo the 2D histogram describing the DP distribution \param [in] useInterpolation boolean flag to determine whether linear interpolation between bins should be used or simply the raw bin values \param [in] fluctuateBins boolean flag to determine whether the bin contents should be fluctuated in accordance with their errors. The seed for the random number generator used to fluctuate the bins should first be set using LauRandom::setSeed. \param [in] useUpperHalfOnly boolean flag to specify that the supplied histogram contains only the upper half of a symmetric DP (or lower half if using square DP coordinates) \param [in] squareDP boolean flag to determine whether the supplied histogram is given in square DP coordinates */ void setBkgndHisto(const TH2* histo, const Bool_t useInterpolation, const Bool_t fluctuateBins = kFALSE, const Bool_t useUpperHalfOnly = kFALSE, const Bool_t squareDP = kFALSE); //! Set the background histogram and generate a spline /*! \param [in] histo the 2D histogram describing the DP distribution \param [in] fluctuateBins boolean flag to determine whether the bin contents should be fluctuated in accordance with their errors. The seed for the random number generator used to fluctuate the bins should first be set using LauRandom::setSeed. \param [in] useUpperHalfOnly boolean flag to specify that the supplied histogram contains only the upper half of a symmetric DP (or lower half if using square DP coordinates) \param [in] squareDP boolean flag to determine whether the supplied histogram is given in square DP coordinates */ void setBkgndSpline(const TH2* histo, const Bool_t fluctuateBins = kFALSE, const Bool_t useUpperHalfOnly = kFALSE, const Bool_t squareDP = kFALSE); //!
Initialise the model void initialise() override; //! Generate a toy MC event from the model /*! \return the kinematics object */ LauKinematics* generate() override; //! Generate a point based on a uniform distribution in the regular/square DP /*! \return the kinematics object */ LauKinematics* genUniformPoint() override; //! Get PDF normalisation constant /*! \return the PDF normalisation constant */ Double_t getPdfNorm() const override {return pdfNorm_;} //! Get maximum histogram height /*! \return the maximum height of the histogram */ Double_t getMaxHeight() const override {return maxHeight_;} //! Get the raw unnormalised histogram value for the current event /*! Specifically, compared with getUnNormValue, this value does not account for any transform Jacobian \return the raw unnormalised histogram value */ Double_t getRawValue() const override {return curEvtHistVal_;} //! Get the unnormalised likelihood value for the current event /*! \return the unnormalised likelihood value */ Double_t getUnNormValue() const override {return curEvtPdfVal_;} //! Get unnormalised likelihood for a given event /*! \param [in] iEvt the event number \return the unnormalised likelihood value */ Double_t getUnNormValue(const UInt_t iEvt) override; //! Get likelihood (unnormalised likelihood value / PDF normalisation) for a given event /*! \param [in] iEvt the event number \return the likelihood value */ Double_t getLikelihood(const UInt_t iEvt) override; //! Calculate the likelihood value for a given set of Dalitz plot coordinates /*! \param [in] kinematics the DP kinematics */ void calcLikelihoodInfo(const LauKinematics* kinematics) override; //! Cache the input data and (if appropriate) the per-event likelihood values /*! \param [in] fitDataTree the input data */ void fillDataTree(const LauFitDataTree& fitDataTree) override; protected: //! Obtain the histogram value at a given point /*! 
\param [in] xVal the x-value \param [in] yVal the y-value \return the histogram value */ Double_t getHistValue(const Double_t xVal, const Double_t yVal); //! Check that the histogram limits are reasonable /*! \param [in] histo the histogram to be checked */ Bool_t checkHistoLimits(const TH2* histo); //! Set data event number /*! \param [in] iEvt the event number */ void setDataEventNo(const UInt_t iEvt) override; private: //! Flags whether the DP is symmetrical or not Bool_t symmetricalDP_{kFALSE}; //! Flags whether or not to work in square DP coordinates Bool_t squareDP_{kFALSE}; //! PDF of Dalitz plot background, from a 2D histogram std::unique_ptr bgHistDPPdf_{nullptr}; //! Cached histogram values for each event std::vector bgData_; //! Histogram value for the current event Double_t curEvtHistVal_{0.0}; //! PDF value for the current event Double_t curEvtPdfVal_{0.0}; //! Maximum height of PDF Double_t maxHeight_{1.0}; //! Normalisation of PDF Double_t pdfNorm_{1.0}; //! Boolean to indicate if the warning that there is no histogram has already been issued Bool_t doneGenWarning_{kFALSE}; //! Flag to track whether a warning has been issued for bin values less than zero mutable Bool_t lowBinWarningIssued_{kFALSE}; ClassDefOverride(LauBkgndDPModel,0) }; #endif diff --git a/inc/LauJsonTools.hh b/inc/LauJsonTools.hh index 871b29b..69f88bd 100644 --- a/inc/LauJsonTools.hh +++ b/inc/LauJsonTools.hh @@ -1,152 +1,162 @@ /* Copyright 2023 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ #ifndef LAU_JSON_TOOLS_HH #define LAU_JSON_TOOLS_HH #include #include #include /*! \file LauJsonTools.hh \brief File containing LauJsonTools namespace */ /*! \namespace LauJsonTools \brief Namespace containing tools for reading/writing JSON files */ namespace LauJsonTools { /*! \enum JsonType \brief Define types that can be found in JSON fields and that we can handle */ enum class JsonType { Null, //!< JSON value is null Object, //!< JSON value is an object Array, //!< JSON value is an array String, //!< JSON value is a string Boolean, //!< JSON value is a boolean Number_Integer, //!< JSON value is an integer (signed or unsigned) Number_Unsigned, //!< JSON value is an unsigned integer Number_Float, //!< JSON value is a floating point value Number, //!< JSON value is any number (integer, unsigned or float) Primitive, //!< JSON value is any primitive type (null, string, boolean, integer, unsigned, float) Structured, //!< JSON value is any structured type (object, array) Any //!< JSON value is any of the above }; //! Typedef to define a combination of a JSON element's name and type using ElementNameType = std::pair< std::string, JsonType >; //! Exception object to be thrown in case of a missing element class MissingJsonElement : public std::runtime_error { public: //! Constructor /*! \param [in] what the message explaining the precise error that has occurred */ MissingJsonElement(const std::string& what) : std::runtime_error(what) {} }; + //! Exception object to be thrown in case of malformed JSON + class InvalidJson : public std::runtime_error { + public: + //! Constructor + /*! + \param [in] what the message explaining the precise error that has occurred + */ + InvalidJson(const std::string& what) : std::runtime_error(what) {} + }; + //! Deserialise a JSON file to a JSON value /*!
\param [in] fileName the name of the file to read \param [in] elementName the optional name of the element within the root object to retrieve - by default retrieve the root structure \param [in] expectedType the expected type of the value */ nlohmann::json readJsonFile(const std::string& fileName, const std::string& elementName = "", const JsonType expectedType = JsonType::Any); //! Serialise a JSON value to a JSON file /*! \param [in] value the JSON value to serialise \param [in] fileName the name of the file to be written \param [in] elementName the (optional) name of the JSON element to which the coefficients should be written \param [in] append if true, append the spline to the existing JSON within the file - if using this option it is then mandatory to provide elementName \param [in] indent the indentation level to use in the output in number of spaces (defaults to 4)\n From the nlohmann::json::dump documentation:\n If indent is nonnegative, then array elements and object members will be pretty-printed with that indent level. An indent level of 0 will only insert newlines. A value of -1 selects the most compact representation. \return true if file successfully written, false otherwise */ bool writeJsonFile(const nlohmann::json& value, const std::string& fileName, const std::string& elementName = "", const bool append = false, const int indent = 4); //! Check that the type of a JSON value is as expected /*! \param [in] value the JSON value to check \param [in] expectedType the expected type of the value */ bool checkValueType(const nlohmann::json& value, const JsonType expectedType); //! Check that the expected JSON elements are present in the supplied object /*! \param [in] obj the JSON object to check \param [in] expectedElements the elements (names and types) that are expected \return true if all expected elements are present, false otherwise */ bool checkObjectElements( const nlohmann::json& obj, const std::vector& expectedElements ); //! 
Access the value of an element that should be present in an object (no checking is done) /*! \param [in] obj the JSON object to access \param [in] elementName the name of the element within the object to retrieve \return the value of the element */ template T getValue( const nlohmann::json& obj, const std::string& elementName ) { return obj.at( elementName ).get(); } //! Access an element that may or may not be present in an object /*! \param [in] obj the JSON object to access \param [in] elementName the name of the element within the object to retrieve \param [in] expectedType the expected type of the element \return a pointer to the element, or nullptr if it is not present */ const nlohmann::json* getOptionalElement( const nlohmann::json& obj, const std::string& elementName, const JsonType expectedType = JsonType::Any ); //! Access the value of an element that may or may not be present in an object /*! \param [in] obj the JSON object to access \param [in] elementName the name of the element within the object to retrieve \param [in] expectedType the expected type of the element \return the value of the element as a std::optional */ template std::optional getOptionalValue( const nlohmann::json& obj, const std::string& elementName, const JsonType expectedType = JsonType::Any ); } template std::optional LauJsonTools::getOptionalValue( const nlohmann::json& obj, const std::string& elementName, const JsonType expectedType ) { auto elem { getOptionalElement( obj, elementName, expectedType ) }; if ( ! elem ) { return {}; } return elem->get(); } #endif diff --git a/inc/LauVetoes.hh b/inc/LauVetoes.hh index 83fe5e0..987ac65 100644 --- a/inc/LauVetoes.hh +++ b/inc/LauVetoes.hh @@ -1,147 +1,144 @@ /* Copyright 2004 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauVetoes.hh \brief File containing declaration of LauVetoes class. */ /*! \class LauVetoes \brief Class for defining vetoes within the Dalitz plot. Each veto is defined by an index corresponding to one of the three invariant mass-squared variables, and maximum and minimum values for the veto's range in the specified variable. Each mass-squared variable is numbered according to the index of the daughter not involved in the vetoed mass-squared variable. For example a veto in m12 squared would receive the index 3. Since v3r2, in the case of a symmetric DP, vetoes in m13 or m23 will automatically be symmetrised to the other variable (so only one needs to be specified). However, if one wants to veto a region from a symmetric DP in the already-symmetrised co-ordinates, the special indices 4 and 5 can be used that will apply the veto to mMin or mMax, respectively. */ #ifndef LAU_VETOES #define LAU_VETOES -#include - #include "Rtypes.h" +#include "TString.h" + +#include + +#include +#include class LauKinematics; -class LauVetoes { +class LauVetoes final { public: //! Constructor - LauVetoes(); - - //! Destructor - virtual ~LauVetoes(); - - //! Copy constructor - /*! - \param [in] other the object to be copied - */ - LauVetoes(const LauVetoes& other); - - //! Copy assignment operator - /*! - \param [in] other the object to be copied - \return the assigned vetoes object - */ - LauVetoes& operator=(const LauVetoes& other); + LauVetoes() = default; //! Add a veto to the Dalitz plot /*! 
\param [in] resPairAmpInt the index of the mass-squared variable to be vetoed (in the case of symmetric DPs the special indices 4 and 5 can be used to indicate that a veto should be applied to mMin or mMax) \param [in] minMass the minimum mass of the veto \param [in] maxMass the maximum mass of the veto */ - void addMassVeto(const Int_t resPairAmpInt, const Double_t minMass, const Double_t maxMass); + void addMassVeto(const UInt_t resPairAmpInt, const Double_t minMass, const Double_t maxMass); //! Add a veto to the Dalitz plot /*! \param [in] resPairAmpInt the index of the mass-squared variable to be vetoed (in the case of symmetric DPs the special indices 4 and 5 can be used to indicate that a veto should be applied to mMinSq or mMaxSq) \param [in] minMassSq the minimum mass-squared of the veto \param [in] maxMassSq the maximum mass-squared of the veto */ - void addMassSqVeto(const Int_t resPairAmpInt, const Double_t minMassSq, const Double_t maxMassSq); + void addMassSqVeto(const UInt_t resPairAmpInt, const Double_t minMassSq, const Double_t maxMassSq); //! Check whether the specified Dalitz plot point passes the vetoes /*! \param [in] kinematics a point in the Dalitz plot \return true if the specified Dalitz plot point is outside all veto regions, false otherwise */ Bool_t passVeto(const LauKinematics* kinematics) const; - protected: + //! Write the set of vetoes to a JSON file + /*! + \param [in] fileName the name of the file to which the JSON should be written + \param [in] elementName the (optional) name of the element in the JSON to which to write + \param [in] append if true, append to the existing JSON within the file - if using this option it is then mandatory to provide elementName + \param [in] indent the indentation level to use in the output in number of spaces (defaults to 4) + */ + void writeToJson(const TString& fileName, const TString& elementName = "", const bool append = false, const int indent = 4) const; + + //!
Construct a new LauVetoes object based on values read from a json file + /*! + \param [in] fileName the name of the file from which the JSON should be read + \param [in] elementName the (optional) name of the element in the JSON from which to read + \return the newly constructed object + */ + static std::unique_ptr readFromJson(const TString& fileName, const TString& elementName = ""); + + private: //! Check whether the specified Dalitz plot point passes the vetoes /*! \param [in] m12Sq the mass-squared of the first and second daughters \param [in] m23Sq the mass-squared of the second and third daughters \param [in] m13Sq the mass-squared of the first and third daughters \param [in] symmetricDP is the DP symmetric \param [in] fullySymmetricDP is the DP fully symmetric \return true if the specified Dalitz plot point is outside all veto regions, false otherwise */ Bool_t passVeto(const Double_t m12Sq, const Double_t m23Sq, const Double_t m13Sq, const Bool_t symmetricDP, const Bool_t fullySymmetricDP) const; - //! Retrieve the number of vetoes + //! Check the validity of a veto /*! - \return the number of vetoes - */ - UInt_t getNVetoes() const {return nVetoes_;} - - //! Retrieve the index of the vetoed mass-squared variable for each veto - /*! - \return the index of the vetoed mass-squared variable for each veto - */ - const std::vector& getVetoPairs() const {return vetoPair_;} - - //! Retrieve the minimum mass-squared for each veto - /*! 
- \return the minimum mass-squared for each veto + \param [in] resPairAmpInt the index of the mass-squared variable to be vetoed (in the case of symmetric DPs the special indices 4 and 5 can be used to indicate that a veto should be applied to mMinSq or mMaxSq) + \param [in] minMassSq the minimum mass-squared of the veto + \param [in] maxMassSq the maximum mass-squared of the veto */ - const std::vector& getVetoMinMass() const {return vetoMinMass_;} + Bool_t checkVeto(const UInt_t resPairAmpInt, const Double_t minMassSq, const Double_t maxMassSq) const; - //! Retrieve the maximum mass-squared for each veto + //! Print the veto info at a given index /*! - \return the maximum mass-squared for each veto + \param [in] index the index of the veto */ - const std::vector& getVetoMaxMass() const {return vetoMaxMass_;} + void printVeto(const std::size_t index) const; - private: - //! The number of vetoes - UInt_t nVetoes_; + //! Routine to check validity of all vetoes after loading from JSON + void checkValidity(); //! The index of the vetoed mass-squared variable for each veto - std::vector vetoPair_; + std::vector vetoPair_; //! The minimum mass-squared for each veto std::vector vetoMinMass_; //! The maximum mass-squared for each veto std::vector vetoMaxMass_; + // enable JSON serialisation of this class + NLOHMANN_DEFINE_TYPE_INTRUSIVE(LauVetoes, vetoPair_, vetoMinMass_, vetoMaxMass_) + ClassDef(LauVetoes,0) // Vetoes in the Dalitz plot }; #endif diff --git a/src/LauAbsBkgndDPModel.cc b/src/LauAbsBkgndDPModel.cc index d9514dd..64ce709 100644 --- a/src/LauAbsBkgndDPModel.cc +++ b/src/LauAbsBkgndDPModel.cc @@ -1,41 +1,41 @@ /* Copyright 2004 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauAbsBkgndDPModel.cc \brief File containing implementation of LauAbsBkgndDPModel class. */ #include "LauAbsBkgndDPModel.hh" #include "LauDaughters.hh" ClassImp(LauAbsBkgndDPModel) -LauAbsBkgndDPModel::LauAbsBkgndDPModel(LauDaughters* daughters, LauVetoes* vetoes) : +LauAbsBkgndDPModel::LauAbsBkgndDPModel(LauDaughters* daughters, const LauVetoes* vetoes) : daughters_{daughters}, kinematics_{daughters ? daughters->getKinematics() : nullptr}, vetoes_{vetoes} { } diff --git a/src/LauBkgndDPModel.cc b/src/LauBkgndDPModel.cc index d862b0c..8085b23 100644 --- a/src/LauBkgndDPModel.cc +++ b/src/LauBkgndDPModel.cc @@ -1,378 +1,378 @@ /* Copyright 2004 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauBkgndDPModel.cc \brief File containing implementation of LauBkgndDPModel class. 
*/ #include #include #include #include "TH2.h" #include "TRandom.h" #include "TSystem.h" #include "Lau2DHistDPPdf.hh" #include "Lau2DSplineDPPdf.hh" #include "LauBkgndDPModel.hh" #include "LauDaughters.hh" #include "LauFitDataTree.hh" #include "LauKinematics.hh" #include "LauRandom.hh" #include "LauVetoes.hh" ClassImp(LauBkgndDPModel) -LauBkgndDPModel::LauBkgndDPModel(LauDaughters* daughters, LauVetoes* vetoes) : +LauBkgndDPModel::LauBkgndDPModel(LauDaughters* daughters, const LauVetoes* vetoes) : LauAbsBkgndDPModel(daughters, vetoes) { if (daughters != nullptr) { symmetricalDP_ = daughters->gotSymmetricalDP(); } } void LauBkgndDPModel::initialise() { } Bool_t LauBkgndDPModel::checkHistoLimits(const TH2* histo) { if ( squareDP_ ) { // x-axis is mPrime - should be exactly 0.0 ... 1.0 if ( ( histo->GetXaxis()->GetXmin() != 0.0 ) || ( histo->GetXaxis()->GetXmax() != 1.0 ) ) { std::cerr << "ERROR in LauBkgndDPModel::checkHistoLimits : squareDP flag set to true but x-axis limits are not 0.0 ... 1.0" << std::endl; return kFALSE; } // y-axis is thPrime - should be exactly 0.0 ... 1.0 (or 0.0 ... 0.5 if symmetricalDP_) if ( ( histo->GetYaxis()->GetXmin() != 0.0 ) || ( histo->GetYaxis()->GetXmax() != (symmetricalDP_ ? 0.5 : 1.0) ) ) { std::cerr << "ERROR in LauBkgndDPModel::checkHistoLimits : squareDP flag set to true but y-axis limits are not 0.0 ... " << (symmetricalDP_ ? 
"0.5" : "1.0") << std::endl; return kFALSE; } } else { // determine the rectangle that encloses the DP LauKinematics* kinematics { this->getKinematics() }; Double_t m13Sq_min { kinematics->getm13SqMin() }; Double_t m13Sq_max { kinematics->getm13SqMax() }; Double_t m23Sq_min { kinematics->getm23SqMin() }; Double_t m23Sq_max { kinematics->getm23SqMax() }; // if the histo is upper half only, need to allow for the effect of folding if ( symmetricalDP_ ) { // cache the state of the kinematics object const Double_t cache_m13Sq { kinematics->getm13Sq() }; const Double_t cache_m23Sq { kinematics->getm23Sq() }; // find the coordinates of the top right of the fold const Double_t m12_min { kinematics->getm12Min() }; kinematics->updateKinematicsFrom12( m12_min, 0.0 ); m13Sq_max = kinematics->getm13Sq(); // find the coordinates of the bottom left of the fold const Double_t m12_max { kinematics->getm12Max() }; kinematics->updateKinematicsFrom12( m12_max, 0.0 ); m23Sq_min = kinematics->getm23Sq(); // restore previous state kinematics->updateKinematics(cache_m13Sq, cache_m23Sq); } // x-axis is m13Sq - should enclose m13Sq_min ... m13Sq_max if ( ( histo->GetXaxis()->GetXmin() > m13Sq_min ) || ( histo->GetXaxis()->GetXmax() < m13Sq_max ) ) { std::cerr << "ERROR in LauBkgndDPModel::checkHistoLimits : squareDP flag set to false but x-axis limits do not enclose " << m13Sq_min << " ... " << m13Sq_max << std::endl; return kFALSE; } // y-axis is m23Sq - should enclose m23Sq_min ... m23Sq_max if ( ( histo->GetYaxis()->GetXmin() > m23Sq_min ) || ( histo->GetYaxis()->GetXmax() < m23Sq_max ) ) { std::cerr << "ERROR in LauBkgndDPModel::checkHistoLimits : squareDP flag set to false but y-axis limits do not enclose " << m23Sq_min << " ... 
" << m23Sq_max << std::endl; return kFALSE; } } // otherwise all OK return kTRUE; } void LauBkgndDPModel::setBkgndHisto(const TH2* histo, const Bool_t useInterpolation, const Bool_t fluctuateBins, const Bool_t useUpperHalfOnly, const Bool_t squareDP) { squareDP_ = squareDP; symmetricalDP_ &= useUpperHalfOnly; if ( symmetricalDP_ ) { std::cout << "INFO in LauBkgndDPModel::setBkgndHisto : Background histogram \"" << histo->GetName() << "\" has upper half only" << std::endl; } if ( ! histo ) { std::cerr << "WARNING in LauBkgndDPModel::setBkgndHisto : Supplied background histogram pointer is null, likelihood for this component will be flat in the Dalitz plot" << std::endl; } else if ( ! this->checkHistoLimits( histo ) ) { std::cerr << "WARNING in LauBkgndDPModel::setBkgndHisto : Supplied background histogram does not seem to properly enclose the Dalitz plot - please check the axis ranges" << std::endl; } LauKinematics* kinematics = this->getKinematics(); const LauVetoes* vetoes = this->getVetoes(); bgHistDPPdf_ = std::make_unique(histo, kinematics, vetoes, useInterpolation, fluctuateBins, symmetricalDP_, squareDP); maxHeight_ = bgHistDPPdf_->getMaxHeight(); pdfNorm_ = bgHistDPPdf_->getHistNorm(); } void LauBkgndDPModel::setBkgndSpline(const TH2* histo, const Bool_t fluctuateBins, const Bool_t useUpperHalfOnly, const Bool_t squareDP) { squareDP_ = squareDP; symmetricalDP_ &= useUpperHalfOnly; if ( symmetricalDP_ ) { std::cout << "INFO in LauBkgndDPModel::setBkgndSpline : Background histogram \"" << histo->GetName() << "\" has upper half only" << std::endl; } if ( ! histo ) { std::cerr << "WARNING in LauBkgndDPModel::setBkgndSpline : Supplied background histogram pointer is null, construction of spline will fail" << std::endl; } else if ( ! 
this->checkHistoLimits( histo ) ) { std::cerr << "WARNING in LauBkgndDPModel::setBkgndSpline : Supplied background histogram does not seem to properly enclose the Dalitz plot - please check the axis ranges" << std::endl; } LauKinematics* kinematics = this->getKinematics(); const LauVetoes* vetoes = this->getVetoes(); bgHistDPPdf_ = std::make_unique(histo, kinematics, vetoes, fluctuateBins, symmetricalDP_, squareDP); maxHeight_ = bgHistDPPdf_->getMaxHeight(); pdfNorm_ = bgHistDPPdf_->getHistNorm(); } Double_t LauBkgndDPModel::getHistValue(const Double_t xVal, const Double_t yVal) { // Get the likelihood value of the background in the Dalitz plot. // Check that we have a valid histogram PDF if ( ! bgHistDPPdf_ ) { std::cerr << "WARNING in LauBkgndDPModel::getHistValue : We don't have a histogram so assuming the likelihood is flat in the Dalitz plot." << std::endl; this->setBkgndHisto( nullptr, kFALSE, kFALSE, kFALSE, kFALSE ); } // Find out the un-normalised PDF value const Double_t value { bgHistDPPdf_->interpolateXY(xVal, yVal) }; // Check that the value is greater than zero // If we're using a spline then negative values can be caused by adjacent bins that all contain a value of zero. // The spline requires the value, its first derivatives and the mixed second derivative to be continuous and to match the input histogram // at the bin centres. Derivatives are calculated using a finite difference approximation taking the difference between the neighbouring bins. // If two bins are zero but the third is not then the second bin will have a positive first derivative causing the spline to dip below zero // between the two zero bins to remain smooth. Such dips are unavoidable but are correctly removed here. if ( value < 0.0 ) { if(!lowBinWarningIssued_) { std::cerr << "WARNING in LauBkgndDPModel::getHistValue : Value " << value << " is less than 0 - setting to 0. 
You may want to check your histogram!\n" << " : If you are using a spline then this could be caused by adjacent empty bins.\n" << " : Further warnings will be suppressed." << std::endl; lowBinWarningIssued_ = kTRUE; } return 0.0; } return value; } LauKinematics* LauBkgndDPModel::genUniformPoint() { LauKinematics* kinematics = this->getKinematics(); if ( squareDP_ ) { // Generate a point in m', theta' space Double_t mPrime{0.0}, thetaPrime{0.0}; kinematics->genFlatSqDP(mPrime, thetaPrime); // If we're in a symmetrical DP then we should only generate events in one half if ( symmetricalDP_ && thetaPrime > 0.5 ) { thetaPrime = 1.0 - thetaPrime; } // Update the kinematics based on this generated point kinematics->updateSqDPKinematics(mPrime, thetaPrime); } else { // Generate a point in the Dalitz plot Double_t m13Sq{0.0}, m23Sq{0.0}; kinematics->genFlatPhaseSpace(m13Sq, m23Sq); // If we're in a symmetrical DP then we should only generate events in one half if ( symmetricalDP_ && m13Sq > m23Sq ) { std::swap( m13Sq, m23Sq ); } // Update the kinematics based on this generated point kinematics->updateKinematics(m13Sq, m23Sq); } return kinematics; } LauKinematics* LauBkgndDPModel::generate() { // Routine to generate the background, using data provided by an // already defined histogram. 
LauKinematics* kinematics { this->getKinematics() }; const LauVetoes* vetoes { this->getVetoes() }; Bool_t gotBG{kFALSE}; do { if (squareDP_ == kTRUE) { // Generate a point in m', theta' space Double_t mPrime{0.0}, thetaPrime{0.0}; kinematics->genFlatSqDP(mPrime, thetaPrime); // If we're in a symmetrical DP then we should only generate events in one half if ( symmetricalDP_ && thetaPrime > 0.5 ) { thetaPrime = 1.0 - thetaPrime; } // Calculate histogram height for DP point and // compare with the maximum height if ( bgHistDPPdf_ != nullptr ) { const Double_t bgContDP { bgHistDPPdf_->interpolateXY(mPrime, thetaPrime) / maxHeight_ }; if (LauRandom::randomFun()->Rndm() < bgContDP) { kinematics->updateSqDPKinematics(mPrime, thetaPrime); gotBG = kTRUE; } } else { if ( !doneGenWarning_ ) { std::cerr << "WARNING in LauBkgndDPModel::generate : We don't have a histogram so generating flat in the square DP, which won't be flat in the conventional DP!" << std::endl; std::cerr << "WARNING in LauBkgndDPModel::generate : This should never happen!! What have you done?!" << std::endl; doneGenWarning_ = kTRUE; } kinematics->updateSqDPKinematics(mPrime, thetaPrime); gotBG = kTRUE; } } else { // Generate a point in the Dalitz plot (phase-space). Double_t m13Sq{0.0}, m23Sq{0.0}; kinematics->genFlatPhaseSpace(m13Sq, m23Sq); // If we're in a symmetrical DP then we should only generate events in one half if ( symmetricalDP_ && m13Sq > m23Sq ) { Double_t tmpSq = m13Sq; m13Sq = m23Sq; m23Sq = tmpSq; } // Calculate histogram height for DP point and // compare with the maximum height if ( bgHistDPPdf_ != nullptr ) { const Double_t bgContDP { bgHistDPPdf_->interpolateXY(m13Sq, m23Sq) / maxHeight_ }; if (LauRandom::randomFun()->Rndm() < bgContDP) { kinematics->updateKinematics(m13Sq, m23Sq); gotBG = kTRUE; } } else { if ( !doneGenWarning_ ) { std::cerr << "WARNING in LauBkgndDPModel::generate : We don't have a histogram so generating flat in the DP." 
<< std::endl; doneGenWarning_ = kTRUE; } kinematics->updateKinematics(m13Sq, m23Sq); gotBG = kTRUE; } } // Implement any vetoes if ( gotBG && vetoes ) { gotBG = vetoes->passVeto(kinematics); } } while ( ! gotBG ); return kinematics; } void LauBkgndDPModel::fillDataTree(const LauFitDataTree& inputFitTree) { // In LauFitDataTree, the first two variables should always be // m13^2 and m23^2. Other variables follow thus: charge. const Bool_t hasBranches { inputFitTree.haveBranch("m13Sq") && inputFitTree.haveBranch("m23Sq") }; if ( ! hasBranches ) { std::cerr << "ERROR in LauBkgndDPModel::fillDataTree : Unable to find the variables \"m13Sq\" and \"m23Sq\" in input data tree!\n"; std::cerr << " : Make sure you have the right variables in your input data file!" << std::endl; return; } const UInt_t nEvents { inputFitTree.nEvents() }; // clear and resize the data vector bgData_.clear(); bgData_.resize(nEvents); LauKinematics* kinematics { this->getKinematics() }; for (UInt_t iEvt {0}; iEvt < nEvents; iEvt++) { const LauFitData& dataValues { inputFitTree.getData(iEvt) }; kinematics->updateKinematics(dataValues.at("m13Sq"), dataValues.at("m23Sq")); this->calcLikelihoodInfo(kinematics); bgData_[iEvt] = curEvtPdfVal_; } } void LauBkgndDPModel::calcLikelihoodInfo(const LauKinematics* kinematics) { if ( squareDP_ ) { const Double_t mPrime { kinematics->getmPrime() }; const Double_t thetaPrime { kinematics->getThetaPrime() }; const Double_t jacobian { kinematics->calcSqDPJacobian() }; curEvtHistVal_ = this->getHistValue(mPrime, thetaPrime); curEvtPdfVal_ = curEvtHistVal_ / jacobian; } else { const Double_t m13Sq { kinematics->getm13Sq() }; const Double_t m23Sq { kinematics->getm23Sq() }; curEvtPdfVal_ = curEvtHistVal_ = this->getHistValue(m13Sq, m23Sq); } } Double_t LauBkgndDPModel::getUnNormValue(const UInt_t iEvt) { // Retrieve the likelihood for the given event this->setDataEventNo(iEvt); return curEvtPdfVal_; } Double_t LauBkgndDPModel::getLikelihood(const UInt_t iEvt) { // 
Retrieve the likelihood for the given event this->setDataEventNo(iEvt); return curEvtPdfVal_ / pdfNorm_; } void LauBkgndDPModel::setDataEventNo(const UInt_t iEvt) { // Retrieve the data for event iEvt if (bgData_.size() > iEvt) { curEvtPdfVal_ = bgData_[iEvt]; } else { std::cerr << "ERROR in LauBkgndDPModel::setDataEventNo : Event index too large: " << iEvt << " >= " << bgData_.size() << std::endl; } } diff --git a/src/LauVetoes.cc b/src/LauVetoes.cc index 6dc7af4..9348a55 100644 --- a/src/LauVetoes.cc +++ b/src/LauVetoes.cc @@ -1,215 +1,238 @@ /* Copyright 2004 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauVetoes.cc \brief File containing implementation of LauVetoes class. 
*/ -#include +#include "LauVetoes.hh" +#include "LauJsonTools.hh" #include "LauKinematics.hh" -#include "LauVetoes.hh" -ClassImp(LauVetoes) +#include +#include +ClassImp(LauVetoes) -LauVetoes::LauVetoes() : - nVetoes_(0) -{ -} -LauVetoes::~LauVetoes() +void LauVetoes::addMassVeto(const UInt_t resPairAmpInt, const Double_t minMass, const Double_t maxMass) { -} + const Double_t minMassSq { minMass*minMass }; + const Double_t maxMassSq { maxMass*maxMass }; -LauVetoes::LauVetoes(const LauVetoes& other) : - nVetoes_(other.nVetoes_), - vetoPair_(other.vetoPair_), - vetoMinMass_(other.vetoMinMass_), - vetoMaxMass_(other.vetoMaxMass_) -{ + this->addMassSqVeto( resPairAmpInt, minMassSq, maxMassSq ); } -LauVetoes& LauVetoes::operator=(const LauVetoes& other) +void LauVetoes::addMassSqVeto(const UInt_t resPairAmpInt, const Double_t minMassSq, const Double_t maxMassSq) { - if ( &other != this ) { - nVetoes_ = other.nVetoes_; - vetoPair_ = other.vetoPair_; - vetoMinMass_ = other.vetoMinMass_; - vetoMaxMass_ = other.vetoMaxMass_; + if ( ! this->checkVeto(resPairAmpInt, minMassSq, maxMassSq) ) { + return; } - return *this; -} - -void LauVetoes::addMassVeto(const Int_t resPairAmpInt, const Double_t minMass, const Double_t maxMass) -{ - const Double_t minMassSq = minMass*minMass; - const Double_t maxMassSq = maxMass*maxMass; + vetoPair_.push_back(resPairAmpInt); + vetoMinMass_.push_back(minMassSq); + vetoMaxMass_.push_back(maxMassSq); - this->addMassSqVeto(resPairAmpInt, minMassSq, maxMassSq); + this->printVeto( vetoPair_.size() - 1 ); } -void LauVetoes::addMassSqVeto(const Int_t resPairAmpInt, const Double_t minMassSq, const Double_t maxMassSq) +Bool_t LauVetoes::checkVeto(const UInt_t resPairAmpInt, const Double_t minMassSq, const Double_t maxMassSq) const { - // Routine to add a veto in the Dalitz plot. The function takes as input the - // bachelor track number (1, 2 or 3) and the mass-squared range of the veto. 
- - if (resPairAmpInt == 1) { - - // The bachelor track is the first track - std::cout << "INFO in LauVetoes::addMassSqVeto : Adding the veto for resPairAmpInt = 1, with " << minMassSq << " < m^2_23 < " << maxMassSq << std::endl; - - } else if (resPairAmpInt == 2) { - - // The bachelor track is the second track - std::cout << "INFO in LauVetoes::addMassSqVeto : Adding the veto for resPairAmpInt = 2, with " << minMassSq << " < m^2_13 < " << maxMassSq << std::endl; + Bool_t ok{kTRUE}; - } else if (resPairAmpInt == 3) { - - // The bachelor track is the third track - std::cout << "INFO in LauVetoes::addMassSqVeto : Adding the veto for resPairAmpInt = 3, with " << minMassSq << " < m^2_12 < " << maxMassSq << std::endl; - - } else if (resPairAmpInt == 4) { - - // Special case for symmetric DPs - the veto will be applied on the minimum of m13Sq and m23Sq - std::cout << "INFO in LauVetoes::addMassSqVeto : Adding the veto for resPairAmpInt = 4, with " << minMassSq << " < m^2_min < " << maxMassSq << std::endl; + if ( resPairAmpInt < 1 || resPairAmpInt > 5 ) { + std::cerr << "ERROR in LauVetoes::checkVeto : Invalid resPairAmpInt ( " << resPairAmpInt << " ), please use 1, 2 or 3 to specify bachelor daughter track, or 4 or 5 to specify a veto on mMinSq or mMaxSq in a symmetric DP" << std::endl; + ok = kFALSE; + } - } else if (resPairAmpInt == 5) { + if ( maxMassSq <= minMassSq ) { + std::cerr << "ERROR in LauVetoes::checkVeto : Invalid masses, max mass^2 ( " << maxMassSq << " ) <= min mass^2 ( " << minMassSq << " )" << std::endl; + ok = kFALSE; + } - // Special case for symmetric DPs - the veto will be applied on the maximum of m13Sq and m23Sq - std::cout << "INFO in LauVetoes::addMassSqVeto : Adding the veto for resPairAmpInt = 5, with " << minMassSq << " < m^2_max < " << maxMassSq << std::endl; + return ok; +} - } else { - std::cerr << "ERROR in LauVetoes::addMassSqVeto : Invalid resPairAmpInt. 
Please use 1, 2 or 3 to specify bachelor daughter track (or 4 or 5 to specify a veto on mMinSq or mMaxSq in a symmetric DP). Veto is not added." << std::endl; - return; - } +void LauVetoes::printVeto(const std::size_t index) const +{ + const auto resPairAmpInt { vetoPair_[index] }; + const auto minMassSq { vetoMinMass_[index] }; + const auto maxMassSq { vetoMaxMass_[index] }; - // Set the veto limits - vetoPair_.push_back(resPairAmpInt); - vetoMinMass_.push_back(minMassSq); - vetoMaxMass_.push_back(maxMassSq); + const std::array massNames { "m^2_23", "m^2_13", "m^2_12", "m^2_min", "m^2_max" }; - // Keep track of how many vetoes we have - ++nVetoes_; + std::cout << "INFO in LauVetoes::printVeto : Adding the veto for resPairAmpInt = " << resPairAmpInt << ", with " << minMassSq << " < " << massNames[resPairAmpInt-1] << " < " << maxMassSq << std::endl; } Bool_t LauVetoes::passVeto(const LauKinematics* kinematics) const { - // Routine to ask whether the given Dalitz plot point passes any specified vetoes. - if (kinematics == 0) { + if ( ! kinematics ) { std::cerr << "ERROR in LauVetoes::passVeto : LauKinematics object is null." 
<< std::endl; return kFALSE; } - const Double_t m12Sq = kinematics->getm12Sq(); - const Double_t m23Sq = kinematics->getm23Sq(); - const Double_t m13Sq = kinematics->getm13Sq(); - const Bool_t symmetricDP = kinematics->gotSymmetricalDP(); - const Bool_t fullySymmetricDP = kinematics->gotFullySymmetricDP(); - - return this->passVeto(m12Sq, m23Sq, m13Sq, symmetricDP, fullySymmetricDP); + return this->passVeto( + kinematics->getm12Sq(), + kinematics->getm23Sq(), + kinematics->getm13Sq(), + kinematics->gotSymmetricalDP(), + kinematics->gotFullySymmetricDP() + ); } -Bool_t LauVetoes::passVeto(const Double_t m12Sq, const Double_t m23Sq, const Double_t m13Sq, const Bool_t symmetricDP, const Bool_t fullySymmetricDP) const +Bool_t LauVetoes::passVeto(const Double_t m12Sq, const Double_t m23Sq, const Double_t m13Sq, const Bool_t symmetricDP, const Bool_t fullySymmetricDP) const { // Routine to ask whether the given Dalitz plot point passes any specified vetoes. // Loop over the number of possible vetoes - for ( UInt_t i(0); i < nVetoes_; ++i) { + const auto nVetoes { vetoPair_.size() }; + for ( std::size_t i{0}; i < nVetoes; ++i) { if (vetoPair_[i] == 1) { // Veto m23 combination if (m23Sq > vetoMinMass_[i] && m23Sq < vetoMaxMass_[i]) { return kFALSE; } // If the DP is symmetric we need to test m13 combination as well if ( symmetricDP || fullySymmetricDP ) { if (m13Sq > vetoMinMass_[i] && m13Sq < vetoMaxMass_[i]) { return kFALSE; } } // If it's fully symmetric we need to check all 3 combinations if ( fullySymmetricDP ) { if (m12Sq > vetoMinMass_[i] && m12Sq < vetoMaxMass_[i]) { return kFALSE; } } } else if (vetoPair_[i] == 2) { // Veto m13 combination if (m13Sq > vetoMinMass_[i] && m13Sq < vetoMaxMass_[i]) { return kFALSE; } // If the DP is symmetric we need to test m23 combination as well if ( symmetricDP || fullySymmetricDP ) { if (m23Sq > vetoMinMass_[i] && m23Sq < vetoMaxMass_[i]) { return kFALSE; } } // If it's fully symmetric we need to check all 3 combinations if ( 
fullySymmetricDP ) { if (m12Sq > vetoMinMass_[i] && m12Sq < vetoMaxMass_[i]) { return kFALSE; } } } else if (vetoPair_[i] == 3) { // Veto m12 combination if (m12Sq > vetoMinMass_[i] && m12Sq < vetoMaxMass_[i]) { return kFALSE; } // If it's fully symmetric we need to check all 3 combinations if ( fullySymmetricDP ) { if (m13Sq > vetoMinMass_[i] && m13Sq < vetoMaxMass_[i]) { return kFALSE; } if (m23Sq > vetoMinMass_[i] && m23Sq < vetoMaxMass_[i]) { return kFALSE; } } } else if (vetoPair_[i] == 4) { if (!symmetricDP) { std::cerr << "WARNING in LauVetoes::passVeto : resPairAmpInt of 4 is only valid for symmetric DPs, will ignore this veto" << std::endl; continue; } // Veto mMin combination - const Double_t mMinSq = TMath::Min( m13Sq, m23Sq ); + const Double_t mMinSq { TMath::Min( m13Sq, m23Sq ) }; if (mMinSq > vetoMinMass_[i] && mMinSq < vetoMaxMass_[i]) { return kFALSE; } } else if (vetoPair_[i] == 5) { if (!symmetricDP) { std::cerr << "WARNING in LauVetoes::passVeto : resPairAmpInt of 5 is only valid for symmetric DPs, will ignore this veto" << std::endl; continue; } // Veto mMax combination - const Double_t mMaxSq = TMath::Max( m13Sq, m23Sq ); + const Double_t mMaxSq { TMath::Max( m13Sq, m23Sq ) }; if (mMaxSq > vetoMinMass_[i] && mMaxSq < vetoMaxMass_[i]) { return kFALSE; } } } return kTRUE; } +void LauVetoes::checkValidity() +{ + // check that the size of all vectors is the same + const auto nVetoes { vetoPair_.size() }; + if ( nVetoes != vetoMinMass_.size() || nVetoes != vetoMaxMass_.size() ) { + std::string errmsg{ "ERROR in LauVetoes::checkValidity : the size of the JSON arrays is inconsistent" }; + std::cerr << errmsg << std::endl; + throw LauJsonTools::InvalidJson( errmsg ); + } + + for ( std::size_t i{0}; i < nVetoes; ++i) { + if ( ! 
this->checkVeto( vetoPair_[i], vetoMinMass_[i], vetoMaxMass_[i] ) ) { + throw LauJsonTools::InvalidJson( "ERROR in LauVetoes::checkValidity : problem with definition of veto" ); + } + } + + for ( std::size_t i{0}; i < nVetoes; ++i) { + this->printVeto( i ); + } +} + +std::unique_ptr LauVetoes::readFromJson(const TString& fileName, const TString& elementName) +{ + // NB deliberately not using uniform initialisation here because of this issue: + // https://json.nlohmann.me/home/faq/#brace-initialization-yields-arrays + const nlohmann::json j = LauJsonTools::readJsonFile( fileName.Data(), elementName.Data(), LauJsonTools::JsonType::Object ); + if ( j.is_null() ) { + if ( elementName != "" ) { + std::cerr << "ERROR in LauVetoes::readFromJson : unable to retrieve JSON object from element \"" << elementName << "\" in file \"" << fileName << "\"" << std::endl; + } else { + std::cerr << "ERROR in LauVetoes::readFromJson : unable to retrieve JSON object from root element of file \"" << fileName << "\"" << std::endl; + } + return {}; + } + + auto vetoes { std::make_unique() }; + j.get_to(*vetoes); + + vetoes->checkValidity(); + + return vetoes; +} + +void LauVetoes::writeToJson(const TString& fileName, const TString& elementName, const bool append, const int indent) const +{ + const nlohmann::json j = *this; + + const bool writeOK { LauJsonTools::writeJsonFile( j, fileName.Data(), elementName.Data(), append, indent ) }; + if ( ! 
writeOK ) { + std::cerr << "ERROR in LauVetoes::writeToJson : could not successfully write to file \"" << fileName << std::endl; + } +} diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 3bf2257..9f13296 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -1,24 +1,26 @@ list(APPEND TEST_SOURCES TestCovariant TestCovariant2 TestNewKinematicsMethods TestFitSplineToTH1 TestFitDoubleSplineToTH1 TestSplineDTAdivision TestWriteCoeffSetToJson TestWriteSplineToJson + TestWriteVetoesToJson TestReadCoeffSetFromJson TestReadDPModelFromJson TestReadSplineFromJson + TestReadVetoesFromJson TestSplineFindMax ) foreach( _test ${TEST_SOURCES}) add_executable(${_test} ${_test}.cc) target_link_libraries(${_test} PRIVATE Laura++) install(TARGETS ${_test} RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) endforeach() install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/test-model.json ${CMAKE_CURRENT_SOURCE_DIR}/test-coeffset.json DESTINATION ${CMAKE_INSTALL_BINDIR}) diff --git a/test/TestReadVetoesFromJson.cc b/test/TestReadVetoesFromJson.cc new file mode 100644 index 0000000..8815d59 --- /dev/null +++ b/test/TestReadVetoesFromJson.cc @@ -0,0 +1,34 @@ +#include "LauVetoes.hh" + +#include +#include + +void loadUnnamedVetoes() +{ + auto vetoes { LauVetoes::readFromJson("test-write-empty-vetoes-noname.json") }; + if ( not vetoes ) { + std::cerr << "nullptr returned from reading \"test-write-empty-vetoes-noname.json\"" << std::endl; + return; + } +} + +void loadNamedVetoes() +{ + auto vetoes = LauVetoes::readFromJson("test-write-vetoes-named.json","vetoes_empty"); + if ( not vetoes ) { + std::cerr << "nullptr returned from reading \"vetoes_empty\" from \"test-write-vetoes-named.json\"" << std::endl; + return; + } + + vetoes = LauVetoes::readFromJson("test-write-vetoes-named.json","vetoes_3"); + if ( not vetoes ) { + std::cerr << "nullptr returned from reading \"vetoes_3\" from \"test-write-vetoes-named.json\"" << std::endl; + return; + } +} + +int main() +{ + loadUnnamedVetoes(); + 
loadNamedVetoes(); +} diff --git a/test/TestWriteVetoesToJson.cc b/test/TestWriteVetoesToJson.cc new file mode 100644 index 0000000..0b7db18 --- /dev/null +++ b/test/TestWriteVetoesToJson.cc @@ -0,0 +1,16 @@ +#include "LauVetoes.hh" + +int main() +{ + LauVetoes vetoes; + + vetoes.writeToJson("test-write-empty-vetoes-noname.json"); + + vetoes.writeToJson("test-write-vetoes-named.json", "vetoes_empty"); + + vetoes.addMassVeto( 1, 0.0, 2.1 ); + vetoes.addMassVeto( 2, 0.0, 2.1 ); + vetoes.addMassVeto( 3, 1.8, 1.9 ); + + vetoes.writeToJson("test-write-vetoes-named.json", "vetoes_3", true); +}