diff --git a/src/LauAbsFitModel.cc b/src/LauAbsFitModel.cc index 8445ee6..44947bc 100644 --- a/src/LauAbsFitModel.cc +++ b/src/LauAbsFitModel.cc @@ -1,1207 +1,1207 @@ /* Copyright 2004 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauAbsFitModel.cc \brief File containing implementation of LauAbsFitModel class. */ #include #include #include #include "TMessage.h" #include "TMonitor.h" #include "TServerSocket.h" #include "TSocket.h" #include "TSystem.h" #include "TVirtualFitter.h" #include "LauAbsFitModel.hh" #include "LauAbsFitter.hh" #include "LauAbsPdf.hh" #include "LauComplex.hh" #include "LauFitter.hh" #include "LauFitDataTree.hh" #include "LauGenNtuple.hh" #include "LauParameter.hh" #include "LauParamFixed.hh" #include "LauPrint.hh" #include "LauSPlot.hh" ClassImp(LauAbsFitModel) LauAbsFitModel::LauAbsFitModel() : fixParams_(kFALSE), compareFitData_(kFALSE), savePDF_(kFALSE), writeLatexTable_(kFALSE), writeSPlotData_(kFALSE), storeDPEff_(kFALSE), randomFit_(kFALSE), emlFit_(kFALSE), poissonSmear_(kFALSE), enableEmbedding_(kFALSE), usingDP_(kTRUE), pdfsDependOnDP_(kFALSE), inputFitData_(0), genNtuple_(0), sPlotNtuple_(0), nullString_(""), doSFit_(kFALSE), sWeightBranchName_(""), sWeightScaleFactor_(1.0), outputTableName_(""), fitToyMCFileName_("fitToyMC.root"), fitToyMCTableName_("fitToyMCTable"), fitToyMCScale_(10), fitToyMCPoissonSmear_(kFALSE), sPlotFileName_(""), sPlotTreeName_(""), sPlotVerbosity_(LauOutputLevel::Quiet) { } LauAbsFitModel::~LauAbsFitModel() { delete inputFitData_; inputFitData_ = 0; delete genNtuple_; genNtuple_ = 0; delete sPlotNtuple_; sPlotNtuple_ = 0; // Remove the components created to apply constraints to fit parameters for ( auto par : conVars_ ) { if ( ! par->isLValue() ){ delete par; } } } void LauAbsFitModel::run(const TString& applicationCode, const TString& dataFileName, const TString& dataTreeName, const TString& histFileName, const TString& tableFileName) { // Chose whether you want to generate or fit events in the Dalitz plot. // To generate events choose applicationCode = "gen", to fit events choose // applicationCode = "fit". TString runCode(applicationCode); runCode.ToLower(); TString histFileNameCopy(histFileName); TString tableFileNameCopy(tableFileName); TString dataFileNameCopy(dataFileName); TString dataTreeNameCopy(dataTreeName); // Initialise the fit par vectors. Each class that inherits from this one // must implement this sensibly for all vectors specified in clearFitParVectors, // i.e. 
specify parameter names, initial, min, max and fixed values this->initialise(); // Add variables to Gaussian constrain to a list this->addConParameters(); if (dataFileNameCopy == "") {dataFileNameCopy = "data.root";} if (dataTreeNameCopy == "") {dataTreeNameCopy = "genResults";} if (runCode.Contains("gen")) { if (histFileNameCopy == "") {histFileNameCopy = "parInfo.root";} if (tableFileNameCopy == "") {tableFileNameCopy = "genResults";} this->setGenValues(); this->generate(dataFileNameCopy, dataTreeNameCopy, histFileNameCopy, tableFileNameCopy); } else if (runCode.Contains("fit")) { if (histFileNameCopy == "") {histFileNameCopy = "parInfo.root";} if (tableFileNameCopy == "") {tableFileNameCopy = "fitResults";} this->fit(dataFileNameCopy, dataTreeNameCopy, histFileNameCopy, tableFileNameCopy); } else if (runCode.Contains("plot")) { this->savePDFPlots("plot"); } else if (runCode.Contains("weight")) { this->weightEvents(dataFileNameCopy, dataTreeNameCopy); } } void LauAbsFitModel::doSFit( const TString& sWeightBranchName, Double_t scaleFactor ) { if ( sWeightBranchName == "" ) { std::cerr << "WARNING in LauAbsFitModel::doSFit : sWeight branch name is empty string, not setting-up sFit." << std::endl; return; } doSFit_ = kTRUE; sWeightBranchName_ = sWeightBranchName; sWeightScaleFactor_ = scaleFactor; } void LauAbsFitModel::setBkgndClassNames( const std::vector& names ) { if ( !bkgndClassNames_.empty() ) { std::cerr << "WARNING in LauAbsFitModel::setBkgndClassNames : Names already stored, not changing them." << std::endl; return; } UInt_t nBkgnds = names.size(); for ( UInt_t i(0); i < nBkgnds; ++i ) { bkgndClassNames_.insert( std::make_pair( i, names[i] ) ); } this->setupBkgndVectors(); } Bool_t LauAbsFitModel::validBkgndClass( const TString& className ) const { if ( bkgndClassNames_.empty() ) { return kFALSE; } Bool_t found(kFALSE); for ( LauBkgndClassMap::const_iterator iter = bkgndClassNames_.begin(); iter != bkgndClassNames_.end(); ++iter ) { if ( iter->second == className ) { found = kTRUE; break; } } return found; } UInt_t LauAbsFitModel::bkgndClassID( const TString& className ) const { if ( ! this->validBkgndClass( className ) ) { std::cerr << "ERROR in LauAbsFitModel::bkgndClassID : Request for ID for invalid background class \"" << className << "\"." << std::endl; return (bkgndClassNames_.size() + 1); } UInt_t bgID(0); for ( LauBkgndClassMap::const_iterator iter = bkgndClassNames_.begin(); iter != bkgndClassNames_.end(); ++iter ) { if ( iter->second == className ) { bgID = iter->first; break; } } return bgID; } const TString& LauAbsFitModel::bkgndClassName( UInt_t classID ) const { LauBkgndClassMap::const_iterator iter = bkgndClassNames_.find( classID ); if ( iter == bkgndClassNames_.end() ) { std::cerr << "ERROR in LauAbsFitModel::bkgndClassName : Request for name of invalid background class ID " << classID << "." << std::endl; return nullString_; } return iter->second; } void LauAbsFitModel::clearFitParVectors() { std::cout << "INFO in LauAbsFitModel::clearFitParVectors : Clearing fit variable vectors" << std::endl; // Remove the components created to apply constraints to fit parameters for ( auto par : conVars_ ) { if ( ! 
par->isLValue() ){ delete par; } } conVars_.clear(); resVars_.clear(); fitVars_.clear(); } void LauAbsFitModel::clearExtraVarVectors() { std::cout << "INFO in LauAbsFitModel::clearExtraVarVectors : Clearing extra ntuple variable vectors" << std::endl; extraVars_.clear(); } void LauAbsFitModel::setGenValues() { // makes sure each parameter holds its genValue as its current value for (LauParameterPList::iterator iter = fitVars_.begin(); iter != fitVars_.end(); ++iter) { (*iter)->value((*iter)->genValue()); } this->propagateParUpdates(); } void LauAbsFitModel::writeSPlotData(const TString& fileName, const TString& treeName, Bool_t storeDPEfficiency, const LauOutputLevel verbosity) { if (this->writeSPlotData()) { std::cerr << "ERROR in LauAbsFitModel::writeSPlotData : Already have an sPlot ntuple setup, not doing it again." << std::endl; return; } writeSPlotData_ = kTRUE; sPlotFileName_ = fileName; sPlotTreeName_ = treeName; sPlotVerbosity_ = verbosity; storeDPEff_ = storeDPEfficiency; } // TODO : histFileName isn't used here at the moment but could be used for // storing the values of the parameters used in the generation. // These could then be read and used for setting the "true" values // in a subsequent fit. void LauAbsFitModel::generate(const TString& dataFileName, const TString& dataTreeName, const TString& /*histFileName*/, const TString& tableFileNameBase) { // Create the ntuple for storing the results std::cout << "INFO in LauAbsFitModel::generate : Creating generation ntuple." << std::endl; if (genNtuple_ != 0) {delete genNtuple_; genNtuple_ = 0;} genNtuple_ = new LauGenNtuple(dataFileName,dataTreeName); // add branches for storing the experiment number and the number of // the event within the current experiment this->addGenNtupleIntegerBranch("iExpt"); this->addGenNtupleIntegerBranch("iEvtWithinExpt"); this->setupGenNtupleBranches(); // Start the cumulative timer cumulTimer_.Start(); const UInt_t firstExp = this->firstExpt(); const UInt_t nExp = this->nExpt(); Bool_t genOK(kTRUE); do { // Loop over the number of experiments for (UInt_t iExp = firstExp; iExp < (firstExp+nExp); ++iExp) { // Start the timer to see how long each experiment takes to generate timer_.Start(); // Store the experiment number in the ntuple this->setGenNtupleIntegerBranchValue("iExpt",iExp); // Do the generation for this experiment std::cout << "INFO in LauAbsFitModel::generate : Generating experiment number " << iExp << std::endl; genOK = this->genExpt(); // Stop the timer and see how long the program took so far timer_.Stop(); timer_.Print(); if (!genOK) { // delete and recreate an empty tree genNtuple_->deleteAndRecreateTree(); // then break out of the experiment loop std::cerr << "WARNING in LauAbsFitModel::generate : Problem in toy MC generation. Starting again with updated parameters..." << std::endl; break; } if (this->writeLatexTable()) { TString tableFileName(tableFileNameBase); tableFileName += "_"; tableFileName += iExp; tableFileName += ".tex"; this->writeOutTable(tableFileName); } } // Loop over number of experiments } while (!genOK); // Print out total timing info. cumulTimer_.Stop(); std::cout << "INFO in LauAbsFitModel::generate : Finished generating all experiments." << std::endl; std::cout << "INFO in LauAbsFitModel::generate : Cumulative timing:" << std::endl; cumulTimer_.Print(); // Build the event index std::cout << "INFO in LauAbsFitModel::generate : Building experiment:event index." << std::endl; // TODO - can test this return value? 
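	// A possible way to address the TODO above, assuming that
	// LauGenNtuple::buildIndex simply forwards the value returned by
	// TTree::BuildIndex (the number of entries indexed, with a value of
	// zero or below indicating failure, e.g. a missing branch):
	//
	//   const Int_t nIndexEntries = genNtuple_->buildIndex("iExpt","iEvtWithinExpt");
	//   if ( nIndexEntries <= 0 ) {
	//       std::cerr << "WARNING in LauAbsFitModel::generate : Failed to build the experiment:event index." << std::endl;
	//   }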
//Int_t nIndexEntries = genNtuple_->buildIndex("iExpt","iEvtWithinExpt"); // Write out toy MC ntuple std::cout << "INFO in LauAbsFitModel::generate : Writing data to file " << dataFileName << "." << std::endl; genNtuple_->writeOutGenResults(); } void LauAbsFitModel::addGenNtupleIntegerBranch(const TString& name) { genNtuple_->addIntegerBranch(name); } void LauAbsFitModel::addGenNtupleDoubleBranch(const TString& name) { genNtuple_->addDoubleBranch(name); } void LauAbsFitModel::setGenNtupleIntegerBranchValue(const TString& name, Int_t value) { genNtuple_->setIntegerBranchValue(name,value); } void LauAbsFitModel::setGenNtupleDoubleBranchValue(const TString& name, Double_t value) { genNtuple_->setDoubleBranchValue(name,value); } Int_t LauAbsFitModel::getGenNtupleIntegerBranchValue(const TString& name) const { return genNtuple_->getIntegerBranchValue(name); } Double_t LauAbsFitModel::getGenNtupleDoubleBranchValue(const TString& name) const { return genNtuple_->getDoubleBranchValue(name); } void LauAbsFitModel::fillGenNtupleBranches() { genNtuple_->fillBranches(); } void LauAbsFitModel::addSPlotNtupleIntegerBranch(const TString& name) { sPlotNtuple_->addIntegerBranch(name); } void LauAbsFitModel::addSPlotNtupleDoubleBranch(const TString& name) { sPlotNtuple_->addDoubleBranch(name); } void LauAbsFitModel::setSPlotNtupleIntegerBranchValue(const TString& name, Int_t value) { sPlotNtuple_->setIntegerBranchValue(name,value); } void LauAbsFitModel::setSPlotNtupleDoubleBranchValue(const TString& name, Double_t value) { sPlotNtuple_->setDoubleBranchValue(name,value); } void LauAbsFitModel::fillSPlotNtupleBranches() { sPlotNtuple_->fillBranches(); } void LauAbsFitModel::fit(const TString& dataFileName, const TString& dataTreeName, const TString& histFileName, const TString& tableFileNameBase) { // Routine to perform the total fit. const UInt_t firstExp = this->firstExpt(); const UInt_t nExp = this->nExpt(); std::cout << "INFO in LauAbsFitModel::fit : First experiment = " << firstExp << std::endl; std::cout << "INFO in LauAbsFitModel::fit : Number of experiments = " << nExp << std::endl; // Start the cumulative timer cumulTimer_.Start(); this->resetFitCounters(); // Create and setup the fit results ntuple this->setupResultsOutputs( histFileName, tableFileNameBase ); // Create and setup the sPlot ntuple if (this->writeSPlotData()) { std::cout << "INFO in LauAbsFitModel::fit : Creating sPlot ntuple." << std::endl; if (sPlotNtuple_ != 0) {delete sPlotNtuple_; sPlotNtuple_ = 0;} sPlotNtuple_ = new LauGenNtuple(sPlotFileName_,sPlotTreeName_); this->setupSPlotNtupleBranches(); } // This reads in the given dataFile and creates an input // fit data tree that stores them for all events and experiments. Bool_t dataOK = this->verifyFitData(dataFileName,dataTreeName); if (!dataOK) { std::cerr << "ERROR in LauAbsFitModel::fit : Problem caching the fit data." << std::endl; gSystem->Exit(EXIT_FAILURE); } // Loop over the number of experiments for (UInt_t iExp = firstExp; iExp < (firstExp+nExp); ++iExp) { // Start the timer to see how long each fit takes timer_.Start(); this->setCurrentExperiment( iExp ); UInt_t nEvents = this->readExperimentData(); if (nEvents < 1) { std::cerr << "WARNING in LauAbsFitModel::fit : Zero events in experiment " << iExp << ", skipping..." << std::endl; timer_.Stop(); continue; } // Now the sub-classes must implement whatever they need to do // to cache any more input fit data they need in order to // calculate the likelihoods during the fit. 
// They need to use the inputFitData_ tree as input. For example, // inputFitData_ contains m13Sq and m23Sq. The appropriate fit model // then caches the resonance dynamics for the signal model, as // well as the background likelihood values in the Dalitz plot this->cacheInputFitVars(); if ( this->doSFit() ) { this->cacheInputSWeights(); } // Do the fit for this experiment this->fitExpt(); // Write the results into the ntuple this->finaliseFitResults( outputTableName_ ); // Stop the timer and see how long the program took so far timer_.Stop(); timer_.Print(); // Store the per-event likelihood values if ( this->writeSPlotData() ) { this->storePerEvtLlhds(); } // Create a toy MC sample using the fitted parameters so that // the user can compare the fit to the data. if (compareFitData_ == kTRUE && this->statusCode() == 3) { this->createFitToyMC(fitToyMCFileName_, fitToyMCTableName_); } } // Loop over number of experiments // Print out total timing info. cumulTimer_.Stop(); std::cout << "INFO in LauAbsFitModel::fit : Cumulative timing:" << std::endl; cumulTimer_.Print(); // Print out stats on OK fits. const UInt_t nOKFits = this->numberOKFits(); const UInt_t nBadFits = this->numberBadFits(); std::cout << "INFO in LauAbsFitModel::fit : Number of OK Fits = " << nOKFits << std::endl; std::cout << "INFO in LauAbsFitModel::fit : Number of Failed Fits = " << nBadFits << std::endl; Double_t fitEff(0.0); if (nExp != 0) {fitEff = nOKFits/(1.0*nExp);} std::cout << "INFO in LauAbsFitModel::fit : Fit efficiency = " << fitEff*100.0 << "%." << std::endl; // Write out any fit results (ntuples etc...). this->writeOutAllFitResults(); if ( this->writeSPlotData() ) { this->calculateSPlotData(); } } void LauAbsFitModel::setupResultsOutputs( const TString& histFileName, const TString& tableFileName ) { this->LauSimFitTask::setupResultsOutputs( histFileName, tableFileName ); outputTableName_ = tableFileName; } Bool_t LauAbsFitModel::verifyFitData(const TString& dataFileName, const TString& dataTreeName) { // From the input data stream, store the variables into the // internal tree inputFitData_ that can be used by the sub-classes // in calculating their likelihood functions for the fit delete inputFitData_; inputFitData_ = new LauFitDataTree(dataFileName,dataTreeName); Bool_t dataOK = inputFitData_->findBranches(); if (!dataOK) { delete inputFitData_; inputFitData_ = 0; } return dataOK; } void LauAbsFitModel::cacheInputSWeights() { Bool_t hasBranch = inputFitData_->haveBranch( sWeightBranchName_ ); if ( ! hasBranch ) { std::cerr << "ERROR in LauAbsFitModel::cacheInputSWeights : Input data does not contain variable \"" << sWeightBranchName_ << "\".\n"; std::cerr << " : Turning off sFit!" << std::endl; doSFit_ = kFALSE; return; } UInt_t nEvents = this->eventsPerExpt(); sWeights_.clear(); sWeights_.reserve( nEvents ); for (UInt_t iEvt = 0; iEvt < nEvents; ++iEvt) { const LauFitData& dataValues = inputFitData_->getData(iEvt); LauFitData::const_iterator iter = dataValues.find( sWeightBranchName_ ); sWeights_.push_back( iter->second * sWeightScaleFactor_ ); } } void LauAbsFitModel::fitExpt() { // Routine to perform the actual fit for the given experiment // Update initial fit parameters if required (e.g. if using random numbers). 
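	// The minimisation that follows optionally runs in two stages: a
	// first pass with the second-stage parameters held fixed and, only if
	// that pass converges (status == 3), a second pass with them floating.
	// A minimal sketch of the control flow, written against a hypothetical
	// fitter object that mirrors the calls made below:
	//
	//   auto status = fitter.minimise();              // first stage
	//   if ( twoStage ) {
	//       fitter.releaseSecondStageParameters();    // released in either case
	//       if ( status.converged() ) {               // i.e. status == 3
	//           status = fitter.minimise();           // second stage
	//       }
	//   }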
this->checkInitFitParams(); // Initialise the fitter LauFitter::fitter()->useAsymmFitErrors( this->useAsymmFitErrors() ); LauFitter::fitter()->twoStageFit( this->twoStageFit() ); LauFitter::fitter()->initialise( this, fitVars_ ); this->startNewFit( LauFitter::fitter()->nParameters(), LauFitter::fitter()->nFreeParameters() ); // Now ready for minimisation step std::cout << "\nINFO in LauAbsFitModel::fitExpt : Start minimisation...\n"; LauAbsFitter::FitStatus fitResult = LauFitter::fitter()->minimise(); // If we're doing a two stage fit we can now release (i.e. float) // the 2nd stage parameters and re-fit if (this->twoStageFit()) { if ( fitResult.status != 3 ) { std::cerr << "WARNING in LauAbsFitModel:fitExpt : Not running second stage fit since first stage failed." << std::endl; LauFitter::fitter()->releaseSecondStageParameters(); } else { LauFitter::fitter()->releaseSecondStageParameters(); this->startNewFit( LauFitter::fitter()->nParameters(), LauFitter::fitter()->nFreeParameters() ); fitResult = LauFitter::fitter()->minimise(); } } const TMatrixD& covMat = LauFitter::fitter()->covarianceMatrix(); this->storeFitStatus( fitResult, covMat ); // Store the final fit results and errors into protected internal vectors that // all sub-classes can use within their own finalFitResults implementation // used below (e.g. putting them into an ntuple in a root file) LauFitter::fitter()->updateParameters(); } void LauAbsFitModel::calculateSPlotData() { if (sPlotNtuple_ != 0) { sPlotNtuple_->addFriendTree(inputFitData_->fileName(), inputFitData_->treeName()); sPlotNtuple_->writeOutGenResults(); LauSPlot splot(sPlotNtuple_->fileName(), sPlotNtuple_->treeName(), this->firstExpt(), this->nExpt(), this->variableNames(), this->freeSpeciesNames(), this->fixdSpeciesNames(), this->twodimPDFs(), this->splitSignal(), this->scfDPSmear()); splot.runCalculations(sPlotVerbosity_); splot.writeOutResults(); } } void LauAbsFitModel::compareFitData(UInt_t toyMCScale, const TString& mcFileName, const TString& tableFileName, Bool_t poissonSmearing) { compareFitData_ = kTRUE; fitToyMCScale_ = toyMCScale; fitToyMCFileName_ = mcFileName; fitToyMCTableName_ = tableFileName; fitToyMCPoissonSmear_ = poissonSmearing; } void LauAbsFitModel::createFitToyMC(const TString& mcFileName, const TString& tableFileName) { // Create a toy MC sample so that the user can compare the fitted // result with the data. 
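	// Note on the file splitting further below: if more than 100
	// experiments are requested, the toy sample is written out in batches
	// of at most 100 experiments per file, to avoid the memory issues that
	// can arise when building the index over a very large tree.  The
	// number of files is the ceiling of totalExpts/100, obtained with
	// integer arithmetic as, for example,
	//
	//   UInt_t nFiles = totalExpts/100;
	//   if ( totalExpts%100 ) { nFiles += 1; }   // e.g. 250 experiments -> 3 files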
// Generate more toy MC to reduce statistical fluctuations: // - use the rescaling value fitToyMCScale_ // Store the info on the number of experiments, first expt and current expt const UInt_t oldNExpt(this->nExpt()); const UInt_t oldFirstExpt(this->firstExpt()); const UInt_t oldIExpt(this->iExpt()); // Turn off Poisson smearing if required const Bool_t poissonSmearing(this->doPoissonSmearing()); this->doPoissonSmearing(fitToyMCPoissonSmear_); // Turn off embedding, since we need toy MC, not reco'ed events const Bool_t enableEmbeddingOrig(this->enableEmbedding()); this->enableEmbedding(kFALSE); // Need to make sure that the generation of the DP co-ordinates is // switched on if any of our PDFs depend on it const Bool_t origUseDP = this->useDP(); if ( !origUseDP && this->pdfsDependOnDP() ) { this->useDP( kTRUE ); this->initialiseDPModels(); } // Construct a unique filename for this experiment TString exptString("_expt"); exptString += oldIExpt; TString fileName( mcFileName ); fileName.Insert( fileName.Last('.'), exptString ); // Generate the toy MC std::cout << "INFO in LauAbsFitModel::createFitToyMC : Generating toy MC in " << fileName << " to compare fit with data...\n"; std::cout << " : Number of experiments to generate = " << fitToyMCScale_ << "\n"; std::cout << " : This is to allow the toy MC to be made with reduced statistical fluctuations\n"; std::cout << " : Number of events in each experiment will be " << (fitToyMCPoissonSmear_ ? "fluctuated according to a Poisson distribution" : "exactly the same") << std::endl; // Set the genValue of each parameter to its current (fitted) value // but first store the original genValues for restoring later std::vector origGenValues; origGenValues.reserve(this->nTotParams()); Bool_t blind(kFALSE); for (LauParameterPList::iterator iter = fitVars_.begin(); iter != fitVars_.end(); ++iter) { origGenValues.push_back((*iter)->genValue()); (*iter)->genValue((*iter)->unblindValue()); if ( (*iter)->blind() ) { blind = kTRUE; } } if ( blind ) { std::cerr << "WARNING in LauAbsFitModel::createFitToyMC : One or more parameters are blind but the toy will be created using the unblind values - use with caution!!" << std::endl; } // If we're asked to generate more than 100 experiments then split it // up into multiple files since otherwise can run into memory issues // when building the index // TODO - this obviously depends on the number of events per experiment as well, so should do this properly UInt_t totalExpts = fitToyMCScale_; if ( totalExpts > 100 ) { UInt_t nFiles = totalExpts/100; if ( totalExpts%100 ) { nFiles += 1; } TString fileNameBase {fileName}; for ( UInt_t iFile(0); iFile < nFiles; ++iFile ) { UInt_t firstExp( iFile*100 ); // Set number of experiments and first experiment to generate UInt_t nExp = ((firstExp + 100)>totalExpts) ? 
totalExpts-firstExp : 100; this->setNExpts(nExp, firstExp); // Create a unique filename and generate the events fileName = fileNameBase; TString extraname = "_file"; extraname += iFile; fileName.Insert( fileName.Last('.'), extraname ); this->generate(fileName, "genResults", "dummy.root", tableFileName); } } else { // Set number of experiments to new value this->setNExpts(fitToyMCScale_, 0); // Generate the toy this->generate(fileName, "genResults", "dummy.root", tableFileName); } // Reset number of experiments to original value this->setNExpts(oldNExpt, oldFirstExpt); this->setCurrentExperiment(oldIExpt); // Restore the Poisson smearing to its former value this->doPoissonSmearing(poissonSmearing); // Restore the embedding status this->enableEmbedding(enableEmbeddingOrig); // Restore "useDP" to its former status this->useDP( origUseDP ); // Restore the original genValue to each parameter for (UInt_t i(0); inTotParams(); ++i) { fitVars_[i]->genValue(origGenValues[i]); } std::cout << "INFO in LauAbsFitModel::createFitToyMC : Finished in createFitToyMC." << std::endl; } Double_t LauAbsFitModel::getTotNegLogLikelihood() { // Calculate the total negative log-likelihood over all events. // This function assumes that the fit parameters and data tree have // already been set-up correctly. // Loop over the data points to calculate the log likelihood Double_t logLike = this->getLogLikelihood( 0, this->eventsPerExpt() ); // Include the Poisson term in the extended likelihood if required if (this->doEMLFit()) { logLike -= this->getEventSum(); } // Calculate any penalty terms from Gaussian constrained variables if ( ! conVars_.empty() ){ logLike -= this->getLogLikelihoodPenalty(); } Double_t totNegLogLike = -logLike; return totNegLogLike; } Double_t LauAbsFitModel::getLogLikelihoodPenalty() { Double_t penalty(0.0); for ( LauAbsRValuePList::const_iterator iter = conVars_.begin(); iter != conVars_.end(); ++iter ) { Double_t val = (*iter)->unblindValue(); Double_t mean = (*iter)->constraintMean(); Double_t width = (*iter)->constraintWidth(); Double_t term = ( val - mean )*( val - mean ); penalty += term/( 2*width*width ); } return penalty; } Double_t LauAbsFitModel::getLogLikelihood( UInt_t iStart, UInt_t iEnd ) { // Calculate the total negative log-likelihood over all events. // This function assumes that the fit parameters and data tree have // already been set-up correctly. // Loop over the data points to calculate the log likelihood Double_t logLike(0.0); const Double_t worstLL = this->worstLogLike(); // Loop over the number of events in this experiment UInt_t badEvents{0}; for (UInt_t iEvt = iStart; iEvt < iEnd; ++iEvt) { Double_t likelihood = this->getTotEvtLikelihood(iEvt); if (likelihood > std::numeric_limits::min()) { // Is the likelihood > zero? Double_t evtLogLike = TMath::Log(likelihood); if ( doSFit_ ) { evtLogLike *= sWeights_[iEvt]; } logLike += evtLogLike; } else { ++badEvents; std::cerr << "WARNING in LauAbsFitModel::getLogLikelihood : Strange likelihood value for event " << iEvt << ": " << likelihood << "\n"; this->printEventData(iEvt); this->printParamValues(); this->printEventLikelihoods(); if ( this->breakOnBadLikelihood() ) { break; } } } if ( badEvents != 0 ) { if ( this->breakOnBadLikelihood() ) { std::cerr << "WARNING in LauAbsFitModel::getLogLikelihood : Found at least one event with a bad likelihood." << std::endl; } else { std::cerr << "WARNING in LauAbsFitModel::getLogLikelihood : Found " << badEvents << " events with bad likelihoods." 
<< std::endl; } std::cerr << " : Returning worst NLL found so far to force MINUIT out of this region." << std::endl; logLike = worstLL; } else if (logLike < worstLL) { this->worstLogLike( logLike ); } return logLike; } void LauAbsFitModel::setParsFromMinuit(Double_t* par, Int_t npar) { // This function sets the internal parameters based on the values // that Minuit is using when trying to minimise the total likelihood function. // MINOS reports different numbers of free parameters depending on the // situation, so disable this check if ( ! this->withinAsymErrorCalc() ) { const UInt_t nFreePars = this->nFreeParams(); if (static_cast(npar) != nFreePars) { std::cerr << "ERROR in LauAbsFitModel::setParsFromMinuit : Unexpected number of free parameters: " << npar << ".\n"; std::cerr << " Expected: " << nFreePars << ".\n" << std::endl; gSystem->Exit(EXIT_FAILURE); } } // Despite npar being the number of free parameters // the par array actually contains all the parameters, // free and floating... // Update all the floating ones with their new values // Also check if we have any parameters on which the DP integrals depend // and whether they have changed since the last iteration Bool_t recalcNorm(kFALSE); const LauParameterPSet::const_iterator resVarsEnd = resVars_.end(); for (UInt_t i(0); inTotParams(); ++i) { if (!fitVars_[i]->fixed()) { if ( resVars_.find( fitVars_[i] ) != resVarsEnd ) { if ( fitVars_[i]->value() != par[i] ) { recalcNorm = kTRUE; } } fitVars_[i]->value(par[i]); } } // If so, then recalculate the normalisation if (recalcNorm) { this->recalculateNormalisation(); } this->propagateParUpdates(); } UInt_t LauAbsFitModel::addFitParameters(LauParameter* param, const Bool_t addFixed) { UInt_t nParsAdded{0}; // check the pointer is valid if ( ! param ) { return nParsAdded; } // if we're dealing with a clone, // we should instead deal with its parent if ( param->clone() ) { param = param->parent(); } // we need to include the parameter if it is either: // - floating // - currently fixed but will float in the second stage of a two-stage fit if ( addFixed || ! param->fixed() || ( this->twoStageFit() && param->secondStage() ) ) { // check whether we already have this parameter stored auto [ _, addedOK ] { fitVarsSet_.insert( param ) }; if ( addedOK ) { // if not, add it to the list fitVars_.push_back( param ); ++nParsAdded; } } return nParsAdded; } UInt_t LauAbsFitModel::addFitParameters(LauParameterPList& paramList, const Bool_t addFixed) { UInt_t nParsAdded{0}; for ( auto param : paramList ) { nParsAdded += this->addFitParameters( param, addFixed ); } return nParsAdded; } UInt_t LauAbsFitModel::addFitParameters(std::vector>& paramList, const Bool_t addFixed) { UInt_t nParsAdded{0}; for ( auto& paramArray : paramList ) { nParsAdded += this->addFitParameters( paramArray[0], addFixed ); nParsAdded += this->addFitParameters( paramArray[1], addFixed ); nParsAdded += this->addFitParameters( paramArray[2], addFixed ); } return nParsAdded; } UInt_t LauAbsFitModel::addFitParameters(LauAbsRValue* param, const Bool_t addFixed) { UInt_t nParsAdded{0}; // check the pointer is valid if ( ! 
param ) { return nParsAdded; } LauParameterPList pars { param->getPars() }; for ( auto par : pars ) { nParsAdded += this->addFitParameters( par, addFixed ); } return nParsAdded; } UInt_t LauAbsFitModel::addFitParameters(LauAbsRValuePList& paramList, const Bool_t addFixed) { UInt_t nParsAdded{0}; for ( auto param : paramList ) { nParsAdded += this->addFitParameters( param, addFixed ); } return nParsAdded; } UInt_t LauAbsFitModel::addFitParameters(LauPdfPList& pdfList, const Bool_t addFixed) { UInt_t nParsAdded{0}; for ( auto pdf : pdfList ) { if ( pdf->isDPDependent() ) { this->pdfsDependOnDP( kTRUE ); } LauAbsRValuePList& pars = pdf->getParameters(); nParsAdded += this->addFitParameters( pars, addFixed ); } return nParsAdded; } UInt_t LauAbsFitModel::addResonanceParameters(LauParameter* param) { UInt_t nParsAdded{0}; // first check if we have this parameter in the set of resonance parameters auto [ _, addedOK ] { resVars_.insert( param ) }; if ( addedOK ) { nParsAdded += this->addFitParameters( param ); } return nParsAdded; } UInt_t LauAbsFitModel::addResonanceParameters(LauParameterPList& paramList) { UInt_t nParsAdded{0}; for ( auto param : paramList ) { nParsAdded += this->addResonanceParameters( param ); } return nParsAdded; } void LauAbsFitModel::addConParameters() { for ( LauParameterPList::const_iterator iter = fitVars_.begin(); iter != fitVars_.end(); ++iter ) { if ( (*iter)->gaussConstraint() ) { conVars_.push_back( *iter ); std::cout << "INFO in LauAbsFitModel::addConParameters : Added Gaussian constraint to parameter "<< (*iter)->name() << std::endl; } } // Add penalties from the constraints to fit parameters const std::vector& storeCon = this->constraintsStore(); for ( std::vector::const_iterator iter = storeCon.begin(); iter != storeCon.end(); ++iter ) { const std::vector& names = (*iter).conPars_; std::vector params; for ( std::vector::const_iterator iternames = names.begin(); iternames != names.end(); ++iternames ) { for ( LauParameterPList::const_iterator iterfit = fitVars_.begin(); iterfit != fitVars_.end(); ++iterfit ) { if ( (*iternames) == (*iterfit)->name() ){ params.push_back(*iterfit); } } } // If the parameters are not found, skip it if ( params.size() != (*iter).conPars_.size() ) { std::cerr << "WARNING in LauAbsFitModel::addConParameters: Could not find parameters to constrain in the formula... 
skipping" << std::endl; continue; } LauFormulaPar* formPar = new LauFormulaPar( (*iter).formula_, (*iter).formula_, params ); formPar->addGaussianConstraint( (*iter).mean_, (*iter).width_ ); conVars_.push_back(formPar); std::cout << "INFO in LauAbsFitModel::addConParameters : Added Gaussian constraint to formula\n"; std::cout << " : Formula: " << (*iter).formula_ << std::endl; for ( std::vector::iterator iterparam = params.begin(); iterparam != params.end(); ++iterparam ) { std::cout << " : Parameter: " << (*iterparam)->name() << std::endl; } } } void LauAbsFitModel::updateFitParameters(LauPdfPList& pdfList) { for (LauPdfPList::iterator pdf_iter = pdfList.begin(); pdf_iter != pdfList.end(); ++pdf_iter) { (*pdf_iter)->updatePulls(); } } void LauAbsFitModel::printFitParameters(const LauPdfPList& pdfList, std::ostream& fout) const { LauPrint print; for (LauPdfPList::const_iterator pdf_iter = pdfList.begin(); pdf_iter != pdfList.end(); ++pdf_iter) { const LauAbsRValuePList& pars = (*pdf_iter)->getParameters(); for (LauAbsRValuePList::const_iterator pars_iter = pars.begin(); pars_iter != pars.end(); ++pars_iter) { LauParameterPList params = (*pars_iter)->getPars(); for (LauParameterPList::iterator params_iter = params.begin(); params_iter != params.end(); ++params_iter) { if (!(*params_iter)->clone()) { fout << (*params_iter)->name() << " & $"; print.printFormat(fout, (*params_iter)->value()); if ((*params_iter)->fixed() == kTRUE) { fout << "$ (fixed) \\\\"; } else { fout << " \\pm "; print.printFormat(fout, (*params_iter)->error()); fout << "$ \\\\" << std::endl; } } } } } } void LauAbsFitModel::cacheInfo(LauPdfPList& pdfList, const LauFitDataTree& theData) { for (LauPdfPList::iterator pdf_iter = pdfList.begin(); pdf_iter != pdfList.end(); ++pdf_iter) { (*pdf_iter)->cacheInfo(theData); } } Double_t LauAbsFitModel::prodPdfValue(LauPdfPList& pdfList, UInt_t iEvt) { Double_t pdfVal = 1.0; for (LauPdfPList::iterator pdf_iter = pdfList.begin(); pdf_iter != pdfList.end(); ++pdf_iter) { (*pdf_iter)->calcLikelihoodInfo(iEvt); pdfVal *= (*pdf_iter)->getLikelihood(); } return pdfVal; } void LauAbsFitModel::printPdfValues(const LauPdfPList& pdfList, const TString& componentName) const { for ( const auto& pdf : pdfList ) { - std::cout << componentName << " " << pdf->varName() << " likelihood = " << pdf->getLikelihood() << "\n"; + std::cerr << componentName << " " << pdf->varName() << " likelihood = " << pdf->getLikelihood() << "\n"; } } void LauAbsFitModel::printEventData(UInt_t iEvt) const { const LauFitData& data = inputFitData_->getData(iEvt); - std::cerr << " : Input data values for this event:" << std::endl; + std::cerr << "WARNING in LauAbsFitModel::printEventData : Input data values for this event:" << std::endl; for (LauFitData::const_iterator iter = data.begin(); iter != data.end(); ++iter) { std::cerr << " " << iter->first << " = " << iter->second << std::endl; } } void LauAbsFitModel::printParamValues() const { - std::cerr << " : Current values of fit parameters:" << std::endl; + std::cerr << "WARNING in LauAbsFitModel::printParamValues : Current values of fit parameters:" << std::endl; for (UInt_t i(0); inTotParams(); ++i) { std::cerr << " " << (fitVars_[i]->name()).Data() << " = " << fitVars_[i]->value() << std::endl; } } void LauAbsFitModel::prepareInitialParArray( TObjArray& array ) { // Update initial fit parameters if required (e.g. if using random numbers). 
this->checkInitFitParams(); // Store the total number of parameters and the number of free parameters UInt_t nPars = fitVars_.size(); UInt_t nFreePars = 0; // Send the fit parameters for ( LauParameterPList::iterator iter = fitVars_.begin(); iter != fitVars_.end(); ++iter ) { if ( ! (*iter)->fixed() ) { ++nFreePars; } array.Add( *iter ); } this->startNewFit( nPars, nFreePars ); } void LauAbsFitModel::finaliseExperiment( const LauAbsFitter::FitStatus& fitStat, const TObjArray* parsFromCoordinator, const TMatrixD* covMat, TObjArray& parsToCoordinator ) { // Copy the fit status information this->storeFitStatus( fitStat, *covMat ); // Now process the parameters const UInt_t nPars = this->nTotParams(); UInt_t nParsFromCoordinator = parsFromCoordinator->GetEntries(); if ( nParsFromCoordinator != nPars ) { std::cerr << "ERROR in LauAbsFitModel::finaliseExperiment : Unexpected number of parameters received from coordinator" << std::endl; std::cerr << " : Received " << nParsFromCoordinator << " when expecting " << nPars << std::endl; gSystem->Exit( EXIT_FAILURE ); } for ( UInt_t iPar(0); iPar < nParsFromCoordinator; ++iPar ) { LauParameter* parameter = dynamic_cast( (*parsFromCoordinator)[iPar] ); if ( ! parameter ) { std::cerr << "ERROR in LauAbsFitModel::finaliseExperiment : Error reading parameter from coordinator" << std::endl; gSystem->Exit( EXIT_FAILURE ); } if ( parameter->name() != fitVars_[iPar]->name() ) { std::cerr << "ERROR in LauAbsFitModel::finaliseExperiment : Error reading parameter from coordinator" << std::endl; gSystem->Exit( EXIT_FAILURE ); } *(fitVars_[iPar]) = *parameter; } // Write the results into the ntuple this->finaliseFitResults( outputTableName_ ); // Store the per-event likelihood values if ( this->writeSPlotData() ) { this->storePerEvtLlhds(); } // Create a toy MC sample using the fitted parameters so that // the user can compare the fit to the data. 
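	// Note that the comparison sample is only generated when the fit
	// status is 3, which for the Minuit-based fitters corresponds to a
	// successful minimisation with a full, accurate covariance matrix;
	// failed or dubious fits are skipped rather than compared.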
if (compareFitData_ == kTRUE && fitStat.status == 3) { this->createFitToyMC(fitToyMCFileName_, fitToyMCTableName_); } // Send the finalised fit parameters for ( LauParameterPList::iterator iter = fitVars_.begin(); iter != fitVars_.end(); ++iter ) { parsToCoordinator.Add( *iter ); } } UInt_t LauAbsFitModel::readExperimentData() { // retrieve the data and find out how many events have been read const UInt_t exptIndex = this->iExpt(); inputFitData_->readExperimentData( exptIndex ); const UInt_t nEvent = inputFitData_->nEvents(); this->eventsPerExpt( nEvent ); return nEvent; } void LauAbsFitModel::setParametersFromFile(const TString& fileName, const TString& treeName, const Bool_t fix) { fixParamFileName_ = fileName; fixParamTreeName_ = treeName; fixParams_ = fix; } void LauAbsFitModel::setParametersFromMap(const std::map& parameters, const Bool_t fix) { fixParamMap_ = parameters; fixParams_ = fix; } void LauAbsFitModel::setNamedParameters(const TString& fileName, const TString& treeName, const std::set& parameters, const Bool_t fix) { fixParamFileName_ = fileName; fixParamTreeName_ = treeName; fixParamNames_ = parameters; fixParams_ = fix; } void LauAbsFitModel::setParametersFileFallback(const TString& fileName, const TString& treeName, const std::map& parameters, const Bool_t fix) { fixParamFileName_ = fileName; fixParamTreeName_ = treeName; fixParamMap_ = parameters; fixParams_ = fix; } diff --git a/src/LauTimeDepFitModel.cc b/src/LauTimeDepFitModel.cc index 591b244..8adaf43 100644 --- a/src/LauTimeDepFitModel.cc +++ b/src/LauTimeDepFitModel.cc @@ -1,3280 +1,3283 @@ /* Copyright 2006 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauTimeDepFitModel.cc \brief File containing implementation of LauTimeDepFitModel class. */ #include #include #include #include #include #include #include #include "TFile.h" #include "TMinuit.h" #include "TRandom.h" #include "TSystem.h" #include "TVirtualFitter.h" #include "LauAbsBkgndDPModel.hh" #include "LauAbsCoeffSet.hh" #include "LauAbsPdf.hh" #include "LauAsymmCalc.hh" #include "LauComplex.hh" #include "LauConstants.hh" #include "LauDPPartialIntegralInfo.hh" #include "LauDaughters.hh" #include "LauDecayTimePdf.hh" #include "LauFitNtuple.hh" #include "LauGenNtuple.hh" #include "LauIsobarDynamics.hh" #include "LauKinematics.hh" #include "LauParamFixed.hh" #include "LauPrint.hh" #include "LauRandom.hh" #include "LauScfMap.hh" #include "LauTimeDepFitModel.hh" #include "LauFlavTag.hh" ClassImp(LauTimeDepFitModel) LauTimeDepFitModel::LauTimeDepFitModel(LauIsobarDynamics* modelB0bar, LauIsobarDynamics* modelB0, LauFlavTag* flavTag) : LauAbsFitModel(), sigModelB0bar_(modelB0bar), sigModelB0_(modelB0), kinematicsB0bar_(modelB0bar ? modelB0bar->getKinematics() : 0), kinematicsB0_(modelB0 ? 
modelB0->getKinematics() : 0), usingBkgnd_(kFALSE), flavTag_(flavTag), curEvtTrueTagFlv_(LauFlavTag::Flavour::Unknown), curEvtDecayFlv_(LauFlavTag::Flavour::Unknown), nSigComp_(0), nSigDPPar_(0), nDecayTimePar_(0), nExtraPdfPar_(0), nNormPar_(0), nCalibPar_(0), nTagEffPar_(0), nEffiPar_(0), nAsymPar_(0), coeffsB0bar_(0), coeffsB0_(0), coeffPars_(0), fitFracB0bar_(0), fitFracB0_(0), fitFracAsymm_(0), acp_(0), meanEffB0bar_("meanEffB0bar",0.0,0.0,1.0), meanEffB0_("meanEffB0",0.0,0.0,1.0), DPRateB0bar_("DPRateB0bar",0.0,0.0,100.0), DPRateB0_("DPRateB0",0.0,0.0,100.0), signalEvents_(0), signalAsym_(0), cpevVarName_(""), cpEigenValue_(CPEven), evtCPEigenVals_(0), deltaM_("deltaM",0.0), deltaGamma_("deltaGamma",0.0), tau_("tau",LauConstants::tauB0), phiMix_("phiMix", 2.0*LauConstants::beta, -LauConstants::threePi, LauConstants::threePi, kFALSE), sinPhiMix_("sinPhiMix", TMath::Sin(2.0*LauConstants::beta), -1.0, 1.0, kFALSE), cosPhiMix_("cosPhiMix", TMath::Cos(2.0*LauConstants::beta), -1.0, 1.0, kFALSE), useSinCos_(kFALSE), phiMixComplex_(TMath::Cos(-2.0*LauConstants::beta),TMath::Sin(-2.0*LauConstants::beta)), signalDecayTimePdf_(), BkgndTypes_(flavTag_->getBkgndTypes()), BkgndDecayTimePdfs_(), curEvtDecayTime_(0.0), curEvtDecayTimeErr_(0.0), sigExtraPdf_(), AProd_("AProd",0.0,-1.0,1.0,kTRUE), iterationsMax_(100000000), nGenLoop_(0), ASq_(0.0), aSqMaxVar_(0.0), aSqMaxSet_(1.25), storeGenAmpInfo_(kFALSE), signalTree_(), reuseSignal_(kFALSE), sigDPLike_(0.0), sigExtraLike_(0.0), sigTotalLike_(0.0) { // Set up ftag here? this->setBkgndClassNames(flavTag_->getBkgndNames()); const std::size_t nBkgnds { this->nBkgndClasses() }; if ( nBkgnds > 0 ){ usingBkgnd_ = kTRUE; for ( std::size_t iBkgnd{0}; iBkgnd < nBkgnds; ++iBkgnd ) { const TString& bkgndClass { this->bkgndClassName( iBkgnd ) }; AProdBkgnd_[iBkgnd] = new LauParameter("AProd_"+bkgndClass,0.0,-1.0,1.0,kTRUE); } } // Make sure that the integration scheme will be symmetrised sigModelB0bar_->forceSymmetriseIntegration(kTRUE); sigModelB0_->forceSymmetriseIntegration(kTRUE); } LauTimeDepFitModel::~LauTimeDepFitModel() { for ( LauAbsPdf* pdf : sigExtraPdf_ ) { delete pdf; } for(auto& data : bkgndTree_){ delete data; } } void LauTimeDepFitModel::setupBkgndVectors() { UInt_t nBkgnds { this->nBkgndClasses() }; AProdBkgnd_.resize( nBkgnds ); BkgndDPModelsB_.resize( nBkgnds ); BkgndDPModelsBbar_.resize( nBkgnds ); BkgndDecayTimePdfs_.resize( nBkgnds ); BkgndPdfs_.resize( nBkgnds ); bkgndEvents_.resize( nBkgnds ); bkgndAsym_.resize( nBkgnds ); bkgndTree_.resize( nBkgnds ); reuseBkgnd_.resize( nBkgnds ); bkgndDPLike_.resize( nBkgnds ); bkgndExtraLike_.resize( nBkgnds ); bkgndTotalLike_.resize( nBkgnds ); } void LauTimeDepFitModel::setNSigEvents(LauParameter* nSigEvents) { if ( nSigEvents == 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : The LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( signalEvents_ != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : You are trying to overwrite the signal yield." << std::endl; return; } if ( signalAsym_ != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : You are trying to overwrite the signal asymmetry." << std::endl; return; } signalEvents_ = nSigEvents; TString yieldName = signalEvents_->name(); if ( ! yieldName.BeginsWith("signalEvents") ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : The signal yield parameter name should start with \"signalEvents\" (plus any optional suffix)." 
<< std::endl; gSystem->Exit(EXIT_FAILURE); } Double_t value = nSigEvents->value(); signalEvents_->range(-2.0*(TMath::Abs(value)+1.0),2.0*(TMath::Abs(value)+1.0)); TString asymName = yieldName; asymName.ReplaceAll( "Events", "Asym" ); signalAsym_ = new LauParameter(asymName,0.0,-1.0,1.0,kTRUE); } void LauTimeDepFitModel::setNSigEvents(LauParameter* nSigEvents, LauParameter* sigAsym) { if ( nSigEvents == 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : The event LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( sigAsym == 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : The asym LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( signalEvents_ != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : You are trying to overwrite the signal yield." << std::endl; return; } if ( signalAsym_ != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : You are trying to overwrite the signal asymmetry." << std::endl; return; } signalEvents_ = nSigEvents; TString yieldName = signalEvents_->name(); if ( ! yieldName.BeginsWith("signalEvents") ) { std::cerr << "ERROR in LauTimeDepFitModel::setNSigEvents : The signal yield parameter name should start with \"signalEvents\" (plus any optional suffix)." << std::endl; gSystem->Exit(EXIT_FAILURE); } Double_t value = nSigEvents->value(); signalEvents_->range(-2.0*(TMath::Abs(value)+1.0), 2.0*(TMath::Abs(value)+1.0)); signalAsym_ = sigAsym; TString asymName = yieldName; asymName.ReplaceAll( "Events", "Asym" ); signalAsym_->name(asymName); signalAsym_->range(-1.0,1.0); } void LauTimeDepFitModel::setNBkgndEvents(LauAbsRValue* nBkgndEvents) { if ( nBkgndEvents == 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBgkndEvents : The background yield LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } TString yieldName = nBkgndEvents->name(); TString bkgndClassName = yieldName; if ( bkgndClassName.Contains("Events") ) { bkgndClassName.Remove( bkgndClassName.Index("Events") ); } else { yieldName += "Events"; nBkgndEvents->name( yieldName ); } if ( ! this->validBkgndClass( bkgndClassName ) ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : Invalid background class \"" << bkgndClassName << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; gSystem->Exit(EXIT_FAILURE); } UInt_t bkgndID = this->bkgndClassID( bkgndClassName ); if ( bkgndEvents_[bkgndID] != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : You are trying to overwrite the background yield." << std::endl; return; } if ( bkgndAsym_[bkgndID] != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : You are trying to overwrite the background asymmetry." << std::endl; return; } if ( nBkgndEvents->isLValue() ) { Double_t value = nBkgndEvents->value(); LauParameter* yield = dynamic_cast( nBkgndEvents ); yield->range(-2.0*(TMath::Abs(value)+1.0), 2.0*(TMath::Abs(value)+1.0)); } bkgndEvents_[bkgndID] = nBkgndEvents; TString asymName = yieldName; asymName.ReplaceAll( "Events", "Asym" ); bkgndAsym_[bkgndID] = new LauParameter(asymName,0.0,-1.0,1.0,kTRUE); } void LauTimeDepFitModel::setNBkgndEvents(LauAbsRValue* nBkgndEvents, LauAbsRValue* bkgndAsym) { if ( nBkgndEvents == 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : The background yield LauParameter pointer is null." 
<< std::endl; gSystem->Exit(EXIT_FAILURE); } if ( bkgndAsym == 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : The background asym LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } TString yieldName = nBkgndEvents->name(); TString bkgndClassName = yieldName; if ( bkgndClassName.Contains("Events") ) { bkgndClassName.Remove( bkgndClassName.Index("Events") ); } else { yieldName += "Events"; nBkgndEvents->name( yieldName ); } TString asymName = yieldName; asymName.ReplaceAll( "Events", "Asym" ); bkgndAsym->name( asymName ); if ( ! this->validBkgndClass( bkgndClassName ) ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : Invalid background class \"" << bkgndClassName << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; gSystem->Exit(EXIT_FAILURE); } UInt_t bkgndID = this->bkgndClassID( bkgndClassName ); if ( bkgndEvents_[bkgndID] != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : You are trying to overwrite the background yield." << std::endl; return; } if ( bkgndAsym_[bkgndID] != 0 ) { std::cerr << "ERROR in LauTimeDepFitModel::setNBkgndEvents : You are trying to overwrite the background asymmetry." << std::endl; return; } if ( nBkgndEvents->isLValue() ) { Double_t value = nBkgndEvents->value(); LauParameter* yield = dynamic_cast( nBkgndEvents ); yield->range(-2.0*(TMath::Abs(value)+1.0), 2.0*(TMath::Abs(value)+1.0)); } bkgndEvents_[bkgndID] = nBkgndEvents; if ( bkgndAsym->isLValue() ) { LauParameter* asym = dynamic_cast( bkgndAsym ); asym->range(-1.0, 1.0); } bkgndAsym_[bkgndID] = bkgndAsym; } void LauTimeDepFitModel::setSignalDtPdf(LauDecayTimePdf* pdf) { if (pdf==0) { std::cerr<<"ERROR in LauTimeDepFitModel::setSignalDtPdf : The PDF pointer is null, not adding it."<validBkgndClass( bkgndClass) ) { std::cerr << "ERROR in LauTimeDepFitModel::setBkgndDtPdf : Invalid background class \"" << bkgndClass << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; return; } UInt_t bkgndID = this->bkgndClassID( bkgndClass ); BkgndDecayTimePdfs_[bkgndID] = pdf; usingBkgnd_ = kTRUE; } void LauTimeDepFitModel::setBkgndDPModels(const TString& bkgndClass, LauAbsBkgndDPModel* BModel, LauAbsBkgndDPModel* BbarModel) { if (BModel==nullptr) { std::cerr << "ERROR in LauTimeDepFitModel::setBkgndDPModels : the model pointer is null for the particle model." << std::endl; return; } // check that this background name is valid if ( ! this->validBkgndClass( bkgndClass) ) { std::cerr << "ERROR in LauTimeDepFitModel::setBkgndDPModels : Invalid background class \"" << bkgndClass << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; return; } UInt_t bkgndID = this->bkgndClassID( bkgndClass ); BkgndDPModelsB_[bkgndID] = BModel; if (BbarModel==nullptr) { std::cout << "INFO in LauTimeDepFitModel::setBkgndDPModels : the model pointer is null for the anti-particle model. Using only the particle model." 
<< std::endl; BkgndDPModelsBbar_[bkgndID] = nullptr; } else { BkgndDPModelsBbar_[bkgndID] = BbarModel; } usingBkgnd_ = kTRUE; } void LauTimeDepFitModel::setSignalPdfs(LauAbsPdf* pdf) { // These "extra variables" are assumed to be purely kinematical, like mES and DeltaE //or making use of Rest of Event information, and therefore independent of whether //the parent is a B0 or a B0bar. If this assupmtion doesn't hold, do modify this part! if (pdf==0) { std::cerr<<"ERROR in LauTimeDepFitModel::setSignalPdfs : The PDF pointer is null."<validBkgndClass( bkgndClass ) ) { std::cerr << "ERROR in LauTimeDepFitModel::setBkgndPdf : Invalid background class \"" << bkgndClass << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; return; } UInt_t bkgndID = this->bkgndClassID( bkgndClass ); BkgndPdfs_[bkgndID].push_back(pdf); usingBkgnd_ = kTRUE; } void LauTimeDepFitModel::setPhiMix(const Double_t phiMix, const Bool_t fixPhiMix, const Bool_t useSinCos) { phiMix_.value(phiMix); phiMix_.initValue(phiMix); phiMix_.genValue(phiMix); phiMix_.fixed(fixPhiMix); const Double_t sinPhiMix = TMath::Sin(phiMix); sinPhiMix_.value(sinPhiMix); sinPhiMix_.initValue(sinPhiMix); sinPhiMix_.genValue(sinPhiMix); sinPhiMix_.fixed(fixPhiMix); const Double_t cosPhiMix = TMath::Cos(phiMix); cosPhiMix_.value(cosPhiMix); cosPhiMix_.initValue(cosPhiMix); cosPhiMix_.genValue(cosPhiMix); cosPhiMix_.fixed(fixPhiMix); useSinCos_ = useSinCos; phiMixComplex_.setRealPart(cosPhiMix); phiMixComplex_.setImagPart(-1.0*sinPhiMix); } void LauTimeDepFitModel::blindPhiMix(const TString& blindingString, const Double_t width, const Bool_t flipSign) { phiMix_.range( -2.0*LauConstants::threePi, 2.0*LauConstants::threePi ); phiMix_.blindParameter( blindingString, width, flipSign ); } void LauTimeDepFitModel::blindPhiMix(const TString& sinBlindingString, const TString& cosBlindingString, const Double_t width, const Bool_t flipSign) { if ( ! useSinCos_ ) { std::cerr << "WARNING in LauTimeDepFitModel::blindPhiMix : not using sine/cosine parameters, so using the sine blinding string to blind the phase itself." 
<< std::endl; this->blindPhiMix(sinBlindingString); return; } sinPhiMix_.range( -2.0, 2.0 ); cosPhiMix_.range( -2.0, 2.0 ); sinPhiMix_.blindParameter( sinBlindingString, width, flipSign ); cosPhiMix_.blindParameter( cosBlindingString, width, flipSign ); } void LauTimeDepFitModel::initialise() { // From the initial parameter values calculate the coefficients // so they can be passed to the signal model this->updateCoeffs(); // Initialisation if (this->useDP() == kTRUE) { this->initialiseDPModels(); } // Flavour tagging //flavTag_->initialise(); // Decay-time PDFs signalDecayTimePdf_->initialise(); //Initialise for backgrounds if necessary for (auto& pdf : BkgndDecayTimePdfs_){ pdf->initialise(); } if (!this->useDP() && sigExtraPdf_.empty()) { std::cerr<<"ERROR in LauTimeDepFitModel::initialise : Signal model doesn't exist for any variable."<Exit(EXIT_FAILURE); } if (this->useDP() == kTRUE) { // Check that we have all the Dalitz-plot models if ((sigModelB0bar_ == 0) || (sigModelB0_ == 0)) { std::cerr<<"ERROR in LauTimeDepFitModel::initialise : the pointer to one (particle or anti-particle) of the signal DP models is null."<Exit(EXIT_FAILURE); } } // Next check that, if a given component is being used we've got the // right number of PDFs for all the variables involved // TODO - should probably check variable names and so on as well //UInt_t nsigpdfvars(0); //for ( LauPdfPList::const_iterator pdf_iter = sigExtraPdf_.begin(); pdf_iter != sigExtraPdf_.end(); ++pdf_iter ) { // std::vector varNames = (*pdf_iter)->varNames(); // for ( std::vector::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) { // if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { // ++nsigpdfvars; // } // } //} //if (usingBkgnd_) { // for (LauBkgndPdfsList::const_iterator bgclass_iter = BkgndPdfsB0_.begin(); bgclass_iter != BkgndPdfsB0_.end(); ++bgclass_iter) { // UInt_t nbkgndpdfvars(0); // const LauPdfPList& pdfList = (*bgclass_iter); // for ( LauPdfPList::const_iterator pdf_iter = pdfList.begin(); pdf_iter != pdfList.end(); ++pdf_iter ) { // std::vector varNames = (*pdf_iter)->varNames(); // for ( std::vector::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) { // if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { // ++nbkgndpdfvars; // } // } // } // if (nbkgndpdfvars != nsigpdfvars) { // std::cerr << "ERROR in LauTimeDepFitModel::initialise : There are " << nsigpdfvars << " signal PDF variables but " << nbkgndpdfvars << " bkgnd PDF variables." << std::endl; // gSystem->Exit(EXIT_FAILURE); // } // } //} // Clear the vectors of parameter information so we can start from scratch this->clearFitParVectors(); // Set the fit parameters for signal and background models this->setSignalDPParameters(); // Set the fit parameters for the decay time models this->setDecayTimeParameters(); // Set the fit parameters for the extra PDFs this->setExtraPdfParameters(); // Set the initial bg and signal events this->setFitNEvents(); // Handle flavour-tagging calibration parameters this->setCalibParams(); // Add tagging efficiency parameters this->setTagEffParams(); //Asymmetry terms AProd and in setAsymmetries()? 
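	// Each of the set*Params()/set*Parameters() calls in this sequence
	// increments its own counter (nSigDPPar_, nDecayTimePar_, nExtraPdfPar_,
	// nNormPar_, nCalibPar_, nTagEffPar_, nEffiPar_, nAsymPar_) with the
	// number of parameters it registered through addFitParameters(), and
	// the check just below verifies that the full list of fit variables
	// matches the sum of those counters, i.e. schematically
	//
	//   fitVars.size() == nSigDPPar_ + nDecayTimePar_ + nExtraPdfPar_ + nNormPar_
	//                   + nCalibPar_ + nTagEffPar_ + nEffiPar_ + nAsymPar_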
this->setAsymParams(); // Check that we have the expected number of fit variables const LauParameterPList& fitVars = this->fitPars(); if (fitVars.size() != (nSigDPPar_ + nDecayTimePar_ + nExtraPdfPar_ + nNormPar_ + nCalibPar_ + nTagEffPar_ + nEffiPar_ + nAsymPar_)) { std::cerr<<"ERROR in LauTimeDepFitModel::initialise : Number of fit parameters not of expected size."<Exit(EXIT_FAILURE); } if (sigModelB0_ == 0) { std::cerr<<"ERROR in LauTimeDepFitModel::initialiseDPModels : B0 signal DP model doesn't exist"<Exit(EXIT_FAILURE); } // Need to check that the number of components we have and that the dynamics has matches up const UInt_t nAmpB0bar = sigModelB0bar_->getnTotAmp(); const UInt_t nAmpB0 = sigModelB0_->getnTotAmp(); if ( nAmpB0bar != nAmpB0 ) { std::cerr << "ERROR in LauTimeDepFitModel::initialiseDPModels : Unequal number of signal DP components in the particle and anti-particle models: " << nAmpB0bar << " != " << nAmpB0 << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( nAmpB0bar != nSigComp_ ) { std::cerr << "ERROR in LauTimeDepFitModel::initialiseDPModels : Number of signal DP components in the model (" << nAmpB0bar << ") not equal to number of coefficients supplied (" << nSigComp_ << ")." << std::endl; gSystem->Exit(EXIT_FAILURE); } std::cout<<"INFO in LauTimeDepFitModel::initialiseDPModels : Initialising signal DP model"<initialise(coeffsB0bar_); sigModelB0_->initialise(coeffsB0_); fifjEffSum_.clear(); fifjEffSum_.resize(nSigComp_); for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { fifjEffSum_[iAmp].resize(nSigComp_); } // calculate the integrals of the A*Abar terms this->calcInterferenceTermIntegrals(); this->calcInterferenceTermNorm(); // Add backgrounds if (usingBkgnd_ == kTRUE) { for (auto& model : BkgndDPModelsB_){ model->initialise(); } for (auto& model : BkgndDPModelsBbar_){ if (model != nullptr) { model->initialise(); } } } } void LauTimeDepFitModel::calcInterferenceTermIntegrals() { const std::vector& integralInfoListB0bar = sigModelB0bar_->getIntegralInfos(); const std::vector& integralInfoListB0 = sigModelB0_->getIntegralInfos(); // TODO should check (first time) that they match in terms of number of entries in the vectors and that each entry has the same number of points, ranges, weights etc. 
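	// What the nested loops below accumulate is, for each pair of
	// amplitudes (iAmp, jAmp), the efficiency-weighted overlap of the B0
	// and B0bar amplitudes over the Dalitz-plot integration grid,
	//
	//   fifjEffSum_[iAmp][jAmp] = sum_points  eff * weight * Abar_jAmp * conj(A_iAmp)
	//
	// with A taken from the B0 model and Abar from the B0bar model.
	// These sums are then combined with the isobar coefficients, the
	// amplitude normalisation factors and the mixing phase in
	// calcInterferenceTermNorm().
	//
	// For the TODO above, a minimal first-time consistency check could be:
	//
	//   if ( integralInfoListB0bar.size() != integralInfoListB0.size() ) {
	//       std::cerr << "ERROR in LauTimeDepFitModel::calcInterferenceTermIntegrals : Mismatched integration info between the B0bar and B0 models." << std::endl;
	//       gSystem->Exit(EXIT_FAILURE);
	//   }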
LauComplex A, Abar, fifjEffSumTerm; for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) { fifjEffSum_[iAmp][jAmp].zero(); } } const UInt_t nIntegralRegions = integralInfoListB0bar.size(); for ( UInt_t iRegion(0); iRegion < nIntegralRegions; ++iRegion ) { const LauDPPartialIntegralInfo* integralInfoB0bar = integralInfoListB0bar[iRegion]; const LauDPPartialIntegralInfo* integralInfoB0 = integralInfoListB0[iRegion]; const UInt_t nm13Points = integralInfoB0bar->getnm13Points(); const UInt_t nm23Points = integralInfoB0bar->getnm23Points(); for (UInt_t m13 = 0; m13 < nm13Points; ++m13) { for (UInt_t m23 = 0; m23 < nm23Points; ++m23) { const Double_t weight = integralInfoB0bar->getWeight(m13,m23); const Double_t eff = integralInfoB0bar->getEfficiency(m13,m23); const Double_t effWeight = eff*weight; for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { A = integralInfoB0->getAmplitude(m13, m23, iAmp); for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) { Abar = integralInfoB0bar->getAmplitude(m13, m23, jAmp); fifjEffSumTerm = Abar*A.conj(); fifjEffSumTerm.rescale(effWeight); fifjEffSum_[iAmp][jAmp] += fifjEffSumTerm; } } } } } } void LauTimeDepFitModel::calcInterferenceTermNorm() { const std::vector& fNormB0bar = sigModelB0bar_->getFNorm(); const std::vector& fNormB0 = sigModelB0_->getFNorm(); LauComplex norm; for (UInt_t iAmp = 0; iAmp < nSigComp_; ++iAmp) { for (UInt_t jAmp = 0; jAmp < nSigComp_; ++jAmp) { LauComplex coeffTerm = coeffsB0bar_[jAmp]*coeffsB0_[iAmp].conj(); coeffTerm *= fifjEffSum_[iAmp][jAmp]; coeffTerm.rescale(fNormB0bar[jAmp] * fNormB0[iAmp]); norm += coeffTerm; } } norm *= phiMixComplex_; interTermReNorm_ = 2.0*norm.re(); interTermImNorm_ = 2.0*norm.im(); } void LauTimeDepFitModel::setAmpCoeffSet(LauAbsCoeffSet* coeffSet) { // Is there a component called compName in the signal models? TString compName = coeffSet->name(); TString conjName = sigModelB0bar_->getConjResName(compName); const LauDaughters* daughtersB0bar = sigModelB0bar_->getDaughters(); const LauDaughters* daughtersB0 = sigModelB0_->getDaughters(); const Bool_t conjugate = daughtersB0bar->isConjugate( daughtersB0 ); if ( ! sigModelB0bar_->hasResonance(compName) ) { if ( ! sigModelB0bar_->hasResonance(conjName) ) { std::cerr<<"ERROR in LauTimeDepFitModel::setAmpCoeffSet : B0bar signal DP model doesn't contain component \""<name( compName ); } if ( conjugate ) { if ( ! sigModelB0_->hasResonance(conjName) ) { std::cerr<<"ERROR in LauTimeDepFitModel::setAmpCoeffSet : B0 signal DP model doesn't contain component \""<hasResonance(compName) ) { std::cerr<<"ERROR in LauTimeDepFitModel::setAmpCoeffSet : B0 signal DP model doesn't contain component \""<name() == compName) { std::cerr<<"ERROR in LauTimeDepFitModel::setAmpCoeffSet : Have already set coefficients for \""<index(nSigComp_); coeffPars_.push_back(coeffSet); TString parName = coeffSet->baseName(); parName += "FitFracAsym"; fitFracAsymm_.push_back(LauParameter(parName, 0.0, -1.0, 1.0)); acp_.push_back(coeffSet->acp()); ++nSigComp_; std::cout<<"INFO in LauTimeDepFitModel::setAmpCoeffSet : Added coefficients for component \""<acp(); LauAsymmCalc asymmCalc(fitFracB0bar_[i][i].value(), fitFracB0_[i][i].value()); Double_t asym = asymmCalc.getAsymmetry(); fitFracAsymm_[i].value(asym); if (initValues) { fitFracAsymm_[i].genValue(asym); fitFracAsymm_[i].initValue(asym); } } } void LauTimeDepFitModel::setSignalDPParameters() { // Set the fit parameters for the signal model. nSigDPPar_ = 0; if ( ! 
this->useDP() ) { return; } std::cout << "INFO in LauTimeDepFitModel::setSignalDPParameters : Setting the initial fit parameters for the signal DP model." << std::endl; // Place isobar coefficient parameters in vector of fit variables for (UInt_t i = 0; i < nSigComp_; ++i) { LauParameterPList pars = coeffPars_[i]->getParameters(); nSigDPPar_ += this->addFitParameters( pars, kTRUE ); } // Obtain the resonance parameters and place them in the vector of fit variables and in a separate vector // Need to make sure that they are unique because some might appear in both DP models LauParameterPList& sigDPParsB0bar = sigModelB0bar_->getFloatingParameters(); LauParameterPList& sigDPParsB0 = sigModelB0_->getFloatingParameters(); nSigDPPar_ += this->addResonanceParameters( sigDPParsB0bar ); nSigDPPar_ += this->addResonanceParameters( sigDPParsB0 ); } UInt_t LauTimeDepFitModel::addFitParameters(LauDecayTimePdf* decayTimePdf) { return this->addFitParameters( decayTimePdf->getParameters()); } UInt_t LauTimeDepFitModel::addFitParameters(std::vector& decayTimePdfList) { UInt_t nParsAdded{0}; for ( auto decayTimePdf : decayTimePdfList ) { nParsAdded += this->addFitParameters( decayTimePdf ); } return nParsAdded; } void LauTimeDepFitModel::setDecayTimeParameters() { nDecayTimePar_ = 0; std::cout << "INFO in LauTimeDepFitModel::setDecayTimeParameters : Setting the initial fit parameters of the DecayTime Pdfs." << std::endl; // Loop over the Dt PDFs nDecayTimePar_ += this->addFitParameters( signalDecayTimePdf_ ); if (usingBkgnd_){ nDecayTimePar_ += this->addFitParameters(BkgndDecayTimePdfs_); } if (useSinCos_) { nDecayTimePar_ += this->addFitParameters( &sinPhiMix_ ); nDecayTimePar_ += this->addFitParameters( &cosPhiMix_ ); } else { nDecayTimePar_ += this->addFitParameters( &phiMix_ ); } } void LauTimeDepFitModel::setExtraPdfParameters() { // Include the parameters of the PDF for each tagging category in the fit // NB all of them are passed to the fit, even though some have been fixed through parameter.fixed(kTRUE) // With the new "cloned parameter" scheme only "original" parameters are passed to the fit. // Their clones are updated automatically when the originals are updated. nExtraPdfPar_ = 0; std::cout << "INFO in LauTimeDepFitModel::setExtraPdfParameters : Setting the initial fit parameters of the extra Pdfs." << std::endl; nExtraPdfPar_ += this->addFitParameters(sigExtraPdf_); if (usingBkgnd_ == kTRUE) { for (auto& pdf : BkgndPdfs_){ nExtraPdfPar_ += this->addFitParameters(pdf); } } } void LauTimeDepFitModel::setFitNEvents() { nNormPar_ = 0; std::cout << "INFO in LauTimeDepFitModel::setFitNEvents : Setting the initial fit parameters of the signal and background yields." 
<< std::endl; // Initialise the total number of events to be the sum of all the hypotheses Double_t nTotEvts = signalEvents_->value(); this->eventsPerExpt(TMath::FloorNint(nTotEvts)); // if doing an extended ML fit add the signal fraction into the fit parameters if (this->doEMLFit()) { std::cout<<"INFO in LauTimeDepFitModel::setFitNEvents : Initialising number of events for signal and background components..."<addFitParameters( signalEvents_ ); } else { std::cout<<"INFO in LauTimeDepFitModel::setFitNEvents : Initialising number of events for background components (and hence signal)..."<useDP() == kFALSE) { nNormPar_ += this->addFitParameters( signalAsym_ ); } // TODO arguably should delegate this //LauTagCatParamMap& signalTagCatFrac = flavTag_->getSignalTagCatFrac(); // tagging-category fractions for signal events //for (LauTagCatParamMap::iterator iter = signalTagCatFrac.begin(); iter != signalTagCatFrac.end(); ++iter) { // if (iter == signalTagCatFrac.begin()) { // continue; // } // LauParameter* par = &((*iter).second); // fitVars.push_back(par); // ++nNormPar_; //} // Backgrounds if (usingBkgnd_ == kTRUE) { nNormPar_ += this->addFitParameters( bkgndEvents_ ); nNormPar_ += this->addFitParameters( bkgndAsym_ ); } } void LauTimeDepFitModel::setAsymParams() { nAsymPar_ = 0; //Signal nAsymPar_ += this->addFitParameters( &AProd_ ); //Background(s) nAsymPar_ += this->addFitParameters( AProdBkgnd_ ); } void LauTimeDepFitModel::setTagEffParams() { nTagEffPar_ = 0; Bool_t useAltPars = flavTag_->getUseAveDelta(); std::cout << "INFO in LauTimeDepFitModel::setTagEffParams : Setting the initial fit parameters for flavour tagging efficiencies." << std::endl; if (useAltPars){ std::vector tageff_ave = flavTag_->getTagEffAve(); std::vector tageff_delta = flavTag_->getTagEffDelta(); nTagEffPar_ += this->addFitParameters( tageff_ave ); nTagEffPar_ += this->addFitParameters( tageff_delta ); } else { std::vector tageff_b0 = flavTag_->getTagEffB0(); std::vector tageff_b0bar = flavTag_->getTagEffB0bar(); nTagEffPar_ += this->addFitParameters( tageff_b0 ); nTagEffPar_ += this->addFitParameters( tageff_b0bar ); } if (usingBkgnd_){ if (useAltPars){ auto tageff_ave = flavTag_->getTagEffBkgndAve(); auto tageff_delta = flavTag_->getTagEffBkgndDelta(); for(auto& innerVec : tageff_ave){ nTagEffPar_ += this->addFitParameters( innerVec ); } for(auto& innerVec : tageff_delta){ nTagEffPar_ += this->addFitParameters( innerVec ); } } else { auto tageff_b0 = flavTag_->getTagEffBkgndB0(); auto tageff_b0bar = flavTag_->getTagEffBkgndB0bar(); for(auto& innerVec : tageff_b0){ nTagEffPar_ += this->addFitParameters( innerVec ); } for(auto& innerVec : tageff_b0bar){ nTagEffPar_ += this->addFitParameters( innerVec ); } } } } void LauTimeDepFitModel::setCalibParams() { nCalibPar_ = 0; Bool_t useAltPars = flavTag_->getUseAveDelta(); std::cout << "INFO in LauTimeDepFitModel::setCalibParams : Setting the initial fit parameters of the flavour tagging calibration parameters." 
<< std::endl; if (useAltPars){ std::vector p0pars_ave = flavTag_->getCalibP0Ave(); std::vector p0pars_delta = flavTag_->getCalibP0Delta(); std::vector p1pars_ave = flavTag_->getCalibP1Ave(); std::vector p1pars_delta = flavTag_->getCalibP1Delta(); nCalibPar_ += this->addFitParameters( p0pars_ave ); nCalibPar_ += this->addFitParameters( p0pars_delta ); nCalibPar_ += this->addFitParameters( p1pars_ave ); nCalibPar_ += this->addFitParameters( p1pars_delta ); } else { std::vector p0pars_b0 = flavTag_->getCalibP0B0(); std::vector p0pars_b0bar = flavTag_->getCalibP0B0bar(); std::vector p1pars_b0 = flavTag_->getCalibP1B0(); std::vector p1pars_b0bar = flavTag_->getCalibP1B0bar(); nCalibPar_ += this->addFitParameters( p0pars_b0 ); nCalibPar_ += this->addFitParameters( p0pars_b0bar ); nCalibPar_ += this->addFitParameters( p1pars_b0 ); nCalibPar_ += this->addFitParameters( p1pars_b0bar ); } } void LauTimeDepFitModel::setExtraNtupleVars() { // Set-up other parameters derived from the fit results, e.g. fit fractions. if (this->useDP() != kTRUE) { return; } // First clear the vectors so we start from scratch this->clearExtraVarVectors(); LauParameterList& extraVars = this->extraPars(); // Add the B0 and B0bar fit fractions for each signal component fitFracB0bar_ = sigModelB0bar_->getFitFractions(); if (fitFracB0bar_.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFitModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetFitFractions(); if (fitFracB0_.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFitModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); icalcAsymmetries(kTRUE); // Add the Fit Fraction asymmetry for each signal component for (UInt_t i = 0; i < nSigComp_; i++) { extraVars.push_back(fitFracAsymm_[i]); } // Add the calculated CP asymmetry for each signal component for (UInt_t i = 0; i < nSigComp_; i++) { extraVars.push_back(acp_[i]); } // Now add in the DP efficiency values Double_t initMeanEffB0bar = sigModelB0bar_->getMeanEff().initValue(); meanEffB0bar_.value(initMeanEffB0bar); meanEffB0bar_.initValue(initMeanEffB0bar); meanEffB0bar_.genValue(initMeanEffB0bar); extraVars.push_back(meanEffB0bar_); Double_t initMeanEffB0 = sigModelB0_->getMeanEff().initValue(); meanEffB0_.value(initMeanEffB0); meanEffB0_.initValue(initMeanEffB0); meanEffB0_.genValue(initMeanEffB0); extraVars.push_back(meanEffB0_); // Also add in the DP rates Double_t initDPRateB0bar = sigModelB0bar_->getDPRate().initValue(); DPRateB0bar_.value(initDPRateB0bar); DPRateB0bar_.initValue(initDPRateB0bar); DPRateB0bar_.genValue(initDPRateB0bar); extraVars.push_back(DPRateB0bar_); Double_t initDPRateB0 = sigModelB0_->getDPRate().initValue(); DPRateB0_.value(initDPRateB0); DPRateB0_.initValue(initDPRateB0); DPRateB0_.genValue(initDPRateB0); extraVars.push_back(DPRateB0_); } void LauTimeDepFitModel::setAsymmetries(const Double_t AProd, const Bool_t AProdFix){ AProd_.value(AProd); AProd_.fixed(AProdFix); } void LauTimeDepFitModel::setBkgndAsymmetries(const TString& bkgndClass, const Double_t AProd, const Bool_t AProdFix){ // check that this background name is valid if ( ! this->validBkgndClass( bkgndClass) ) { std::cerr << "ERROR in LauTimeDepFitModel::setBkgndAsymmetries : Invalid background class \"" << bkgndClass << "\"." 
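	// The calibration parameters registered in setCalibParams() above enter through the
	// usual linear mistag calibration, and the "average/delta" option replaces the
	// per-flavour (B0, B0bar) parameters by their average and difference. The helpers
	// below are a hypothetical illustration of those two conventions; the exact
	// LauFlavTag definitions (in particular the sign and factor of the delta term) may differ.
	#include <utility>

	struct CalibSketch { double p0; double p1; double etaMean; };

	// omega(eta) = p0 + p1 * (eta - <eta>)
	inline double calibratedMistagSketch( const CalibSketch& c, const double eta )
	{
		return c.p0 + c.p1 * ( eta - c.etaMean );
	}

	// Assumed convention:  p_B0 = p_ave + 0.5*delta ,  p_B0bar = p_ave - 0.5*delta
	inline std::pair<double,double> aveDeltaToFlavourSketch( const double pAve, const double pDelta )
	{
		return { pAve + 0.5 * pDelta, pAve - 0.5 * pDelta };
	}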
<< std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; return; } UInt_t bkgndID = this->bkgndClassID( bkgndClass ); AProdBkgnd_[bkgndID]->value( AProd ); AProdBkgnd_[bkgndID]->genValue( AProd ); AProdBkgnd_[bkgndID]->initValue( AProd ); AProdBkgnd_[bkgndID]->fixed( AProdFix ); } void LauTimeDepFitModel::finaliseFitResults(const TString& tablePrefixName) { // Retrieve parameters from the fit results for calculations and toy generation // and eventually store these in output root ntuples/text files // Now take the fit parameters and update them as necessary // i.e. to make mag > 0.0, phase in the right range. // This function will also calculate any other values, such as the // fit fractions, using any errors provided by fitParErrors as appropriate. // Also obtain the pull values: (measured - generated)/(average error) if (this->useDP() == kTRUE) { for (UInt_t i = 0; i < nSigComp_; ++i) { // Check whether we have "a > 0.0", and phases in the right range coeffPars_[i]->finaliseValues(); } } // update the pulls on the event fractions and asymmetries if (this->doEMLFit()) { signalEvents_->updatePull(); } if (this->useDP() == kFALSE) { signalAsym_->updatePull(); } // Finalise the pulls on the decay time parameters signalDecayTimePdf_->updatePulls(); // and for backgrounds if required if (usingBkgnd_){ for (auto& pdf : BkgndDecayTimePdfs_){ pdf->updatePulls(); } } // Finalise the pulls on the flavour tagging parameters flavTag_->updatePulls(); if (useSinCos_) { if ( not sinPhiMix_.fixed() ) { sinPhiMix_.updatePull(); cosPhiMix_.updatePull(); } } else { this->checkMixingPhase(); } if (usingBkgnd_ == kTRUE) { for (auto& params : bkgndEvents_){ std::vector parameters = params->getPars(); for ( LauParameter* parameter : parameters ) { parameter->updatePull(); } } for (auto& params : bkgndAsym_){ std::vector parameters = params->getPars(); for ( LauParameter* parameter : parameters ) { parameter->updatePull(); } } } // Update the pulls on all the extra PDFs' parameters this->updateFitParameters(sigExtraPdf_); if (usingBkgnd_ == kTRUE) { for (auto& pdf : BkgndPdfs_){ this->updateFitParameters(pdf); } } // Fill the fit results to the ntuple // update the coefficients and then calculate the fit fractions and ACP's if (this->useDP() == kTRUE) { this->updateCoeffs(); sigModelB0bar_->updateCoeffs(coeffsB0bar_); sigModelB0bar_->calcExtraInfo(); sigModelB0_->updateCoeffs(coeffsB0_); sigModelB0_->calcExtraInfo(); LauParArray fitFracB0bar = sigModelB0bar_->getFitFractions(); if (fitFracB0bar.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFitModel::finaliseFitResults : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } LauParArray fitFracB0 = sigModelB0_->getFitFractions(); if (fitFracB0.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFitModel::finaliseFitResults : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetMeanEff().value()); meanEffB0_.value(sigModelB0_->getMeanEff().value()); DPRateB0bar_.value(sigModelB0bar_->getDPRate().value()); DPRateB0_.value(sigModelB0_->getDPRate().value()); this->calcAsymmetries(); // Then store the final fit parameters, and any extra parameters for // the signal model (e.g. 
fit fractions, FF asymmetries, ACPs, mean efficiency and DP rate) this->clearExtraVarVectors(); LauParameterList& extraVars = this->extraPars(); for (UInt_t i(0); iprintFitFractions(std::cout); this->printAsymmetries(std::cout); } const LauParameterPList& fitVars = this->fitPars(); const LauParameterList& extraVars = this->extraPars(); LauFitNtuple* ntuple = this->fitNtuple(); ntuple->storeParsAndErrors(fitVars, extraVars); // find out the correlation matrix for the parameters ntuple->storeCorrMatrix(this->iExpt(), this->fitStatus(), this->covarianceMatrix()); // Fill the data into ntuple ntuple->updateFitNtuple(); // Print out the partial fit fractions, phases and the // averaged efficiency, reweighted by the dynamics (and anything else) if (this->writeLatexTable()) { TString sigOutFileName(tablePrefixName); sigOutFileName += "_"; sigOutFileName += this->iExpt(); sigOutFileName += "Expt.tex"; this->writeOutTable(sigOutFileName); } } void LauTimeDepFitModel::printFitFractions(std::ostream& output) { // Print out Fit Fractions, total DP rate and mean efficiency // First for the B0bar events for (UInt_t i = 0; i < nSigComp_; i++) { const TString compName(coeffPars_[i]->name()); output<<"B0bar FitFraction for component "<useDP() == kTRUE) { // print the fit coefficients in one table coeffPars_.front()->printTableHeading(fout); for (UInt_t i = 0; i < nSigComp_; i++) { coeffPars_[i]->printTableRow(fout); } fout<<"\\hline"<name(); resName = resName.ReplaceAll("_", "\\_"); fout< =$ & $"; print.printFormat(fout, meanEffB0bar_.value()); fout << "$ & $"; print.printFormat(fout, meanEffB0_.value()); fout << "$ & & \\\\" << std::endl; if (useSinCos_) { fout << "$\\sinPhiMix =$ & $"; print.printFormat(fout, sinPhiMix_.value()); fout << " \\pm "; print.printFormat(fout, sinPhiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; fout << "$\\cosPhiMix =$ & $"; print.printFormat(fout, cosPhiMix_.value()); fout << " \\pm "; print.printFormat(fout, cosPhiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; } else { fout << "$\\phiMix =$ & $"; print.printFormat(fout, phiMix_.value()); fout << " \\pm "; print.printFormat(fout, phiMix_.error()); fout << "$ & & & & & & & \\\\" << std::endl; } fout << "\\hline \n\\end{tabular}" << std::endl; } if (!sigExtraPdf_.empty()) { fout<<"\\begin{tabular}{|l|c|}"<printFitParameters(sigExtraPdf_, fout); if (usingBkgnd_ == kTRUE && !BkgndPdfs_.empty()) { fout << "\\hline" << std::endl; fout << "\\Extra Background PDFs' Parameters: & \\\\" << std::endl; for (auto& pdf : BkgndPdfs_){ this->printFitParameters(pdf, fout); } } fout<<"\\hline \n\\end{tabular}"<updateSigEvents(); // Check whether we want to have randomised initial fit parameters for the signal model if (this->useRandomInitFitPars() == kTRUE) { this->randomiseInitFitPars(); } } void LauTimeDepFitModel::randomiseInitFitPars() { // Only randomise those parameters that are not fixed! 
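	// The randomisation below draws phiMix uniformly in (-pi, pi) and, when the sin/cos
	// parametrisation is in use, sets the initial values of both derived parameters from
	// that single phase so they stay mutually consistent. A standalone sketch using the
	// standard library in place of LauRandom (illustrative helper only):
	#include <cmath>
	#include <random>

	void randomisePhaseSketch( std::mt19937& rng, double& phi, double& sinPhi, double& cosPhi )
	{
		const double pi { std::acos( -1.0 ) };
		std::uniform_real_distribution<double> flat{ -pi, pi };
		phi    = flat( rng );
		sinPhi = std::sin( phi );   // both derived starting values come from the
		cosPhi = std::cos( phi );   // same randomised phase
	}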
std::cout<<"INFO in LauTimeDepFitModel::randomiseInitFitPars : Randomising the initial values of the coefficients of the DP components (and phiMix)..."<randomiseInitValues(); } phiMix_.randomiseValue(-LauConstants::pi, LauConstants::pi); if (useSinCos_) { sinPhiMix_.initValue(TMath::Sin(phiMix_.initValue())); cosPhiMix_.initValue(TMath::Cos(phiMix_.initValue())); } } LauTimeDepFitModel::LauGenInfo LauTimeDepFitModel::eventsToGenerate() { // Determine the number of events to generate for each hypothesis // If we're smearing then smear each one individually // NB this individual smearing has to be done individually per tagging category as well LauGenInfo nEvtsGen; // Signal // If we're including the DP and decay time we can't decide on the tag // yet, since it depends on the whole DP+dt PDF, however, if // we're not then we need to decide. Double_t evtWeight(1.0); Double_t nEvts = signalEvents_->genValue(); if ( nEvts < 0.0 ) { evtWeight = -1.0; nEvts = TMath::Abs( nEvts ); } //TOD sigAysm doesn't do anything here? Double_t sigAsym(0.0); if (this->useDP() == kFALSE) { sigAsym = signalAsym_->genValue(); //TODO fill in here if we care } else { Double_t rateB0bar = sigModelB0bar_->getDPRate().value(); Double_t rateB0 = sigModelB0_->getDPRate().value(); if ( rateB0bar+rateB0 > 1e-30) { sigAsym = (rateB0bar-rateB0)/(rateB0bar+rateB0); } //for (LauTagCatParamMap::const_iterator iter = signalTagCatFrac.begin(); iter != signalTagCatFrac.end(); ++iter) { // const LauParameter& par = iter->second; // Double_t eventsbyTagCat = par.value() * nEvts; // if (this->doPoissonSmearing()) { // eventsbyTagCat = LauRandom::randomFun()->Poisson(eventsbyTagCat); // } // eventsB0[iter->first] = std::make_pair( TMath::Nint(eventsbyTagCat), evtWeight ); //} //nEvtsGen[std::make_pair("signal",0)] = eventsB0; // generate signal event, decide tag later. if (this->doPoissonSmearing()) { nEvts = LauRandom::randomFun()->Poisson(signalEvents_->genValue()); } nEvtsGen["signal"] = std::make_pair( nEvts, evtWeight ); } std::cout<<"INFO in LauTimeDepFitModel::eventsToGenerate : Generating toy MC with:"<bkgndClassName(bkgndID)<<" background events = "<genValue()<eventsToGenerate(); Bool_t genOK(kTRUE); Int_t evtNum(0); const UInt_t nBkgnds = this->nBkgndClasses(); std::vector bkgndClassNames(nBkgnds); std::vector bkgndClassNamesGen(nBkgnds); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { TString name( this->bkgndClassName(iBkgnd) ); bkgndClassNames[iBkgnd] = name; bkgndClassNamesGen[iBkgnd] = "gen"+name; } // Loop over the hypotheses and generate the appropriate number of // events for each one for (auto& hypo : nEvts){ // find the category of events (e.g. 
signal) const TString& evtCategory(hypo.first); // Type const TString& type(hypo.first); // Number of events Int_t nEvtsGen( hypo.second.first ); // get the event weight for this category const Double_t evtWeight( hypo.second.second ); auto t1 = std::chrono::high_resolution_clock::now(); for (Int_t iEvt(0); iEvtsetGenNtupleDoubleBranchValue( "evtWeight", evtWeight ); if (evtCategory == "signal") { this->setGenNtupleIntegerBranchValue("genSig",1); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { this->setGenNtupleIntegerBranchValue( bkgndClassNamesGen[iBkgnd], 0 ); } // All the generate*Event() methods have to fill in curEvtDecayTime_ and curEvtDecayTimeErr_ // In addition, generateSignalEvent has to decide on the tag and fill in curEvtTagFlv_ genOK = this->generateSignalEvent(); } else { this->setGenNtupleIntegerBranchValue("genSig",0); UInt_t bkgndID(0); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { Int_t gen(0); if ( bkgndClassNames[iBkgnd] == type ) { gen = 1; bkgndID = iBkgnd; } this->setGenNtupleIntegerBranchValue( bkgndClassNamesGen[iBkgnd], gen ); } genOK = this->generateBkgndEvent(bkgndID); } if (!genOK) { // If there was a problem with the generation then break out and return. // The problem model will have adjusted itself so that all should be OK next time. break; } if (this->useDP() == kTRUE) { this->setDPDtBranchValues(); // store DP, decay time and tagging variables in the ntuple } // Store the event's tag and tagging category this->setGenNtupleIntegerBranchValue("cpEigenvalue", cpEigenValue_); const TString& trueTagVarName { flavTag_->getTrueTagVarName() }; if ( trueTagVarName != "" ) { this->setGenNtupleIntegerBranchValue(trueTagVarName, curEvtTrueTagFlv_); } if ( cpEigenValue_ == QFS ) { const TString& decayFlvVarName { flavTag_->getDecayFlvVarName() }; if ( decayFlvVarName == "" ) { std::cerr<<"ERROR in LauTimeDepFitModel::genExpt : Decay flavour variable not set for QFS decay, see LauFlavTag::setDecayFlvVarName()."<Exit(EXIT_FAILURE); } else { this->setGenNtupleIntegerBranchValue(decayFlvVarName, curEvtDecayFlv_); } } const std::vector& tagVarNames { flavTag_->getTagVarNames() }; const std::vector& mistagVarNames { flavTag_->getMistagVarNames() }; // Loop over the taggers - values set via generateSignalEvent const std::size_t nTaggers {flavTag_->getNTaggers()}; for (std::size_t i=0; isetGenNtupleIntegerBranchValue(tagVarNames[i], curEvtTagFlv_[i]); this->setGenNtupleDoubleBranchValue(mistagVarNames[i], curEvtMistag_[i]); } // Store the event number (within this experiment) // and then increment it this->setGenNtupleIntegerBranchValue("iEvtWithinExpt",evtNum); ++evtNum; // Write the values into the tree this->fillGenNtupleBranches(); // Print an occasional progress message if (iEvt%1000 == 0) {std::cout<<"INFO in LauTimeDepFitModel::genExpt : Generated event number "< ms_double = ( t2 - t1 )/nEvtsGen; std::cout<<"INFO in LauTimeDepFitModel::genExpt : Average per-event generation time for "<useDP() && genOK) { sigModelB0bar_->checkToyMC(kTRUE); sigModelB0_->checkToyMC(kTRUE); std::cout<<"aSqMaxSet = "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } LauParArray fitFracB0 = sigModelB0_->getFitFractions(); if (fitFracB0.size() != nSigComp_) { std::cerr<<"ERROR in LauTimeDepFitModel::generate : Fit Fraction array of unexpected dimension: "<Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetMeanEff().value()); meanEffB0_.value(sigModelB0_->getMeanEff().value()); 
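	// The yield bookkeeping in eventsToGenerate() above allows a negative expected yield
	// (|N| events generated with weight -1) and can Poisson-fluctuate the expected count.
	// A standalone sketch of that logic, with std::poisson_distribution standing in for
	// LauRandom::randomFun()->Poisson (hypothetical helper, simplified types):
	#include <cmath>
	#include <random>
	#include <utility>

	std::pair<int,double> smearYieldSketch( std::mt19937& rng, const double expectedYield,
	                                        const bool poissonSmear )
	{
		double weight { 1.0 };
		double nEvts { expectedYield };
		if ( nEvts < 0.0 ) {
			weight = -1.0;          // negative yields are generated with negative event weight
			nEvts = std::fabs( nEvts );
		}
		if ( poissonSmear ) {
			std::poisson_distribution<int> pois{ nEvts };
			return { pois( rng ), weight };
		}
		return { static_cast<int>( std::lround( nEvts ) ), weight };
	}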
DPRateB0bar_.value(sigModelB0bar_->getDPRate().value()); DPRateB0_.value(sigModelB0_->getDPRate().value()); } } // If we're reusing embedded events or if the generation is being // reset then clear the lists of used events if (reuseSignal_ || !genOK) { if (signalTree_) { signalTree_->clearUsedList(); } } for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { LauEmbeddedData* data = bkgndTree_[bkgndID]; if (reuseBkgnd_[bkgndID] || !genOK) { if (data) { data->clearUsedList(); } } } return genOK; } Bool_t LauTimeDepFitModel::generateSignalEvent() { // Generate signal event, including SCF if necessary. // DP:DecayTime generation follows. // If it's ok, we then generate mES, DeltaE, Fisher/NN... Bool_t genOK(kTRUE); Bool_t generatedEvent(kFALSE); if (this->useDP()) { if (signalTree_) { signalTree_->getEmbeddedEvent(kinematicsB0bar_); //curEvtTagFlv_ = TMath::Nint(signalTree_->getValue("tagFlv")); curEvtDecayTimeErr_ = signalTree_->getValue(signalDecayTimePdf_->varErrName()); curEvtDecayTime_ = signalTree_->getValue(signalDecayTimePdf_->varName()); if (signalTree_->haveBranch("mcMatch")) { Int_t match = TMath::Nint(signalTree_->getValue("mcMatch")); if (match) { this->setGenNtupleIntegerBranchValue("genTMSig",1); this->setGenNtupleIntegerBranchValue("genSCFSig",0); } else { this->setGenNtupleIntegerBranchValue("genTMSig",0); this->setGenNtupleIntegerBranchValue("genSCFSig",1); } } } else { nGenLoop_ = 0; // Now generate from the combined DP / decay-time PDF while (generatedEvent == kFALSE && nGenLoop_ < iterationsMax_) { curEvtTrueTagFlv_ = LauFlavTag::Flavour::Unknown; curEvtDecayFlv_ = LauFlavTag::Flavour::Unknown; // First choose the true tag, accounting for the production asymmetry // CONVENTION WARNING regarding meaning of sign of AProd Double_t random = LauRandom::randomFun()->Rndm(); if (random <= 0.5 * ( 1.0 - AProd_.unblindValue() ) ) { curEvtTrueTagFlv_ = LauFlavTag::Flavour::B; } else { curEvtTrueTagFlv_ = LauFlavTag::Flavour::Bbar; } // Generate the DP position Double_t m13Sq{0.0}, m23Sq{0.0}; kinematicsB0bar_->genFlatPhaseSpace(m13Sq, m23Sq); // Next, calculate the total A and Abar for the given DP position sigModelB0_->calcLikelihoodInfo(m13Sq, m23Sq); sigModelB0bar_->calcLikelihoodInfo(m13Sq, m23Sq); // Generate decay time const Double_t tMin = signalDecayTimePdf_->minAbscissa(); const Double_t tMax = signalDecayTimePdf_->maxAbscissa(); curEvtDecayTime_ = LauRandom::randomFun()->Uniform(tMin,tMax); // Generate the decay time error (NB the kTRUE forces the generation of a new value) curEvtDecayTimeErr_ = signalDecayTimePdf_->generateError(kTRUE); // Calculate all the decay time info signalDecayTimePdf_->calcLikelihoodInfo(curEvtDecayTime_,curEvtDecayTimeErr_); // Retrieve the amplitudes and efficiency from the dynamics const LauComplex& Abar { sigModelB0bar_->getEvtDPAmp() }; const LauComplex& A { sigModelB0_->getEvtDPAmp() }; const Double_t ASq { A.abs2() }; const Double_t AbarSq { Abar.abs2() }; const Double_t dpEff { sigModelB0bar_->getEvtEff() }; // Also retrieve all the decay time terms const Double_t dtCos { signalDecayTimePdf_->getCosTerm() }; const Double_t dtSin { signalDecayTimePdf_->getSinTerm() }; const Double_t dtCosh { signalDecayTimePdf_->getCoshTerm() }; const Double_t dtSinh { signalDecayTimePdf_->getSinhTerm() }; // and the decay time acceptance const Double_t dtEff { signalDecayTimePdf_->getEffiTerm() }; if ( cpEigenValue_ == QFS) { // Calculate the total intensities for each flavour-specific final state const Double_t ATotSq { ( ASq * dtCosh + 
curEvtTrueTagFlv_ * ASq * dtCos ) * dpEff * dtEff }; const Double_t AbarTotSq { ( AbarSq * dtCosh - curEvtTrueTagFlv_ * AbarSq * dtCos ) * dpEff * dtEff }; const Double_t ASumSq { ATotSq + AbarTotSq }; // Finally we throw the dice to see whether this event should be generated (and, if so, which final state) const Double_t randNum = LauRandom::randomFun()->Rndm(); if (randNum <= ASumSq / aSqMaxSet_ ) { generatedEvent = kTRUE; nGenLoop_ = 0; if (ASumSq > aSqMaxVar_) {aSqMaxVar_ = ASumSq;} if ( randNum <= ATotSq / aSqMaxSet_ ) { curEvtDecayFlv_ = LauFlavTag::Flavour::B; } else { curEvtDecayFlv_ = LauFlavTag::Flavour::Bbar; } // Generate the flavour tagging information from the true tag // (we do this after accepting the event to save time) flavTag_->generateEventInfo( curEvtTrueTagFlv_, curEvtDecayTime_ ); curEvtTagFlv_ = flavTag_->getCurEvtTagFlv(); curEvtMistag_ = flavTag_->getCurEvtMistag(); } else { nGenLoop_++; } } else { // Calculate the DP terms const Double_t aSqSum { ASq + AbarSq }; const Double_t aSqDif { ASq - AbarSq }; const LauComplex inter { Abar * A.conj() * phiMixComplex_ }; const Double_t interTermIm { ( cpEigenValue_ == CPEven ) ? 2.0 * inter.im() : -2.0 * inter.im() }; const Double_t interTermRe { ( cpEigenValue_ == CPEven ) ? 2.0 * inter.re() : -2.0 * inter.re() }; // Combine DP and decay-time info for all terms const Double_t coshTerm { aSqSum * dtCosh }; const Double_t sinhTerm { interTermRe * dtSinh }; const Double_t cosTerm { aSqDif * dtCos }; const Double_t sinTerm { interTermIm * dtSin }; // Sum to obtain the total and multiply by the efficiency // Multiplying the cos and sin terms by the true flavour at production const Double_t ATotSq { ( coshTerm + sinhTerm + curEvtTrueTagFlv_ * ( cosTerm - sinTerm ) ) * dpEff * dtEff }; //Finally we throw the dice to see whether this event should be generated const Double_t randNum = LauRandom::randomFun()->Rndm(); if (randNum <= ATotSq/aSqMaxSet_ ) { generatedEvent = kTRUE; nGenLoop_ = 0; if (ATotSq > aSqMaxVar_) {aSqMaxVar_ = ATotSq;} // Generate the flavour tagging information from the true tag // (we do this after accepting the event to save time) flavTag_->generateEventInfo( curEvtTrueTagFlv_, curEvtDecayTime_ ); curEvtTagFlv_ = flavTag_->getCurEvtTagFlv(); curEvtMistag_ = flavTag_->getCurEvtMistag(); } else { nGenLoop_++; } } } // end of while !generatedEvent loop } // end of if (signalTree_) else control } else { if ( signalTree_ ) { signalTree_->getEmbeddedEvent(0); //curEvtTagFlv_ = TMath::Nint(signalTree_->getValue("tagFlv")); curEvtDecayTimeErr_ = signalTree_->getValue(signalDecayTimePdf_->varErrName()); curEvtDecayTime_ = signalTree_->getValue(signalDecayTimePdf_->varName()); } } // Check whether we have generated the toy MC OK. 
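	// The generation loop above is a standard accept/reject: propose a point uniformly
	// over the Dalitz plot and the decay-time window, evaluate the total intensity
	// (DP amplitudes x decay-time terms x efficiencies) and accept with probability
	// intensity / envelope. If no event is accepted within the iteration limit, or the
	// envelope is found to be violated, the envelope is re-estimated (with a small safety
	// factor) and the sample is regenerated. A condensed standalone sketch; `intensity`
	// is a hypothetical stand-in for the full model:
	#include <functional>
	#include <random>

	bool generateEventSketch( std::mt19937& rng,
	                          const std::function<double(double,double,double)>& intensity,
	                          const double tMin, const double tMax,
	                          double& aSqMax, const unsigned int maxIterations )
	{
		std::uniform_real_distribution<double> flat{ 0.0, 1.0 };
		double largestSeen { 0.0 };
		for ( unsigned int iter{0}; iter < maxIterations; ++iter ) {
			const double m13Sq { flat( rng ) };                        // stand-ins for genFlatPhaseSpace
			const double m23Sq { flat( rng ) };
			const double t { tMin + ( tMax - tMin ) * flat( rng ) };   // uniform decay-time proposal
			const double aSq { intensity( m13Sq, m23Sq, t ) };
			if ( aSq > largestSeen ) { largestSeen = aSq; }
			if ( flat( rng ) * aSqMax <= aSq ) { return true; }        // accept
		}
		// No acceptance within the limit: re-estimate the envelope and ask the caller to regenerate
		aSqMax = 1.01 * largestSeen;
		return false;
	}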
if (nGenLoop_ >= iterationsMax_) { aSqMaxSet_ = 1.01 * aSqMaxVar_; genOK = kFALSE; std::cerr<<"WARNING in LauTimeDepFitModel::generateSignalEvent : Hit max iterations: setting aSqMaxSet_ to "< aSqMaxSet_) { aSqMaxSet_ = 1.01 * aSqMaxVar_; genOK = kFALSE; std::cerr<<"WARNING in LauTimeDepFitModel::generateSignalEvent : Found a larger ASq value: setting aSqMaxSet_ to "<updateKinematics(kinematicsB0bar_->getm13Sq(), kinematicsB0bar_->getm23Sq() ); this->generateExtraPdfValues(sigExtraPdf_, signalTree_); } // Check for problems with the embedding if (signalTree_ && (signalTree_->nEvents() == signalTree_->nUsedEvents())) { std::cerr<<"WARNING in LauTimeDepFitModel::generateSignalEvent : Source of embedded signal events used up, clearing the list of used events."<clearUsedList(); } return genOK; } Bool_t LauTimeDepFitModel::generateBkgndEvent(UInt_t bkgndID) { // Generate Bkgnd event Bool_t genOK{kTRUE}; //Check necessary ingredients are in place //TODO these checks should be part of a general sanity check during the initialisation phase if (BkgndDPModelsB_[bkgndID] == nullptr){ std::cerr << "ERROR in LauTimeDepFitModel::generateBkgndEvent : Dalitz plot model is missing" << std::endl; gSystem->Exit(EXIT_FAILURE); } if (BkgndDecayTimePdfs_[bkgndID] == nullptr){ std::cerr << "ERROR in LauTimeDepFitModel::generateBkgndEvent : Decay time model is missing" << std::endl; gSystem->Exit(EXIT_FAILURE); } //TODO restore the ability to embed events from an external source //LauAbsBkgndDPModel* model(0); //LauEmbeddedData* embeddedData(0); //LauPdfPList* extraPdfs(0); //LauKinematics* kinematics(0); //model = BkgndDPModels_[bkgndID]; //if (this->enableEmbedding()) { // // find the right embedded data for the current tagging category // LauTagCatEmbDataMap::const_iterator emb_iter = bkgndTree_[bkgndID].find(curEvtTagCat_); // embeddedData = (emb_iter != bkgndTree_[bkgndID].end()) ? emb_iter->second : 0; //} //extraPdfs = &BkgndPdfs_[bkgndID]; //kinematics = kinematicsB0bar_; //if (this->useDP()) { // if (embeddedData) { // embeddedData->getEmbeddedEvent(kinematics); // } else { // if (model == 0) { // const TString& bkgndClass = this->bkgndClassName(bkgndID); // std::cerr << "ERROR in LauCPFitModel::generateBkgndEvent : Can't find the DP model for background class \"" << bkgndClass << "\"." << std::endl; // gSystem->Exit(EXIT_FAILURE); // } // genOK = model->generate(); // } //} else { // if (embeddedData) { // embeddedData->getEmbeddedEvent(0); // } //} //if (genOK) { // this->generateExtraPdfValues(extraPdfs, embeddedData); //} //// Check for problems with the embedding //if (embeddedData && (embeddedData->nEvents() == embeddedData->nUsedEvents())) { // const TString& bkgndClass = this->bkgndClassName(bkgndID); // std::cerr << "WARNING in LauCPFitModel::generateBkgndEvent : Source of embedded " << bkgndClass << " events used up, clearing the list of used events." 
<< std::endl; // embeddedData->clearUsedList(); //} // LauKinematics* kinematics{nullptr}; switch ( BkgndTypes_[bkgndID] ) { case LauFlavTag::BkgndType::Combinatorial: { // First choose the true tag, accounting for the production asymmetry // CONVENTION WARNING regarding meaning of sign of AProd // NB the true tag doesn't really mean anything for combinatorial background Double_t random = LauRandom::randomFun()->Rndm(); if ( random <= 0.5 * ( 1.0 - AProdBkgnd_[bkgndID]->unblindValue() ) ) { curEvtTrueTagFlv_ = LauFlavTag::Flavour::B; } else { curEvtTrueTagFlv_ = LauFlavTag::Flavour::Bbar; } if ( cpEigenValue_ == CPEigenvalue::QFS ) { if ( BkgndDPModelsBbar_[bkgndID] != nullptr ) { // generate the true decay flavour and the corresponding DP position // (the supply of two DP models indicates a possible asymmetry) const Double_t rateB { BkgndDPModelsB_[bkgndID]->getPdfNorm() }; const Double_t rateBbar { BkgndDPModelsBbar_[bkgndID]->getPdfNorm() }; const Double_t ADet { ( rateBbar - rateB ) / ( rateBbar + rateB ) }; random = LauRandom::randomFun()->Rndm(); if ( random <= 0.5 * ( 1.0 - ADet ) ) { curEvtDecayFlv_ = LauFlavTag::Flavour::B; BkgndDPModelsB_[bkgndID]->generate(); kinematics = kinematicsB0_; } else { curEvtDecayFlv_ = LauFlavTag::Flavour::Bbar; BkgndDPModelsBbar_[bkgndID]->generate(); kinematics = kinematicsB0bar_; } } else { // generate the true decay flavour // (the supply of only a single model indicates no asymmetry) random = LauRandom::randomFun()->Rndm(); if ( random <= 0.5 ) { curEvtDecayFlv_ = LauFlavTag::Flavour::B; } else { curEvtDecayFlv_ = LauFlavTag::Flavour::Bbar; } // generate the DP position BkgndDPModelsB_[bkgndID]->generate(); kinematics = kinematicsB0_; } } else { // mark that the decay flavour is unknown curEvtDecayFlv_ = LauFlavTag::Flavour::Unknown; // generate the DP position BkgndDPModelsB_[bkgndID]->generate(); kinematics = kinematicsB0_; } // generate decay time and its error curEvtDecayTimeErr_ = BkgndDecayTimePdfs_[bkgndID]->generateError(kTRUE); curEvtDecayTime_ = BkgndDecayTimePdfs_[bkgndID]->generate( kinematics ); // generate the flavour tagging information from the true tag and decay flavour // (we do this after accepting the event to save time) flavTag_->generateBkgndEventInfo( bkgndID, curEvtTrueTagFlv_, curEvtDecayFlv_, curEvtDecayTime_ ); curEvtTagFlv_ = flavTag_->getCurEvtTagFlv(); curEvtMistag_ = flavTag_->getCurEvtMistag(); break; } case LauFlavTag::BkgndType::FlavourSpecific: { const LauDecayTime::FuncType dtType { BkgndDecayTimePdfs_[bkgndID]->getFuncType() }; if ( dtType == LauDecayTime::FuncType::ExpTrig or dtType == LauDecayTime::FuncType::ExpHypTrig ) { const Double_t tMin = BkgndDecayTimePdfs_[bkgndID]->minAbscissa(); const Double_t tMax = BkgndDecayTimePdfs_[bkgndID]->maxAbscissa(); const Double_t maxDtEff { BkgndDecayTimePdfs_[bkgndID]->getMaxEfficiency() }; // TODO - do we need the factor 2? 
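	// Both the production asymmetry (AProd) and the detection asymmetry computed above,
	//   ADet = (rateBbar - rateB) / (rateBbar + rateB),
	// are converted into a flavour choice in the same way: the B flavour is drawn with
	// probability 0.5*(1 - A). A small standalone sketch (illustrative helper only):
	#include <random>

	enum class FlavourSketch { B, Bbar };

	FlavourSketch chooseFlavourSketch( std::mt19937& rng, const double asym )
	{
		std::uniform_real_distribution<double> flat{ 0.0, 1.0 };
		return ( flat( rng ) <= 0.5 * ( 1.0 - asym ) ) ? FlavourSketch::B : FlavourSketch::Bbar;
	}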
const Double_t ASumSqMax { 2.0 * ( BkgndDPModelsB_[bkgndID]->getMaxHeight() + BkgndDPModelsBbar_[bkgndID]->getMaxHeight() ) * maxDtEff }; nGenLoop_ = 0; Bool_t generatedEvent{kFALSE}; do { curEvtTrueTagFlv_ = LauFlavTag::Flavour::Unknown; curEvtDecayFlv_ = LauFlavTag::Flavour::Unknown; // First choose the true tag, accounting for the production asymmetry // CONVENTION WARNING regarding meaning of sign of AProd Double_t random = LauRandom::randomFun()->Rndm(); if (random <= 0.5 * ( 1.0 - AProdBkgnd_[bkgndID]->unblindValue() ) ) { curEvtTrueTagFlv_ = LauFlavTag::Flavour::B; } else { curEvtTrueTagFlv_ = LauFlavTag::Flavour::Bbar; } // Generate the DP position and calculate the total A^2 and Abar^2 kinematics = BkgndDPModelsB_[bkgndID]->genUniformPoint(); BkgndDPModelsB_[bkgndID]->calcLikelihoodInfo(kinematics); BkgndDPModelsBbar_[bkgndID]->calcLikelihoodInfo(kinematics); // Generate decay time curEvtDecayTime_ = LauRandom::randomFun()->Uniform(tMin,tMax); // Generate the decay time error (NB the kTRUE forces the generation of a new value) curEvtDecayTimeErr_ = BkgndDecayTimePdfs_[bkgndID]->generateError(kTRUE); // Calculate all the decay time info BkgndDecayTimePdfs_[bkgndID]->calcLikelihoodInfo(curEvtDecayTime_,curEvtDecayTimeErr_); // Retrieve the DP intensities const Double_t ASq { BkgndDPModelsB_[bkgndID]->getRawValue() }; const Double_t AbarSq { BkgndDPModelsBbar_[bkgndID]->getRawValue() }; // Also retrieve all the decay time terms const Double_t dtCos { BkgndDecayTimePdfs_[bkgndID]->getCosTerm() }; const Double_t dtCosh { BkgndDecayTimePdfs_[bkgndID]->getCoshTerm() }; // and the decay time acceptance const Double_t dtEff { BkgndDecayTimePdfs_[bkgndID]->getEffiTerm() }; // Calculate the total intensities for each flavour-specific final state const Double_t ATotSq { ( ASq * dtCosh + curEvtTrueTagFlv_ * ASq * dtCos ) * dtEff }; const Double_t AbarTotSq { ( AbarSq * dtCosh - curEvtTrueTagFlv_ * AbarSq * dtCos ) * dtEff }; const Double_t ASumSq { ATotSq + AbarTotSq }; // Finally we throw the dice to see whether this event should be generated (and, if so, which final state) const Double_t randNum = LauRandom::randomFun()->Rndm(); if (randNum <= ASumSq / ASumSqMax ) { generatedEvent = kTRUE; nGenLoop_ = 0; if (ASumSq > ASumSqMax) { std::cerr << "WARNING in LauTimeDepFitModel::generateBkgndEvent : ASumSq > ASumSqMax" << std::endl; } if ( randNum <= ATotSq / ASumSqMax ) { curEvtDecayFlv_ = LauFlavTag::Flavour::B; } else { curEvtDecayFlv_ = LauFlavTag::Flavour::Bbar; } //Debug ASumSqMax huge size w.r.t. 
ASumSq for SDPs //std::cout<<"ASq "<generateBkgndEventInfo( bkgndID, curEvtTrueTagFlv_, curEvtDecayFlv_, curEvtDecayTime_ ); curEvtTagFlv_ = flavTag_->getCurEvtTagFlv(); curEvtMistag_ = flavTag_->getCurEvtMistag(); } else { nGenLoop_++; } } while (generatedEvent == kFALSE && nGenLoop_ < iterationsMax_); } else { // Hist, Delta, Exp, DeltaExp decay-time types // Since there are no oscillations for these decay-time types, // the true decay flavour must be equal to the true tag flavour // First choose the true tag and decay flavour, accounting for both the production and detection asymmetries // CONVENTION WARNING regarding meaning of sign of AProd and ADet const Double_t AProd { AProdBkgnd_[bkgndID]->unblindValue() }; const Double_t rateB { BkgndDPModelsB_[bkgndID]->getPdfNorm() }; const Double_t rateBbar { BkgndDPModelsBbar_[bkgndID]->getPdfNorm() }; const Double_t ADet { ( rateBbar - rateB ) / ( rateBbar + rateB ) }; const Double_t random = LauRandom::randomFun()->Rndm(); // TODO - is this the correct way to combine the production and detection asymmetries? if ( random <= 0.5 * ( 1.0 - AProd ) * ( 1.0 - ADet ) ) { curEvtDecayFlv_ = curEvtTrueTagFlv_ = LauFlavTag::Flavour::B; } else { curEvtDecayFlv_ = curEvtTrueTagFlv_ = LauFlavTag::Flavour::Bbar; } // generate the DP position if ( curEvtDecayFlv_ == LauFlavTag::Flavour::B ) { BkgndDPModelsB_[bkgndID]->generate(); kinematics = kinematicsB0_; } else { BkgndDPModelsBbar_[bkgndID]->generate(); kinematics = kinematicsB0bar_; } // generate decay time and its error curEvtDecayTimeErr_ = BkgndDecayTimePdfs_[bkgndID]->generateError(kTRUE); curEvtDecayTime_ = BkgndDecayTimePdfs_[bkgndID]->generate( kinematics ); // generate the flavour tagging information from the true tag and decay flavour // (we do this after accepting the event to save time) flavTag_->generateBkgndEventInfo( bkgndID, curEvtTrueTagFlv_, curEvtDecayFlv_, curEvtDecayTime_ ); curEvtTagFlv_ = flavTag_->getCurEvtTagFlv(); curEvtMistag_ = flavTag_->getCurEvtMistag(); } break; } case LauFlavTag::BkgndType::SelfConjugate: // TODO break; case LauFlavTag::BkgndType::NonSelfConjugate: // TODO break; } if ( genOK ) { // Make sure both kinematics objects are up-to-date kinematicsB0_->updateKinematics(kinematics->getm13Sq(), kinematics->getm23Sq() ); kinematicsB0bar_->updateKinematics(kinematics->getm13Sq(), kinematics->getm23Sq() ); this->generateExtraPdfValues(BkgndPdfs_[bkgndID], bkgndTree_[bkgndID]); } return genOK; } void LauTimeDepFitModel::setupGenNtupleBranches() { // Setup the required ntuple branches this->addGenNtupleDoubleBranch("evtWeight"); this->addGenNtupleIntegerBranch("genSig"); this->addGenNtupleIntegerBranch("cpEigenvalue"); const TString& trueTagVarName { flavTag_->getTrueTagVarName() }; if ( trueTagVarName != "" ) { this->addGenNtupleIntegerBranch(trueTagVarName); } if ( cpEigenValue_ == QFS ) { const TString& decayFlvVarName { flavTag_->getDecayFlvVarName() }; if ( decayFlvVarName == "" ) { std::cerr<<"ERROR in LauTimeDepFitModel::setupGenNtupleBranches : Decay flavour variable not set for QFS decay, see LauFlavTag::setDecayFlvVarName()."<Exit(EXIT_FAILURE); } else { this->addGenNtupleIntegerBranch(decayFlvVarName); } } const std::vector& tagVarNames { flavTag_->getTagVarNames() }; const std::vector& mistagVarNames { flavTag_->getMistagVarNames() }; const std::size_t nTaggers {flavTag_->getNTaggers()}; for (std::size_t taggerID{0}; taggerIDaddGenNtupleIntegerBranch(tagVarNames[taggerID]); this->addGenNtupleDoubleBranch(mistagVarNames[taggerID]); } if (this->useDP() == 
kTRUE) { // Let's add the decay time variables. this->addGenNtupleDoubleBranch(signalDecayTimePdf_->varName()); if ( signalDecayTimePdf_->varErrName() != "" ) { this->addGenNtupleDoubleBranch(signalDecayTimePdf_->varErrName()); } this->addGenNtupleDoubleBranch("m12"); this->addGenNtupleDoubleBranch("m23"); this->addGenNtupleDoubleBranch("m13"); this->addGenNtupleDoubleBranch("m12Sq"); this->addGenNtupleDoubleBranch("m23Sq"); this->addGenNtupleDoubleBranch("m13Sq"); this->addGenNtupleDoubleBranch("cosHel12"); this->addGenNtupleDoubleBranch("cosHel23"); this->addGenNtupleDoubleBranch("cosHel13"); if (kinematicsB0bar_->squareDP() && kinematicsB0_->squareDP()) { this->addGenNtupleDoubleBranch("mPrime"); this->addGenNtupleDoubleBranch("thPrime"); } // Can add the real and imaginary parts of the B0 and B0bar total // amplitudes seen in the generation (restrict this with a flag // that defaults to false) if ( storeGenAmpInfo_ ) { this->addGenNtupleDoubleBranch("reB0Amp"); this->addGenNtupleDoubleBranch("imB0Amp"); this->addGenNtupleDoubleBranch("reB0barAmp"); this->addGenNtupleDoubleBranch("imB0barAmp"); } } // Let's look at the extra variables for signal in one of the tagging categories for ( const LauAbsPdf* pdf : sigExtraPdf_ ) { const std::vector varNames{ pdf->varNames() }; for ( const TString& varName : varNames ) { if ( varName != "m13Sq" && varName != "m23Sq" ) { this->addGenNtupleDoubleBranch( varName ); } } } } void LauTimeDepFitModel::setDPDtBranchValues() { // Store the decay time variables. this->setGenNtupleDoubleBranchValue(signalDecayTimePdf_->varName(),curEvtDecayTime_); if ( signalDecayTimePdf_->varErrName() != "" ) { this->setGenNtupleDoubleBranchValue(signalDecayTimePdf_->varErrName(),curEvtDecayTimeErr_); } // CONVENTION WARNING // TODO check - for now use B0 for any tags //LauKinematics* kinematics(0); //if (curEvtTagFlv_[position]<0) { LauKinematics* kinematics = kinematicsB0_; //} else { // kinematics = kinematicsB0bar_; //} // Store all the DP information this->setGenNtupleDoubleBranchValue("m12", kinematics->getm12()); this->setGenNtupleDoubleBranchValue("m23", kinematics->getm23()); this->setGenNtupleDoubleBranchValue("m13", kinematics->getm13()); this->setGenNtupleDoubleBranchValue("m12Sq", kinematics->getm12Sq()); this->setGenNtupleDoubleBranchValue("m23Sq", kinematics->getm23Sq()); this->setGenNtupleDoubleBranchValue("m13Sq", kinematics->getm13Sq()); this->setGenNtupleDoubleBranchValue("cosHel12", kinematics->getc12()); this->setGenNtupleDoubleBranchValue("cosHel23", kinematics->getc23()); this->setGenNtupleDoubleBranchValue("cosHel13", kinematics->getc13()); if (kinematics->squareDP()) { this->setGenNtupleDoubleBranchValue("mPrime", kinematics->getmPrime()); this->setGenNtupleDoubleBranchValue("thPrime", kinematics->getThetaPrime()); } // Can add the real and imaginary parts of the B0 and B0bar total // amplitudes seen in the generation (restrict this with a flag // that defaults to false) if ( storeGenAmpInfo_ ) { if ( this->getGenNtupleIntegerBranchValue("genSig")==1 ) { LauComplex Abar = sigModelB0bar_->getEvtDPAmp(); LauComplex A = sigModelB0_->getEvtDPAmp(); this->setGenNtupleDoubleBranchValue("reB0Amp", A.re()); this->setGenNtupleDoubleBranchValue("imB0Amp", A.im()); this->setGenNtupleDoubleBranchValue("reB0barAmp", Abar.re()); this->setGenNtupleDoubleBranchValue("imB0barAmp", Abar.im()); } else { this->setGenNtupleDoubleBranchValue("reB0Amp", 0.0); this->setGenNtupleDoubleBranchValue("imB0Amp", 0.0); this->setGenNtupleDoubleBranchValue("reB0barAmp", 0.0); 
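	// The three invariant mass-squared branches stored for each event are not independent:
	// they obey the standard Dalitz-plot sum rule
	//   m12Sq + m13Sq + m23Sq = M^2 + m1^2 + m2^2 + m3^2,
	// so any one of them can be recomputed from the other two. A small sketch
	// (the masses passed in are placeholders, not tied to a particular decay):
	inline double thirdMassSqSketch( const double m13Sq, const double m23Sq,
	                                 const double parentMass,
	                                 const double m1, const double m2, const double m3 )
	{
		const double sumSq { parentMass*parentMass + m1*m1 + m2*m2 + m3*m3 };
		return sumSq - m13Sq - m23Sq;   // = m12Sq
	}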
this->setGenNtupleDoubleBranchValue("imB0barAmp", 0.0); } } } void LauTimeDepFitModel::generateExtraPdfValues(LauPdfPList& extraPdfs, LauEmbeddedData* embeddedData) { // CONVENTION WARNING LauKinematics* kinematics = kinematicsB0_; //LauKinematics* kinematics(0); //if (curEvtTagFlv_<0) { // kinematics = kinematicsB0_; //} else { // kinematics = kinematicsB0bar_; //} // Generate from the extra PDFs for (auto& pdf : extraPdfs){ LauFitData genValues; if (embeddedData) { genValues = embeddedData->getValues( pdf->varNames() ); } else { genValues = pdf->generate(kinematics); } for (auto& var : genValues){ TString varName = var.first; if ( varName != "m13Sq" && varName != "m23Sq" ) { Double_t value = var.second; this->setGenNtupleDoubleBranchValue(varName,value); } } } } void LauTimeDepFitModel::propagateParUpdates() { // Update the complex mixing phase if (useSinCos_) { phiMixComplex_.setRealPart(cosPhiMix_.unblindValue()); phiMixComplex_.setImagPart(-1.0*sinPhiMix_.unblindValue()); } else { phiMixComplex_.setRealPart(TMath::Cos(-1.0*phiMix_.unblindValue())); phiMixComplex_.setImagPart(TMath::Sin(-1.0*phiMix_.unblindValue())); } // Update the total normalisation for the signal likelihood if (this->useDP() == kTRUE) { this->updateCoeffs(); sigModelB0bar_->updateCoeffs(coeffsB0bar_); sigModelB0_->updateCoeffs(coeffsB0_); this->calcInterferenceTermNorm(); } // Update the decay time normalisation if ( signalDecayTimePdf_ ) { signalDecayTimePdf_->propagateParUpdates(); } // TODO // - maybe also need to add an update of the background decay time PDFs here // Update the signal events from the background numbers if not doing an extended fit // And update the tagging category fractions this->updateSigEvents(); } void LauTimeDepFitModel::updateSigEvents() { // The background parameters will have been set from Minuit. // We need to update the signal events using these. if (!this->doEMLFit()) { Double_t nTotEvts = this->eventsPerExpt(); Double_t signalEvents = nTotEvts; signalEvents_->range(-2.0*nTotEvts,2.0*nTotEvts); for (auto& nBkgndEvents : bkgndEvents_){ if ( nBkgndEvents->isLValue() ) { LauParameter* yield = dynamic_cast( nBkgndEvents ); yield->range(-2.0*nTotEvts,2.0*nTotEvts); } } // Subtract background events (if any) from signal. if (usingBkgnd_ == kTRUE) { for (auto& nBkgndEvents : bkgndEvents_){ signalEvents -= nBkgndEvents->value(); } } if ( ! signalEvents_->fixed() ) { signalEvents_->value(signalEvents); } } } void LauTimeDepFitModel::cacheInputFitVars() { // Fill the internal data trees of the signal and background models. // Note that we store the events of both charges in both the // negative and the positive models. It's only later, at the stage // when the likelihood is being calculated, that we separate them. 
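	// propagateParUpdates() above rebuilds the complex mixing factor, which in either
	// parametrisation equals exp(-i*phiMix), and updateSigEvents() re-derives the signal
	// yield from the fixed per-experiment total in a non-extended fit. Two short
	// standalone sketches of those relations (hypothetical helpers):
	#include <cmath>
	#include <complex>
	#include <numeric>
	#include <vector>

	// exp(-i*phiMix), matching the (cos, -sin) real/imaginary assignments above
	inline std::complex<double> mixingPhaseFactorSketch( const double phiMix )
	{
		return { std::cos( phiMix ), -std::sin( phiMix ) };
	}

	// Non-extended fit: the signal yield is the (fixed) total minus the background yields
	inline double signalYieldSketch( const double nTotal, const std::vector<double>& bkgndYields )
	{
		return nTotal - std::accumulate( bkgndYields.begin(), bkgndYields.end(), 0.0 );
	}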
LauFitDataTree* inputFitData = this->fitData(); evtCPEigenVals_.clear(); const Bool_t hasCPEV = ( (cpevVarName_ != "") && inputFitData->haveBranch( cpevVarName_ ) ); UInt_t nEvents = inputFitData->nEvents(); evtCPEigenVals_.reserve( nEvents ); LauFitData::const_iterator fitdata_iter; for (UInt_t iEvt = 0; iEvt < nEvents; iEvt++) { const LauFitData& dataValues = inputFitData->getData(iEvt); // if the CP-eigenvalue is in the data use those, otherwise use the default if ( hasCPEV ) { fitdata_iter = dataValues.find( cpevVarName_ ); const Int_t cpEV = static_cast( fitdata_iter->second ); if ( cpEV == 1 ) { cpEigenValue_ = CPEven; } else if ( cpEV == -1 ) { cpEigenValue_ = CPOdd; } else if ( cpEV == 0 ) { cpEigenValue_ = QFS; } else { std::cerr<<"WARNING in LauTimeDepFitModel::cacheInputFitVars : Unknown value: "<useDP() == kTRUE) { // DecayTime and SigmaDecayTime signalDecayTimePdf_->cacheInfo(*inputFitData); // cache all the backgrounds too for(auto& bg : BkgndDecayTimePdfs_) {bg->cacheInfo(*inputFitData);} } // Flavour tagging information flavTag_->cacheInputFitVars(inputFitData,signalDecayTimePdf_->varName()); // ...and then the extra PDFs if (not sigExtraPdf_.empty()){ this->cacheInfo(sigExtraPdf_, *inputFitData); } if(usingBkgnd_ == kTRUE){ for (auto& pdf : BkgndPdfs_){ this->cacheInfo(pdf, *inputFitData); } } if (this->useDP() == kTRUE) { sigModelB0bar_->fillDataTree(*inputFitData); sigModelB0_->fillDataTree(*inputFitData); if (usingBkgnd_ == kTRUE) { for (auto& model : BkgndDPModelsB_){ model->fillDataTree(*inputFitData); } for (auto& model : BkgndDPModelsBbar_){ if (model != nullptr) { model->fillDataTree(*inputFitData); } } } } } Double_t LauTimeDepFitModel::getTotEvtLikelihood(const UInt_t iEvt) { // Get the CP eigenvalue of the current event cpEigenValue_ = evtCPEigenVals_[iEvt]; // Get the DP and DecayTime likelihood for signal (TODO and eventually backgrounds) this->getEvtDPDtLikelihood(iEvt); // Get the combined extra PDFs likelihood for signal (TODO and eventually backgrounds) this->getEvtExtraLikelihoods(iEvt); // Construct the total likelihood for signal, qqbar and BBbar backgrounds Double_t sigLike = sigDPLike_ * sigExtraLike_; Double_t signalEvents = signalEvents_->unblindValue(); // TODO - consider what to do here - do we even want the option not to use the DP in this model? //if ( not this->useDP() ) { //signalEvents *= 0.5 * (1.0 + curEvtTagFlv_ * signalAsym_->unblindValue()); //} // Construct the total event likelihood Double_t likelihood { sigLike * signalEvents }; if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { const Double_t bkgndEvents { bkgndEvents_[bkgndID]->unblindValue() }; likelihood += bkgndEvents*bkgndDPLike_[bkgndID]*bkgndExtraLike_[bkgndID]; } } return likelihood; } Double_t LauTimeDepFitModel::getEventSum() const { Double_t eventSum(0.0); eventSum += signalEvents_->unblindValue(); if (usingBkgnd_) { for ( const auto& yieldPar : bkgndEvents_ ) { eventSum += yieldPar->unblindValue(); } } return eventSum; } void LauTimeDepFitModel::getEvtDPDtLikelihood(const UInt_t iEvt) { // Function to return the signal and background likelihoods for the // Dalitz plot for the given event evtNo. if ( ! this->useDP() ) { // There's always going to be a term in the likelihood for the // signal, so we'd better not zero it. 
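	// The per-event likelihood assembled in getTotEvtLikelihood() above is the
	// yield-weighted sum of the signal and background densities, with each density being
	// the product of its DP x decay-time term and its extra-variable term; getEventSum()
	// provides the corresponding sum of yields for the extended-likelihood normalisation.
	// A compact standalone sketch (hypothetical helper, simplified inputs):
	#include <cstddef>
	#include <vector>

	inline double totalEventLikelihoodSketch( const double nSig, const double pSig,
	                                          const std::vector<double>& nBkgnd,
	                                          const std::vector<double>& pBkgnd )
	{
		double like { nSig * pSig };                       // N_sig * P_sig(event)
		for ( std::size_t b{0}; b < nBkgnd.size(); ++b ) {
			like += nBkgnd[b] * pBkgnd[b];                 // + sum_b N_b * P_b(event)
		}
		return like;
	}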
sigDPLike_ = 1.0; const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { if (usingBkgnd_ == kTRUE) { bkgndDPLike_[bkgndID] = 1.0; } else { bkgndDPLike_[bkgndID] = 0.0; } } return; } // Calculate event quantities // Get the DP dynamics, decay time, and flavour tagging to calculate // everything required for the likelihood calculation sigModelB0bar_->calcLikelihoodInfo(iEvt); sigModelB0_->calcLikelihoodInfo(iEvt); signalDecayTimePdf_->calcLikelihoodInfo(static_cast(iEvt)); flavTag_->updateEventInfo(iEvt); // Retrieve the amplitudes and efficiency from the dynamics LauComplex Abar { sigModelB0bar_->getEvtDPAmp() }; LauComplex A { sigModelB0_->getEvtDPAmp() }; const Double_t dpEff { sigModelB0bar_->getEvtEff() }; // If this is a QFS decay, one of the DP amplitudes needs to be zeroed curEvtDecayFlv_ = LauFlavTag::Flavour::Unknown; if (cpEigenValue_ == QFS){ curEvtDecayFlv_ = flavTag_->getCurEvtDecayFlv(); if ( curEvtDecayFlv_ == LauFlavTag::Flavour::B ) { Abar.zero(); } else if ( curEvtDecayFlv_ == LauFlavTag::Flavour::Bbar ) { A.zero(); } else { std::cerr<<"ERROR in LauTimeDepFitModel::getEvtDPDtLikelihood : Decay flavour must be known for QFS decays."<Exit(EXIT_FAILURE); } } // Next calculate the DP terms const Double_t aSqSum { A.abs2() + Abar.abs2() }; const Double_t aSqDif { A.abs2() - Abar.abs2() }; Double_t interTermRe { 0.0 }; Double_t interTermIm { 0.0 }; if ( cpEigenValue_ != QFS ) { const LauComplex inter { Abar * A.conj() * phiMixComplex_ }; if ( cpEigenValue_ == CPEven ) { interTermIm = 2.0 * inter.im(); interTermRe = 2.0 * inter.re(); } else { interTermIm = -2.0 * inter.im(); interTermRe = -2.0 * inter.re(); } } // First get all the decay time terms // TODO Backgrounds // Get the decay time acceptance const Double_t dtEff { signalDecayTimePdf_->getEffiTerm() }; // Get all the decay time terms const Double_t dtCos { signalDecayTimePdf_->getCosTerm() }; const Double_t dtSin { signalDecayTimePdf_->getSinTerm() }; const Double_t dtCosh { signalDecayTimePdf_->getCoshTerm() }; const Double_t dtSinh { signalDecayTimePdf_->getSinhTerm() }; // Get the decay time error term const Double_t dtErrLike { signalDecayTimePdf_->getErrTerm() }; // Get flavour tagging terms Double_t omega{1.0}; Double_t omegabar{1.0}; const std::size_t nTaggers { flavTag_->getNTaggers() }; for (std::size_t taggerID{0}; taggerIDgetCapitalOmega(taggerID, LauFlavTag::Flavour::B); omegabar *= flavTag_->getCapitalOmega(taggerID, LauFlavTag::Flavour::Bbar); } const Double_t prodAsym { AProd_.unblindValue() }; const Double_t ftOmegaHyp { ((1.0 - prodAsym)*omega + (1.0 + prodAsym)*omegabar) }; const Double_t ftOmegaTrig { ((1.0 - prodAsym)*omega - (1.0 + prodAsym)*omegabar) }; const Double_t coshTerm { ftOmegaHyp * dtCosh * aSqSum }; const Double_t sinhTerm { ftOmegaHyp * dtSinh * interTermRe }; const Double_t cosTerm { ftOmegaTrig * dtCos * aSqDif }; const Double_t sinTerm { ftOmegaTrig * dtSin * interTermIm }; // Combine all terms to get the total amplitude squared const Double_t ASq { coshTerm + sinhTerm + cosTerm - sinTerm }; // Calculate the DP and time normalisation const Double_t normASqSum { sigModelB0_->getDPNorm() + sigModelB0bar_->getDPNorm() }; const Double_t normASqDiff { sigModelB0_->getDPNorm() - sigModelB0bar_->getDPNorm() }; Double_t normInterTermRe { 0.0 }; Double_t normInterTermIm { 0.0 }; if ( cpEigenValue_ != QFS ) { // TODO - double check this sign flipping here (it's presumably right but...) normInterTermRe = ( cpEigenValue_ == CPOdd ) ? 
-1.0 * interTermReNorm_ : interTermReNorm_; normInterTermIm = ( cpEigenValue_ == CPOdd ) ? -1.0 * interTermImNorm_ : interTermImNorm_; } const Double_t normCoshTerm { signalDecayTimePdf_->getNormTermCosh() }; const Double_t normSinhTerm { signalDecayTimePdf_->getNormTermSinh() }; const Double_t normCosTerm { signalDecayTimePdf_->getNormTermCos() }; const Double_t normSinTerm { signalDecayTimePdf_->getNormTermSin() }; const Double_t normHyp { normASqSum * normCoshTerm + normInterTermRe * normSinhTerm }; const Double_t normTrig { - prodAsym * ( normASqDiff * normCosTerm + normInterTermIm * normSinTerm ) }; // Combine all terms to get the total normalisation const Double_t norm { 2.0 * ( normHyp + normTrig ) }; // Multiply the squared-amplitude by the efficiency (DP and decay time) and decay-time error likelihood // and normalise to obtain the signal likelihood sigDPLike_ = ( ASq * dpEff * dtEff * dtErrLike ) / norm; // Background part // TODO move to new function as getEvtBkgndLikelihoods? const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { if ( not usingBkgnd_ ) { bkgndDPLike_[bkgndID] = 0.0; continue; } Double_t omegaBkgnd{1.0}; Double_t omegaBarBkgnd{1.0}; BkgndDecayTimePdfs_[bkgndID]->calcLikelihoodInfo(static_cast(iEvt)); // Consider background type switch ( BkgndTypes_[bkgndID] ) { case LauFlavTag::BkgndType::Combinatorial : { // For combinatorial background the DP and decay-time models factorise completely, just mulitply them // Start with the DP likelihood... if ( (cpEigenValue_ == QFS) and BkgndDPModelsBbar_[bkgndID] != nullptr ) { //Flavour specific (with possible detection asymmetry) // TODO - need to detect CP-eigenstate and two histos case in initialisation and abort since it doesn't make any sense if ( curEvtDecayFlv_ == LauFlavTag::Flavour::B ) { bkgndDPLike_[bkgndID] = BkgndDPModelsB_[bkgndID]->getUnNormValue(iEvt); } else { bkgndDPLike_[bkgndID] = BkgndDPModelsBbar_[bkgndID]->getUnNormValue(iEvt); } bkgndDPLike_[bkgndID] /= ( BkgndDPModelsB_[bkgndID]->getPdfNorm() + BkgndDPModelsBbar_[bkgndID]->getPdfNorm() ); } else { // No detection asymmetry (could be QFS or CP-eigenstate - which implies slightly different normalisation) bkgndDPLike_[bkgndID] = BkgndDPModelsB_[bkgndID]->getLikelihood(iEvt); if ( cpEigenValue_ == QFS ) { bkgndDPLike_[bkgndID] *= 0.5; } } // ...include the decay time... switch( BkgndDecayTimePdfs_[bkgndID]->getFuncType() ) { case LauDecayTime::FuncType::Hist : bkgndDPLike_[bkgndID] *= BkgndDecayTimePdfs_[bkgndID]->getHistTerm(); break; case LauDecayTime::FuncType::Exp : bkgndDPLike_[bkgndID] *= ( BkgndDecayTimePdfs_[bkgndID]->getExpTerm() / BkgndDecayTimePdfs_[bkgndID]->getNormTermExp() ); break; // TODO - any other decay time function types that make sense for combinatorial? // - should also have a set of checks in initialise that we have everything we need for the backgrounds and that the various settings make sense default : // TODO as per comment just above, once the above mentioned checks are implemented this error message can be removed std::cerr << "WARNING in LauTimeDepFitModel::getEvtDPDtLikelihood : bkgnd types other than Hist and Exp don't make sense for combinatorial!" 
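	// Condensed restatement of the signal combination performed above, with the dynamics,
	// decay-time, tagging and normalisation inputs gathered into one hypothetical struct.
	// It mirrors the expressions used in this function and is illustrative only:
	struct SigTermsSketch {
		double aSqSum, aSqDif, interRe, interIm;                   // DP terms
		double dtCosh, dtSinh, dtCos, dtSin;                       // decay-time terms
		double normASqSum, normASqDif, normInterRe, normInterIm;   // DP normalisation terms
		double normCosh, normSinh, normCos, normSin;               // decay-time normalisation terms
		double omega, omegaBar, prodAsym;                          // tagging dilutions and production asymmetry
		double dpEff, dtEff, dtErrLike;                            // efficiencies and decay-time error term
	};

	inline double signalDensitySketch( const SigTermsSketch& s )
	{
		const double ftHyp  { ( 1.0 - s.prodAsym ) * s.omega + ( 1.0 + s.prodAsym ) * s.omegaBar };
		const double ftTrig { ( 1.0 - s.prodAsym ) * s.omega - ( 1.0 + s.prodAsym ) * s.omegaBar };
		const double aSq  { ftHyp  * s.dtCosh * s.aSqSum
		                  + ftHyp  * s.dtSinh * s.interRe
		                  + ftTrig * s.dtCos  * s.aSqDif
		                  - ftTrig * s.dtSin  * s.interIm };
		const double norm { 2.0 * ( s.normASqSum * s.normCosh + s.normInterRe * s.normSinh
		                  - s.prodAsym * ( s.normASqDif * s.normCos + s.normInterIm * s.normSin ) ) };
		return ( aSq * s.dpEff * s.dtEff * s.dtErrLike ) / norm;
	}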
	// Background part
	// TODO move to new function as getEvtBkgndLikelihoods?
	const UInt_t nBkgnds = this->nBkgndClasses();
	for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) {

		if ( not usingBkgnd_ ) {
			bkgndDPLike_[bkgndID] = 0.0;
			continue;
		}

		Double_t omegaBkgnd{1.0};
		Double_t omegaBarBkgnd{1.0};

		BkgndDecayTimePdfs_[bkgndID]->calcLikelihoodInfo(static_cast<UInt_t>(iEvt));

		// Consider background type
		switch ( BkgndTypes_[bkgndID] ) {

			case LauFlavTag::BkgndType::Combinatorial :
			{
				// For combinatorial background the DP and decay-time models factorise completely, just multiply them

				// Start with the DP likelihood...
				if ( (cpEigenValue_ == QFS) and BkgndDPModelsBbar_[bkgndID] != nullptr ) {
					// Flavour specific (with possible detection asymmetry)
					// TODO - need to detect CP-eigenstate and two histos case in initialisation and abort since it doesn't make any sense
					if ( curEvtDecayFlv_ == LauFlavTag::Flavour::B ) {
						bkgndDPLike_[bkgndID] = BkgndDPModelsB_[bkgndID]->getUnNormValue(iEvt);
					} else {
						bkgndDPLike_[bkgndID] = BkgndDPModelsBbar_[bkgndID]->getUnNormValue(iEvt);
					}
					bkgndDPLike_[bkgndID] /= ( BkgndDPModelsB_[bkgndID]->getPdfNorm() + BkgndDPModelsBbar_[bkgndID]->getPdfNorm() );
				} else {
					// No detection asymmetry (could be QFS or CP-eigenstate - which implies slightly different normalisation)
					bkgndDPLike_[bkgndID] = BkgndDPModelsB_[bkgndID]->getLikelihood(iEvt);
					if ( cpEigenValue_ == QFS ) {
						bkgndDPLike_[bkgndID] *= 0.5;
					}
				}

				// ...include the decay time...
				switch( BkgndDecayTimePdfs_[bkgndID]->getFuncType() ) {
					case LauDecayTime::FuncType::Hist :
						bkgndDPLike_[bkgndID] *= BkgndDecayTimePdfs_[bkgndID]->getHistTerm();
						break;
					case LauDecayTime::FuncType::Exp :
						bkgndDPLike_[bkgndID] *= ( BkgndDecayTimePdfs_[bkgndID]->getExpTerm() / BkgndDecayTimePdfs_[bkgndID]->getNormTermExp() );
						break;
					// TODO - any other decay time function types that make sense for combinatorial?
					//      - should also have a set of checks in initialise that we have everything we need for the backgrounds and that the various settings make sense
					default :
						// TODO as per comment just above, once the above mentioned checks are implemented this error message can be removed
						std::cerr << "WARNING in LauTimeDepFitModel::getEvtDPDtLikelihood : bkgnd types other than Hist and Exp don't make sense for combinatorial!" << std::endl;
						break;
				}

				// ...include flavour tagging
				for (std::size_t taggerID{0}; taggerID < nTaggers; ++taggerID) {
					omegaBkgnd *= flavTag_->getCapitalOmegaBkgnd(bkgndID, taggerID, LauFlavTag::Flavour::B, curEvtDecayFlv_);
				}
				bkgndDPLike_[bkgndID] *= omegaBkgnd;

				break;
			}
			case LauFlavTag::BkgndType::FlavourSpecific :
			{
				// DP terms needed by all decay-time cases
				Double_t Asq { BkgndDPModelsB_[bkgndID]->getUnNormValue(iEvt) };
				Double_t Asqbar { BkgndDPModelsBbar_[bkgndID]->getUnNormValue(iEvt) };
				if ( cpEigenValue_ == QFS ) {
					// If the signal is flavour-specific we know which DP to use, so zero the other one
					if ( curEvtDecayFlv_ == LauFlavTag::Flavour::B ) {
						Asqbar = 0.0;
					} else if ( curEvtDecayFlv_ == LauFlavTag::Flavour::Bbar ) {
						Asq = 0.0;
					}
				}
				const Double_t AsqSum { Asq + Asqbar };

				// DP norm terms needed by all decay-time cases
				const Double_t AsqNorm { BkgndDPModelsB_[bkgndID]->getPdfNorm() };
				const Double_t AsqbarNorm { BkgndDPModelsBbar_[bkgndID]->getPdfNorm() };
				const Double_t AsqNormSum { AsqNorm + AsqbarNorm };

				// FT terms needed by all decay-time cases
				omegaBkgnd = omegaBarBkgnd = 1.0;
				for (std::size_t taggerID{0}; taggerID < nTaggers; ++taggerID) {
					omegaBkgnd *= flavTag_->getCapitalOmegaBkgnd(bkgndID, taggerID, LauFlavTag::Flavour::B, curEvtDecayFlv_);
					omegaBarBkgnd *= flavTag_->getCapitalOmegaBkgnd(bkgndID, taggerID, LauFlavTag::Flavour::Bbar, curEvtDecayFlv_);
				}
				const Double_t AProd { AProdBkgnd_[bkgndID]->unblindValue() };
				const Double_t ftOmegaHypBkgnd { (1.0 - AProd)*omegaBkgnd + (1.0 + AProd)*omegaBarBkgnd };

				switch( BkgndDecayTimePdfs_[bkgndID]->getFuncType() ) {
					case LauDecayTime::FuncType::Hist :
					// DP and decay-time still factorise
					{
						// Start with the DP terms...
						bkgndDPLike_[bkgndID] = AsqSum / AsqNormSum;
						// ...include the decay time...
						bkgndDPLike_[bkgndID] *= BkgndDecayTimePdfs_[bkgndID]->getHistTerm();
						// ...include flavour tagging
						bkgndDPLike_[bkgndID] *= ( 0.5 * ftOmegaHypBkgnd );
						break;
					}
					case LauDecayTime::FuncType::Exp :
					// DP and decay-time still factorise
					{
						// Start with the DP terms...
						bkgndDPLike_[bkgndID] = AsqSum / AsqNormSum;
						// ...include the decay time...
						bkgndDPLike_[bkgndID] *= ( BkgndDecayTimePdfs_[bkgndID]->getExpTerm() / BkgndDecayTimePdfs_[bkgndID]->getNormTermExp() );
						// ...include flavour tagging
						bkgndDPLike_[bkgndID] *= ( 0.5 * ftOmegaHypBkgnd );
						break;
					}
					case LauDecayTime::FuncType::ExpTrig :
					// DP and decay-time don't factorise
					case LauDecayTime::FuncType::ExpHypTrig :
					{
						// DP and FT terms specific to this case
						const Double_t AsqDiff { Asq - Asqbar };
						const Double_t AsqNormDiff { AsqNorm - AsqbarNorm }; // TODO check this shouldn't be `fabs`ed
						const Double_t ftOmegaTrigBkgnd { (1.0 - AProd)*omegaBkgnd - (1.0 + AProd)*omegaBarBkgnd };

						// decay time terms
						// Sin and Sinh terms are ignored: the FS modes can't exhibit TD CPV
						const Double_t dtCoshBkgnd { BkgndDecayTimePdfs_[bkgndID]->getCoshTerm() };
						const Double_t dtCosBkgnd { BkgndDecayTimePdfs_[bkgndID]->getCosTerm() };
						const Double_t normCoshTermBkgnd { BkgndDecayTimePdfs_[bkgndID]->getNormTermCosh() };
						const Double_t normCosTermBkgnd { BkgndDecayTimePdfs_[bkgndID]->getNormTermCos() };

						// Combine the DP, FT, and decay time terms
						const Double_t coshTermBkgnd { ftOmegaHypBkgnd * dtCoshBkgnd * AsqSum };
						const Double_t cosTermBkgnd { ftOmegaTrigBkgnd * dtCosBkgnd * AsqDiff };

						// Similarly for the normalisation, see Laura note eq. 41
						const Double_t normBkgnd { 2.0 * ( (normCoshTermBkgnd * AsqNormSum) - AProd*(normCosTermBkgnd * AsqNormDiff) ) };

						bkgndDPLike_[bkgndID] = (coshTermBkgnd + cosTermBkgnd) / normBkgnd;
						break;
					}
					case LauDecayTime::FuncType::Delta :
					case LauDecayTime::FuncType::DeltaExp :
						// TODO as per comment above, once the checks in initialise are implemented this error message can be removed
						std::cerr << "WARNING in LauTimeDepFitModel::getEvtDPDtLikelihood : bkgnd types Delta and DeltaExp don't make sense!" << std::endl;
						break;
				}

				break;
			}

			case LauFlavTag::BkgndType::SelfConjugate :
				// Copy this from the CP-eigenstate signal case
				std::cerr << "WARNING in LauTimeDepFitModel::getEvtDPDtLikelihood : SelfConjugate states aren't implemented yet!" << std::endl;
				bkgndDPLike_[bkgndID] = 0.0;
				break;

			case LauFlavTag::BkgndType::NonSelfConjugate :
				// TODO this has been ignored for now since it's not used in the B->Dpipi case
				std::cerr << "WARNING in LauTimeDepFitModel::getEvtDPDtLikelihood : NonSelfConjugate states aren't implemented yet!" << std::endl;
				bkgndDPLike_[bkgndID] = 0.0;
				break;
		}

		// Get the decay time acceptance
		const Double_t dtEffBkgnd { BkgndDecayTimePdfs_[bkgndID]->getEffiTerm() };

		// Get the decay time error term
		const Double_t dtErrLikeBkgnd { BkgndDecayTimePdfs_[bkgndID]->getErrTerm() };

		// Include these terms in the background likelihood
		bkgndDPLike_[bkgndID] *= ( dtEffBkgnd * dtErrLikeBkgnd );
	}
}
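// Note on the flavour-specific background case above: with the Sin and Sinh terms dropped,
// the Omega-weighted (cosh + cos) combination is just the flavour-specific mixing rate,
//
//   dGamma/dt ~ e^{-Gamma t} [ Omega_hyp ( |A|^2 + |Abar|^2 ) cosh( DeltaGamma t / 2 )
//                            + Omega_trig ( |A|^2 - |Abar|^2 ) cos( Deltam t ) ],
//
// and its normalisation (normBkgnd, cf. Laura note eq. 41) follows by summing over the
// tagging outcomes exactly as for the signal, giving the factors of 2.0 and -AProd.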
void LauTimeDepFitModel::getEvtExtraLikelihoods(const UInt_t iEvt)
{
	// Function to return the signal and background likelihoods for the
	// extra variables for the given event evtNo.

	sigExtraLike_ = 1.0; // There's always a likelihood term for signal, so we better not zero it.

	// First, those independent of the tagging of the event:
	// signal
	if ( not sigExtraPdf_.empty() ) {
		sigExtraLike_ = this->prodPdfValue( sigExtraPdf_, iEvt );
	}

	// Background
	const UInt_t nBkgnds = this->nBkgndClasses();
	for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) {
		if (usingBkgnd_) {
			bkgndExtraLike_[bkgndID] = this->prodPdfValue( BkgndPdfs_[bkgndID], iEvt );
		} else {
			bkgndExtraLike_[bkgndID] = 0.0;
		}
	}
}

void LauTimeDepFitModel::updateCoeffs()
{
	coeffsB0bar_.clear();
	coeffsB0_.clear();
	coeffsB0bar_.reserve(nSigComp_);
	coeffsB0_.reserve(nSigComp_);
	for (UInt_t i = 0; i < nSigComp_; ++i) {
		coeffsB0bar_.push_back(coeffPars_[i]->antiparticleCoeff());
		coeffsB0_.push_back(coeffPars_[i]->particleCoeff());
	}
}

void LauTimeDepFitModel::checkMixingPhase()
{
	Double_t phase = phiMix_.value();
	Double_t genPhase = phiMix_.genValue();

	// Check now whether the phase lies in the right range (-pi to pi).
	Bool_t withinRange(kFALSE);
	while (withinRange == kFALSE) {
		if (phase > -LauConstants::pi && phase < LauConstants::pi) {
			withinRange = kTRUE;
		} else {
			// Not within the specified range
			if (phase > LauConstants::pi) {
				phase -= LauConstants::twoPi;
			} else if (phase < -LauConstants::pi) {
				phase += LauConstants::twoPi;
			}
		}
	}

	// A further problem can occur when the generated phase is close to -pi or pi.
	// The phase can wrap over to the other end of the scale -
	// this leads to artificially large pulls so we wrap it back.
	Double_t diff = phase - genPhase;
	if (diff > LauConstants::pi) {
		phase -= LauConstants::twoPi;
	} else if (diff < -LauConstants::pi) {
		phase += LauConstants::twoPi;
	}

	// finally store the new value in the parameter
	// and update the pull
	phiMix_.value(phase);
	phiMix_.updatePull();
}

void LauTimeDepFitModel::embedSignal(const TString& fileName, const TString& treeName,
		const Bool_t reuseEventsWithinEnsemble, const Bool_t reuseEventsWithinExperiment,
		const Bool_t useReweighting)
{
	if (signalTree_) {
		std::cerr<<"ERROR in LauTimeDepFitModel::embedSignal : Already embedding signal from a file."<<std::endl;
		return;
	}

	signalTree_ = new LauEmbeddedData(fileName,treeName,reuseEventsWithinExperiment);
	Bool_t dataOK = signalTree_->findBranches();
	if (!dataOK) {
		delete signalTree_; signalTree_ = 0;
		std::cerr<<"ERROR in LauTimeDepFitModel::embedSignal : Problem creating data tree for embedding."<<std::endl;
		return;
	}

	reuseSignal_ = reuseEventsWithinEnsemble;
	useReweighting_ = useReweighting;

	this->enableEmbedding(kTRUE);
}

void LauTimeDepFitModel::embedBkgnd(const TString& bkgndClass, const TString& fileName, const TString& treeName,
		const Bool_t reuseEventsWithinEnsemble, const Bool_t reuseEventsWithinExperiment,
		const Bool_t useReweighting)
{
	if ( ! this->validBkgndClass( bkgndClass ) ) {
		std::cerr << "ERROR in LauTimeDepFitModel::embedBkgnd : Invalid background class \"" << bkgndClass << "\"." << std::endl;
		std::cerr << "                                        : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl;
		return;
	}

	UInt_t bkgndID = this->bkgndClassID( bkgndClass );

	// Take a reference to the stored pointer so the embedded data is kept in the vector
	LauEmbeddedData*& bkgTree = bkgndTree_[bkgndID];
	if (bkgTree) {
		std::cerr << "ERROR in LauTimeDepFitModel::embedBkgnd : Already embedding background from a file." << std::endl;
		return;
	}

	bkgTree = new LauEmbeddedData(fileName,treeName,reuseEventsWithinExperiment);
	Bool_t dataOK = bkgTree->findBranches();
	if (!dataOK) {
		delete bkgTree; bkgTree = 0;
		std::cerr << "ERROR in LauTimeDepFitModel::embedBkgnd : Problem creating data tree for embedding." << std::endl;
		return;
	}

	reuseBkgnd_[bkgndID] = reuseEventsWithinEnsemble;
	useReweighting_ = useReweighting;

	this->enableEmbedding(kTRUE);
}
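// Typical use of the embedding interface (illustrative only - the file and tree names here
// are hypothetical, and "comb" stands for whichever class name was registered via
// setBkgndClassNames):
//
//     model->embedSignal( "embeddedSignal.root", "genResults", kTRUE, kFALSE, kFALSE );
//     model->embedBkgnd( "comb", "embeddedComb.root", "genResults", kTRUE, kFALSE, kFALSE );
//
// Both calls switch on enableEmbedding(), so subsequently generated experiments can draw
// signal and background events from these trees instead of sampling the fit models.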
void LauTimeDepFitModel::setupSPlotNtupleBranches()
{
	// add branches for storing the experiment number and the number of
	// the event within the current experiment
	this->addSPlotNtupleIntegerBranch("iExpt");
	this->addSPlotNtupleIntegerBranch("iEvtWithinExpt");

	// Store the efficiency of the event (for inclusive BF calculations).
	if (this->storeDPEff()) {
		this->addSPlotNtupleDoubleBranch("efficiency");
	}

	// Store the total event likelihood for each species.
	this->addSPlotNtupleDoubleBranch("sigTotalLike");
	if (usingBkgnd_) {
		const UInt_t nBkgnds = this->nBkgndClasses();
		for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
			TString name( this->bkgndClassName(iBkgnd) );
			name += "TotalLike";
			this->addSPlotNtupleDoubleBranch(name);
		}
	}

	// Store the DP likelihoods
	if (this->useDP()) {
		this->addSPlotNtupleDoubleBranch("sigDPLike");
		if (usingBkgnd_) {
			const UInt_t nBkgnds = this->nBkgndClasses();
			for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
				TString name( this->bkgndClassName(iBkgnd) );
				name += "DPLike";
				this->addSPlotNtupleDoubleBranch(name);
			}
		}
	}

	// Store the likelihoods for each extra PDF
	this->addSPlotNtupleBranches(sigExtraPdf_, "sig");
	if (usingBkgnd_) {
		const UInt_t nBkgnds = this->nBkgndClasses();
		for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
			const TString& bkgndClass = this->bkgndClassName(iBkgnd);
			this->addSPlotNtupleBranches(BkgndPdfs_[iBkgnd], bkgndClass);
		}
	}
}

void LauTimeDepFitModel::addSPlotNtupleBranches(const LauPdfPList& extraPdfs, const TString& prefix)
{
	// Loop through each of the PDFs
	for ( const LauAbsPdf* pdf : extraPdfs ) {

		// Count the number of input variables that are not
		// DP variables (used in the case where there is DP
		// dependence for e.g. MVA)
		UInt_t nVars{0};
		const std::vector<TString> varNames { pdf->varNames() };
		for ( const TString& varName : varNames ) {
			if ( varName != "m13Sq" && varName != "m23Sq" ) {
				++nVars;
			}
		}

		if ( nVars == 1 ) {
			// If the PDF only has one variable then
			// simply add one branch for that variable
			TString name{prefix};
			name += pdf->varName();
			name += "Like";
			this->addSPlotNtupleDoubleBranch(name);
		} else if ( nVars == 2 ) {
			// If the PDF has two variables then we
			// need a branch for them both together and
			// branches for each
			TString allVars{""};
			for ( const TString& varName : varNames ) {
				if ( varName != "m13Sq" && varName != "m23Sq" ) {
					allVars += varName;
					TString name{prefix};
					name += varName;
					name += "Like";
					this->addSPlotNtupleDoubleBranch(name);
				}
			}
			TString name{prefix};
			name += allVars;
			name += "Like";
			this->addSPlotNtupleDoubleBranch(name);
		} else {
			std::cerr<<"WARNING in LauTimeDepFitModel::addSPlotNtupleBranches : Can't yet deal with 3D PDFs."<<std::endl;
		}
	}
}
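// Branch-naming sketch (the variable names here are hypothetical): for a signal PDF in a
// single variable "mES" the branch created above is "sigmESLike"; for a 2D PDF in, say,
// ("mES","DeltaE") one gets "sigmESLike", "sigDeltaELike" and the combined "sigmESDeltaELike".
// The same pattern is used with the background class name as the prefix.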
Double_t LauTimeDepFitModel::setSPlotNtupleBranchValues(LauPdfPList& extraPdfs, const TString& prefix, const UInt_t iEvt)
{
	// Store the PDF value for each variable in the list
	Double_t totalLike(1.0);
	Double_t extraLike(0.0);

	for ( LauAbsPdf* pdf : extraPdfs ) {

		// Calculate the likelihood for this event
		pdf->calcLikelihoodInfo(iEvt);
		extraLike = pdf->getLikelihood();
		totalLike *= extraLike;

		// Count the number of input variables that are not
		// DP variables (used in the case where there is DP
		// dependence for e.g. MVA)
		UInt_t nVars{0};
		const std::vector<TString> varNames { pdf->varNames() };
		for ( const TString& varName : varNames ) {
			if ( varName != "m13Sq" && varName != "m23Sq" ) {
				++nVars;
			}
		}

		if ( nVars == 1 ) {
			// If the PDF only has one variable then
			// simply store the value for that variable
			TString name{prefix};
			name += pdf->varName();
			name += "Like";
			this->setSPlotNtupleDoubleBranchValue(name, extraLike);
		} else if ( nVars == 2 ) {
			// If the PDF has two variables then we
			// store the value for them both together
			// and for each on their own
			TString allVars{""};
			for ( const TString& varName : varNames ) {
				if ( varName != "m13Sq" && varName != "m23Sq" ) {
					allVars += varName;
					TString name{prefix};
					name += varName;
					name += "Like";
					const Double_t indivLike = pdf->getLikelihood( varName );
					this->setSPlotNtupleDoubleBranchValue(name, indivLike);
				}
			}
			TString name{prefix};
			name += allVars;
			name += "Like";
			this->setSPlotNtupleDoubleBranchValue(name, extraLike);
		} else {
			std::cerr<<"WARNING in LauTimeDepFitModel::setSPlotNtupleBranchValues : Can't yet deal with 3D PDFs."<<std::endl;
		}
	}

	return totalLike;
}

LauSPlot::NameSet LauTimeDepFitModel::variableNames() const
{
	LauSPlot::NameSet nameSet;

	if (this->useDP()) {
		nameSet.insert("DP");
	}

	for ( const LauAbsPdf* pdf : sigExtraPdf_ ) {
		// Loop over the variables involved in each PDF
		const std::vector<TString> varNames { pdf->varNames() };
		for ( const TString& varName : varNames ) {
			// If they are not DP coordinates then add them
			if ( varName != "m13Sq" && varName != "m23Sq" ) {
				nameSet.insert( varName );
			}
		}
	}

	return nameSet;
}
LauSPlot::NumbMap LauTimeDepFitModel::freeSpeciesNames() const
{
	LauSPlot::NumbMap numbMap;

	if (!signalEvents_->fixed() && this->doEMLFit()) {
		numbMap["sig"] = signalEvents_->genValue();
	}

	if ( usingBkgnd_ ) {
		const UInt_t nBkgnds = this->nBkgndClasses();
		for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
			const TString& bkgndClass = this->bkgndClassName(iBkgnd);
			const LauAbsRValue* par = bkgndEvents_[iBkgnd];
			if (!par->fixed()) {
				numbMap[bkgndClass] = par->genValue();
				if ( ! par->isLValue() ) {
					std::cerr << "WARNING in LauTimeDepFitModel::freeSpeciesNames : \"" << par->name() << "\" is a LauFormulaPar, which implies it is perhaps not entirely free to float in the fit, so the sWeight calculation may not be reliable" << std::endl;
				}
			}
		}
	}

	return numbMap;
}

LauSPlot::NumbMap LauTimeDepFitModel::fixdSpeciesNames() const
{
	LauSPlot::NumbMap numbMap;

	if (signalEvents_->fixed() && this->doEMLFit()) {
		numbMap["sig"] = signalEvents_->genValue();
	}

	if ( usingBkgnd_ ) {
		const UInt_t nBkgnds = this->nBkgndClasses();
		for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
			const TString& bkgndClass = this->bkgndClassName(iBkgnd);
			const LauAbsRValue* par = bkgndEvents_[iBkgnd];
			if (par->fixed()) {
				numbMap[bkgndClass] = par->genValue();
			}
		}
	}

	return numbMap;
}

LauSPlot::TwoDMap LauTimeDepFitModel::twodimPDFs() const
{
	LauSPlot::TwoDMap twodimMap;

	for ( const LauAbsPdf* pdf : sigExtraPdf_ ) {
		// Count the number of input variables that are not DP variables
		UInt_t nVars{0};
		const std::vector<TString> varNames { pdf->varNames() };
		for ( const TString& varName : varNames ) {
			if ( varName != "m13Sq" && varName != "m23Sq" ) {
				++nVars;
			}
		}
		if ( nVars == 2 ) {
			twodimMap.insert( std::make_pair( "sig", std::make_pair( varNames[0], varNames[1] ) ) );
		}
	}

	if (usingBkgnd_) {
		const UInt_t nBkgnds = this->nBkgndClasses();
		for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
			const TString& bkgndClass = this->bkgndClassName(iBkgnd);
			for ( const LauAbsPdf* pdf : BkgndPdfs_[iBkgnd] ) {
				// Count the number of input variables that are not DP variables
				UInt_t nVars{0};
				const std::vector<TString> varNames { pdf->varNames() };
				for ( const TString& varName : varNames ) {
					if ( varName != "m13Sq" && varName != "m23Sq" ) {
						++nVars;
					}
				}
				if ( nVars == 2 ) {
					twodimMap.insert( std::make_pair( bkgndClass, std::make_pair( varNames[0], varNames[1] ) ) );
				}
			}
		}
	}

	return twodimMap;
}
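// These three maps feed the LauSPlot setup: species returned by freeSpeciesNames() have their
// yields floated when the sWeights are computed, species from fixdSpeciesNames() are held
// constant, and twodimPDFs() flags which (species, variable-pair) combinations are modelled
// by 2D PDFs so that they are treated together rather than as independent control variables
// (at least, that is how the LauSPlot interface is understood here).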
void LauTimeDepFitModel::storePerEvtLlhds()
{
	std::cout<<"INFO in LauTimeDepFitModel::storePerEvtLlhds : Storing per-event likelihood values..."<<std::endl;

	// Get the data
	LauFitDataTree* inputFitData = this->fitData();

	// if we've not been using the DP model then we need to cache all
	// the info here so that we can get the efficiency from it
	if (!this->useDP() && this->storeDPEff()) {
		sigModelB0bar_->initialise(coeffsB0bar_);
		sigModelB0_->initialise(coeffsB0_);
		sigModelB0bar_->fillDataTree(*inputFitData);
		sigModelB0_->fillDataTree(*inputFitData);
	}

	UInt_t evtsPerExpt(this->eventsPerExpt());

	LauIsobarDynamics* sigModel(sigModelB0bar_);

	for (UInt_t iEvt = 0; iEvt < evtsPerExpt; ++iEvt) {

		// Find out whether we have B0bar or B0
		flavTag_->updateEventInfo(iEvt);
		curEvtTagFlv_ = flavTag_->getCurEvtTagFlv();
		curEvtMistag_ = flavTag_->getCurEvtMistag();

		// the DP information
		this->getEvtDPDtLikelihood(iEvt);
		if (this->storeDPEff()) {
			if (!this->useDP()) {
				sigModel->calcLikelihoodInfo(iEvt);
			}
			this->setSPlotNtupleDoubleBranchValue("efficiency",sigModel->getEvtEff());
		}
		if (this->useDP()) {
			sigTotalLike_ = sigDPLike_;
			this->setSPlotNtupleDoubleBranchValue("sigDPLike",sigDPLike_);
			if (usingBkgnd_) {
				const UInt_t nBkgnds = this->nBkgndClasses();
				for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
					TString name = this->bkgndClassName(iBkgnd);
					name += "DPLike";
					this->setSPlotNtupleDoubleBranchValue(name,bkgndDPLike_[iBkgnd]);
				}
			}
		} else {
			sigTotalLike_ = 1.0;
		}

		// the signal PDF values
		sigTotalLike_ *= this->setSPlotNtupleBranchValues(sigExtraPdf_, "sig", iEvt);

		// the background PDF values
		if (usingBkgnd_) {
			const UInt_t nBkgnds = this->nBkgndClasses();
			for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
				const TString& bkgndClass = this->bkgndClassName(iBkgnd);
				LauPdfPList& pdfs = BkgndPdfs_[iBkgnd];
				bkgndTotalLike_[iBkgnd] *= this->setSPlotNtupleBranchValues(pdfs, bkgndClass, iEvt);
			}
		}

		// the total likelihoods
		this->setSPlotNtupleDoubleBranchValue("sigTotalLike",sigTotalLike_);
		if (usingBkgnd_) {
			const UInt_t nBkgnds = this->nBkgndClasses();
			for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
				TString name = this->bkgndClassName(iBkgnd);
				name += "TotalLike";
				this->setSPlotNtupleDoubleBranchValue(name,bkgndTotalLike_[iBkgnd]);
			}
		}

		// fill the tree
		this->fillSPlotNtupleBranches();
	}

	std::cout<<"INFO in LauTimeDepFitModel::storePerEvtLlhds : Finished storing per-event likelihood values."<<std::endl;

	this->printPdfValues( sigExtraPdf_, "Signal" );

+	// Print the background information
	if ( usingBkgnd_ ) {
		const UInt_t nBkgnds = this->nBkgndClasses();
		for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) {
			const TString& bkgndName { this->bkgndClassName( bkgndID ) };
-			std::cout << bkgndName << " DP+t likelihood = " << bkgndDPLike_[bkgndID] << "\n";
+			std::cerr << bkgndName << " DP+t likelihood = " << bkgndDPLike_[bkgndID] << "\n";
			this->printPdfValues( BkgndPdfs_[bkgndID], bkgndName );
		}
	}
}